def verify_config(self, table_name, config, window=None):
    '''
    Validate an experiment configuration before it is used for the first time.

    Returns the completed config dict on success; otherwise pops an error
    dialog (parented to `window`, defaulting to the root window) and
    returns None.
    '''
    assert self.table_name is None
    parent = self.root if window is None else window

    # reject duplicate experiment names
    if self.database.check_table_exist(name=table_name):
        tk.messagebox.showinfo('Error', f'Experiment {table_name} already exists', parent=parent)
        return None

    # fill in defaults and sanity-check the raw config
    try:
        config = complete_config(config, check=True)
    except Exception as err:
        tk.messagebox.showinfo('Error', 'Invalid configurations: ' + str(err), parent=parent)
        return None

    # the configured problem must be buildable
    try:
        problem = build_problem(config['problem']['name'])
    except Exception as err:
        tk.messagebox.showinfo('Error', 'Failed to build problem: ' + str(err), parent=parent)
        return None

    # optionally validate user-provided initial samples against the
    # problem's variable/objective dimensions
    sample_path = config['experiment'].get('init_sample_path')
    if sample_path is not None:
        try:
            X_init, Y_init = load_provided_initial_samples(sample_path)
            problem_cfg = problem.get_config()
            verify_provided_initial_samples(X_init, Y_init, problem_cfg['n_var'], problem_cfg['n_obj'])
        except Exception as err:
            tk.messagebox.showinfo('Error', 'Failed to load initial samples from file: ' + str(err), parent=parent)
            return None

    # success
    return config
def _build_optimizer(config):
    '''
    Construct the MOBO optimizer described by an experiment configuration.

    Parameters
    ----------
    config: dict
        Experiment configuration dict (must contain 'problem' and
        'algorithm' sections, each with a 'name' entry).

    Returns
    -------
    optimizer: autooed.mobo.mobo.MOBO
        The instantiated optimizer.
    '''
    problem_spec = config['problem']
    algo_spec = config['algorithm']
    # resolve the algorithm class, then instantiate it on the built problem
    algo_cls = get_algorithm(algo_spec['name'])
    return algo_cls(build_problem(problem_spec['name']), algo_spec)
def set_config(self, config):
    '''
    Store the config, push it to the agent, and kick off database
    initialization (random + provided samples) when the experiment
    table does not exist yet.
    '''
    # initialization is only needed when the table is absent and no
    # initialization is already in flight
    if self.agent.check_table_exist() or self.initializing:
        self.config = config.copy()
        self.agent.set_config(self.config)
        return

    problem = build_problem(config['problem']['name'])
    exp_cfg = config['experiment']
    X_evaluated, X_unevaluated, Y_evaluated = get_initial_samples(
        problem, exp_cfg['n_random_sample'], exp_cfg['init_sample_path'])

    self.initializing = True
    self.config = config.copy()
    self.agent.set_config(self.config)

    # seed the database; rows without objective values come back for evaluation
    pending_rowids = self.agent.initialize(X_evaluated, X_unevaluated, Y_evaluated)
    if pending_rowids is not None:
        self.evaluate_manual(pending_rowids)
def evaluate(name, x_next):
    '''
    Evaluate the objective performance of a given design.

    Parameters
    ----------
    name: str
        Name of the problem.
    x_next: np.array
        Design to be evaluated.

    Returns
    -------
    y_next: np.array
        Objective values of the given design.
    '''
    # build the real problem and evaluate the design on it
    problem = build_problem(name)
    return np.array(problem.evaluate_objective(x_next))
def init_config(self, table_name, config=None, window=None):
    '''
    Load or create an experiment, then build and launch the main GUI.

    When `config` is None the experiment is assumed to exist and its config
    is loaded from the database; otherwise a new experiment table is
    created for the given config. On any failure an error dialog is shown
    (parented to `window`, defaulting to the root window) and the method
    returns early. On success this tears down the init window and enters
    the Tk main loop, so it does not return until the GUI is closed.
    '''
    if window is None:
        window = self.root
    # check if table exists
    if config is None:
        # load experiment: the config must already be stored in the database
        config = self.database.query_config(table_name)
        if config is None:
            tk.messagebox.showinfo(
                'Error',
                f'Database cannot find config of {table_name}, please recreate this experiment',
                parent=window)
            return
        table_exist = True
    else:
        # create experiment
        table_exist = False
    # create database table
    if not table_exist:
        try:
            self.database.create_table(table_name)
        except Exception as e:
            tk.messagebox.showinfo('Error', 'Failed to create database table: ' + str(e), parent=window)
            return
    # create agent and scheduler
    agent = OptimizeAgent(self.database, table_name)
    scheduler = OptimizeScheduler(agent)
    try:
        scheduler.set_config(config)
    except Exception as e:
        # roll back: stop the scheduler and drop the freshly created table
        scheduler.stop_all()
        self.database.remove_table(table_name)
        tk.messagebox.showinfo('Error', 'Invalid values in configuration: ' + str(e), parent=window)
        return
    # set properties
    self.table_name = table_name
    self.config = config
    problem, self.true_pfront = build_problem(
        self.config['problem']['name'], get_pfront=True)
    # merge the problem's own config with the experiment's problem section
    self.problem_cfg = problem.get_config()
    self.problem_cfg.update(self.config['problem'])
    self.agent = agent
    self.scheduler = scheduler
    # initialize window: close the init window (keep the database open) and
    # create a fresh Tk root for the main GUI
    self._quit_init(quit_db=False)
    self.root = tk.Tk()
    self.root.title('AutoOED')
    self.root.protocol('WM_DELETE_WINDOW', self._quit)
    self.root.iconphoto(True, tk.Image('photo', file=get_icon_path()))
    # initialize main GUI: one controller per panel/menu
    self.view = GUIView(self.root)
    self.controller = {
        'menu_export': MenuExportController(self),
        'panel_info': PanelInfoController(self),
        'panel_control': PanelControlController(self),
        'panel_log': PanelLogController(self),
        'viz_space': VizSpaceController(self),
        'viz_stats': VizStatsController(self),
        'viz_database': VizDatabaseController(self),
    }
    # wire the three export menu entries (db / stats / figures)
    self.view.menu_export.entryconfig(
        0, command=self.controller['menu_export'].export_db)
    self.view.menu_export.entryconfig(
        1, command=self.controller['menu_export'].export_stats)
    self.view.menu_export.entryconfig(
        2, command=self.controller['menu_export'].export_figures)
    # initialize GUI params
    if not self.agent.can_eval:
        # no evaluation function available -> auto mode cannot run
        entry_mode = self.controller['panel_control'].view.widget['mode']
        entry_mode.widget['Auto'].config(state=tk.DISABLED)
    # NOTE(review): source formatting was lost; assuming the batch-size
    # initialization below sits outside the `can_eval` check — confirm
    # against the original file
    entry_batch_size = self.controller['panel_control'].view.widget[
        'batch_size']
    entry_batch_size.set(self.config['experiment']['batch_size'])
    # trigger periodic refresh
    self.root.after(self.refresh_rate, self.refresh)
    # center(self.root)
    self.root.mainloop()
from autooed.utils.initialization import generate_random_initial_samples
from autooed.utils.plot import plot_performance_space, plot_performance_metric

from arguments import get_args


# Demo/benchmark script: build a problem and algorithm from command-line
# arguments, draw random initial samples, then run batched optimization.
if __name__ == '__main__':

    # load arguments
    args, module_cfg = get_args()

    # set random seed
    set_seed(args.seed)

    # build problem
    problem = build_problem(args.problem)
    print(problem)

    # build algorithm
    algorithm = build_algorithm(args.algo, problem, module_cfg)
    print(algorithm)

    # generate initial random samples and evaluate them on the real problem
    X = generate_random_initial_samples(problem, args.n_init_sample)
    Y = np.array([problem.evaluate_objective(x) for x in X])

    # optimization loop: keep proposing batches until the sample budget is met
    while len(X) < args.n_total_sample:

        # propose design samples
        # NOTE(review): the visible chunk ends here — the evaluation of
        # X_next and the append to X/Y (which make the loop terminate)
        # must follow in the original file; confirm before editing
        X_next = algorithm.optimize(X, Y, None, args.batch_size)
def refresh(self): ''' Refresh the agent to load the up-to-date config. ''' # load config config = self.get_config() if config is None: return if self.problem_cfg is None: # first time # update agent's problem config problem_cfg = config['problem'] problem = build_problem(problem_cfg['name']) self.problem_cfg = problem.get_config() self.problem_cfg.update(problem_cfg) # whether evaluation function is provided self.can_eval = hasattr( problem, 'evaluate_objective' ) or self.problem_cfg['obj_func'] is not None # mapping from keys to database column names (e.g., X -> [x1, x2, ...]) self.key_map = { 'status': 'status', 'X': self.problem_cfg['var_name'], 'Y': self.problem_cfg['obj_name'], '_Y_pred_mean': [ f'_{name}_pred_mean' for name in self.problem_cfg['obj_name'] ], '_Y_pred_std': [f'_{name}_pred_std' for name in self.problem_cfg['obj_name']], 'pareto': 'pareto', 'batch': 'batch', '_order': '_order', '_hypervolume': '_hypervolume', } # mapping from problem domains to data types in database var_type_map = { 'continuous': float, 'integer': int, 'binary': int, 'categorical': str, 'mixed': object, } # mapping from keys to data types in database self.type_map = { 'status': str, 'X': var_type_map[self.problem_cfg['type']], 'Y': float, '_Y_pred_mean': float, '_Y_pred_std': float, 'pareto': bool, 'batch': int, '_order': int, '_hypervolume': float, } elif config != self.problem_cfg: # update in the middle # update agent's problem config self.problem_cfg.update(config['problem'])