def wrapper(param):
    """Evaluate a single configuration under a wall-clock time limit.

    Args:
        param: tuple of (objective_function, config, time_limit_per_trial).

    Returns:
        Observation carrying the config, trial state, constraints,
        objectives and elapsed wall-clock time. On timeout/failure the
        objectives and constraints are None.
    """
    objective_function, config, time_limit_per_trial = param
    trial_state = SUCCESS
    start_time = time.time()
    # Defaults for the failure paths; overwritten on success.
    objs, constraints = None, None
    try:
        args, kwargs = (config,), dict()
        timeout_status, _result = time_limit(objective_function,
                                             time_limit_per_trial,
                                             args=args, kwargs=kwargs)
        if timeout_status:
            raise TimeoutException(
                'Timeout: time limit for this evaluation is %.1fs' % time_limit_per_trial)
        objs, constraints = get_result(_result)
    except TimeoutException:
        # Narrow clause replaces the isinstance() dispatch inside a broad handler.
        trial_state = TIMEOUT
    except Exception:
        traceback.print_exc(file=sys.stdout)
        trial_state = FAILED
    elapsed_time = time.time() - start_time
    return Observation(config, trial_state, constraints, objs, elapsed_time)
def iterate(self, budget_left=None):
    """Run one optimization iteration: suggest a config, evaluate it, record it.

    Args:
        budget_left: remaining time budget in seconds; None means unlimited.

    Returns:
        (config, trial_state, constraints, objs).
    """
    config = self.config_advisor.get_suggestion()
    trial_state = SUCCESS
    # Effectively unlimited budget when none is supplied.
    remaining_budget = int(1e10) if budget_left is None else budget_left
    per_trial_limit = math.ceil(min(self.time_limit_per_trial, remaining_budget))

    if config not in self.config_advisor.history_container.configurations:
        tick = time.time()
        try:
            call_args, call_kwargs = (config,), dict()
            hit_timeout, raw_result = time_limit(self.objective_function,
                                                 per_trial_limit,
                                                 args=call_args, kwargs=call_kwargs)
            if hit_timeout:
                raise TimeoutException(
                    'Timeout: time limit for this evaluation is %.1fs' % per_trial_limit)
            objs, constraints = get_result(raw_result)
        except Exception as e:
            if isinstance(e, TimeoutException):
                self.logger.warning(str(e))
                trial_state = TIMEOUT
            else:
                self.logger.warning('Exception when calling objective function: %s' % str(e))
                trial_state = FAILED
            objs = self.FAILED_PERF
            constraints = None
        elapsed = time.time() - tick
        observation = Observation(config, trial_state, constraints, objs, elapsed)
        # A timeout under a shrunken per-trial limit means the overall budget ran
        # out in this (last) iteration; that observation is discarded.
        last_round_timeout = (trial_state == TIMEOUT
                              and per_trial_limit != self.time_limit_per_trial)
        if not last_round_timeout:
            self.config_advisor.update_observation(observation)
    else:
        self.logger.info('This configuration has been evaluated! Skip it: %s' % config)
        history = self.get_history()
        hist_idx = history.configurations.index(config)
        trial_state = history.trial_states[hist_idx]
        objs = history.perfs[hist_idx]
        constraints = (history.constraint_perfs[hist_idx]
                       if self.task_info['num_constraints'] > 0 else None)
        if self.task_info['num_objs'] == 1:
            objs = (objs,)

    self.iteration_id += 1
    # Logging.
    if self.task_info['num_constraints'] > 0:
        self.logger.info('Iteration %d, objective value: %s. constraints: %s.'
                         % (self.iteration_id, objs, constraints))
    else:
        self.logger.info('Iteration %d, objective value: %s.' % (self.iteration_id, objs))
    # Visualization: only objectives below the failure sentinel are plotted.
    for obj_idx, obj in enumerate(objs):
        if obj < self.FAILED_PERF[obj_idx]:
            self.writer.add_scalar('data/objective-%d' % (obj_idx + 1), obj, self.iteration_id)
    return config, trial_state, constraints, objs
def run(self):
    """Worker main loop: pull configs from the messager, evaluate, send back observations."""
    while True:
        # Fetch the next config message; a broken channel ends the worker.
        try:
            msg = self.worker_messager.receive_message()
        except Exception as e:
            print("Worker receive message error:", str(e))
            return
        if msg is None:
            # Nothing queued yet -- poll again shortly.
            time.sleep(1)
            continue
        print("Worker: get config. start working.")
        config, per_trial_limit = msg

        # Evaluate the config under the per-trial time limit.
        trial_state = SUCCESS
        tick = time.time()
        try:
            hit_timeout, raw_result = time_limit(self.objective_function,
                                                 per_trial_limit,
                                                 args=(config,), kwargs=dict())
            if hit_timeout:
                raise TimeoutException(
                    'Timeout: time limit for this evaluation is %.1fs' % per_trial_limit)
            objs, constraints = get_result(raw_result)
        except Exception as e:
            if isinstance(e, TimeoutException):
                trial_state = TIMEOUT
            else:
                traceback.print_exc(file=sys.stdout)
                trial_state = FAILED
            objs, constraints = None, None
        elapsed = time.time() - tick

        observation = Observation(
            config=config,
            objs=objs,
            constraints=constraints,
            trial_state=trial_state,
            elapsed_time=elapsed,
        )

        # Ship the observation back to the master; a broken channel ends the worker.
        print("Worker: observation=%s. sending result." % str(observation))
        try:
            self.worker_messager.send_message(observation)
        except Exception as e:
            print("Worker send message error:", str(e))
            return
def iterate(self):
    """Suggest one configuration, evaluate it, and feed the result to the advisor.

    Returns:
        (config, trial_state, perf, trial_info) where trial_info is the
        stringified exception on failure/timeout, else None.
    """
    config = self.config_advisor.get_suggestion()  # here is the key step !!!!!
    trial_state, trial_info = SUCCESS, None
    if config not in (self.config_advisor.configurations
                      + self.config_advisor.failed_configurations):
        try:
            args, kwargs = (config,), dict()
            timeout_status, _result = time_limit(self.objective_function,
                                                 self.time_limit_per_trial,
                                                 args=args, kwargs=kwargs)
            if timeout_status:
                raise TimeoutException(
                    'Timeout: time limit for this evaluation is %.1fs'
                    % self.time_limit_per_trial)
            if _result is None:
                perf = MAXINT
            elif isinstance(_result, dict):
                perf = _result['objective_value']
                # BUGFIX: guard against a dict result whose objective is None
                # (consistent with the worker's handling); otherwise the
                # '%.3f' log below raises TypeError.
                if perf is None:
                    perf = MAXINT
            else:
                perf = _result
        except TimeoutException as e:
            trial_state = TIMEOUT
            perf = MAXINT
            trial_info = str(e)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            trial_state = FAILED
            perf = MAXINT
            trial_info = str(e)
        observation = [config, perf, trial_state]
        self.config_advisor.update_observation(observation)  # here is the key step !!!!!
    else:
        self.logger.info('This configuration has been evaluated! Skip it.')
        if config in self.config_advisor.configurations:
            config_idx = self.config_advisor.configurations.index(config)
            trial_state, perf = SUCCESS, self.config_advisor.perfs[config_idx]
        else:
            trial_state, perf = FAILED, MAXINT
    self.iteration_id += 1
    self.logger.info('Iteration %d, perf: %.3f' % (self.iteration_id, perf))
    return config, trial_state, perf, trial_info
def iterate(self):
    """One batch iteration: choose a batch of configs, evaluate unseen ones, bookkeep.

    Returns:
        (config_list, trial_state_list, perf_list, trial_info_list).
    """
    # Assemble surrogate training data; failed configs are imputed with max_y
    # once at least one max_y exists.
    if len(self.configurations) == 0:
        X = np.array([])
    else:
        imputed_configs = list() if self.max_y is None else self.failed_configurations.copy()
        X = convert_configurations_to_array(self.configurations + imputed_configs)
    imputed_perfs = list() if self.max_y is None else [self.max_y] * len(self.failed_configurations)
    Y = np.array(self.perfs + imputed_perfs, dtype=np.float64)

    config_list = self.choose_next(X, Y)
    batch_size = len(config_list)
    trial_state_list = [SUCCESS] * batch_size
    trial_info_list = [None] * batch_size
    perf_list = [None] * batch_size

    for i, config in enumerate(config_list):
        seen = config in (self.configurations + self.failed_configurations)
        if not seen:
            # Evaluate this configuration.
            try:
                hit_timeout, raw_result = time_limit(self.objective_function,
                                                     self.time_limit_per_trial,
                                                     args=(config,), kwargs=dict())
                if hit_timeout:
                    raise TimeoutException(
                        'Timeout: time limit for this evaluation is %.1fs'
                        % self.time_limit_per_trial)
                perf_list[i] = raw_result
            except Exception as e:
                if isinstance(e, TimeoutException):
                    trial_state_list[i] = TIMEOUT
                else:
                    traceback.print_exc(file=sys.stdout)
                    trial_state_list[i] = FAILED
                perf_list[i] = MAXINT
                trial_info_list[i] = str(e)
                self.logger.error(trial_info_list[i])
            if trial_state_list[i] == SUCCESS and perf_list[i] < MAXINT:
                if len(self.configurations) == 0:
                    # First successful evaluation defines the baseline objective.
                    self.default_obj_value = perf_list[i]
                self.configurations.append(config)
                self.perfs.append(perf_list[i])
                self.history_container.add(config, perf_list[i])
                self.perc = np.percentile(self.perfs, self.scale_perc)
                self.min_y = np.min(self.perfs)
                self.max_y = np.max(self.perfs)
            else:
                self.failed_configurations.append(config)
        else:
            self.logger.debug('This configuration has been evaluated! Skip it.')
            if config in self.configurations:
                known_idx = self.configurations.index(config)
                trial_state_list[i], perf_list[i] = SUCCESS, self.perfs[known_idx]
            else:
                trial_state_list[i], perf_list[i] = FAILED, MAXINT

    self.iteration_id += 1
    self.logger.info(
        'Iteration-%d, objective improvement: %.4f' % (
            self.iteration_id, max(0, self.default_obj_value - min(perf_list))))
    return config_list, trial_state_list, perf_list, trial_info_list
def run(self):
    """Worker loop for iteration-aware jobs: receive a job, run it, report the result."""
    while True:
        # Fetch the next job message; a broken channel ends the worker.
        try:
            msg = self.worker_messager.receive_message()
        except Exception as e:
            print("Worker receive message error:", str(e))
            return
        if msg is None:
            # No job yet -- poll again shortly.
            time.sleep(1)
            continue
        print("Worker: get config. start working.")
        config, extra_conf, per_trial_limit, n_iteration, trial_id = msg

        # Run the objective under the per-trial time limit.
        tick = time.time()
        trial_state = SUCCESS
        ref_id = None
        early_stop = False
        try:
            hit_timeout, raw_result = time_limit(
                self.objective_function, per_trial_limit,
                args=(config, n_iteration, extra_conf), kwargs=dict())
            if hit_timeout:
                raise TimeoutException(
                    'Timeout: time limit for this evaluation is %.1fs' % per_trial_limit)
            if raw_result is None:
                perf = MAXINT
            elif isinstance(raw_result, dict):
                perf = raw_result['objective_value']
                if perf is None:
                    perf = MAXINT
                ref_id = raw_result.get('ref_id', None)
                early_stop = raw_result.get('early_stop', False)
            else:
                perf = raw_result
        except Exception as e:
            if isinstance(e, TimeoutException):
                trial_state = TIMEOUT
            else:
                traceback.print_exc(file=sys.stdout)
                trial_state = FAILED
            perf = MAXINT
        time_taken = time.time() - tick

        return_info = dict(loss=perf, n_iteration=n_iteration, ref_id=ref_id,
                           early_stop=early_stop, trial_state=trial_state)
        observation = [return_info, time_taken, trial_id, config]

        # Report back to the master; a broken channel ends the worker.
        print("Worker: perf=%f. time=%d. sending result." % (perf, int(time_taken)))
        try:
            self.worker_messager.send_message(observation)
        except Exception as e:
            print("Worker send message error:", str(e))
            return
# def test_func(*args, **kwargs):
#     import time
#     n = args[0]
#     time.sleep(n)
#     return n * n


def test_func(*args, **kwargs):
    """CPU-heavy workload used to exercise the time-limit wrapper."""
    import numpy as np

    # change mat_n: {10000, 1000, 100, 10}
    mat_n = 1000
    mat = np.random.random((mat_n, mat_n))

    from sklearn.decomposition import KernelPCA
    for _ in range(1000):
        model = KernelPCA()
        model.fit_transform(mat)
    return mat * mat


if __name__ == "__main__":
    # change the value of a:
    # (1) a = (3)
    # (2) a = (6)
    a = (3, )
    b = dict()
    res = time_limit(test_func, 5, a, b)
    # res = ps_time_limit(test, a, b, 1)
    print(res)
def run(self):
    """Ensemble worker loop.

    Evaluates configs from the worker messager until the receiver channel
    says 'ready'; then publishes the top-scoring configs' ensemble
    predictions and relays messages until 'over'.
    """
    while True:
        # BUGFIX: msg is pre-set to None before each receive. Previously a
        # failed receive only logged and fell through, leaving msg unbound on
        # the first iteration (NameError) or stale on later ones.
        msg = None
        try:
            msg = self.receiver_messager.receive_message()
        except Exception as e:
            self.logger.error("Worker receive message error: %s." % str(e))
        if msg == 'ready':
            break
        # Get config
        msg = None
        try:
            msg = self.worker_messager.receive_message()
        except Exception as e:
            self.logger.error("Worker receive message error: %s." % str(e))
        if msg is None:
            # Wait for configs
            time.sleep(0.3)
            continue
        self.logger.info("Worker: get config. start working.")
        config, time_limit_per_trial = msg
        # Start working
        trial_state = SUCCESS
        start_time = time.time()
        try:
            args, kwargs = (config,), dict()
            timeout_status, _result = time_limit(self.evaluator,
                                                 time_limit_per_trial,
                                                 args=args, kwargs=kwargs)
            if timeout_status:
                raise TimeoutException(
                    'Timeout: time limit for this evaluation is %.1fs' % time_limit_per_trial)
            objs, constraints = get_result(_result)
        except TimeoutException:
            trial_state = TIMEOUT
            objs, constraints = None, None
        except Exception:
            traceback.print_exc(file=sys.stdout)
            trial_state = FAILED
            objs, constraints = None, None
        # Track the incumbent; scores are stored negated so larger is better.
        _perf = float("INF") if objs is None else objs[0]
        self.configs.append(config)
        self.perfs.append(_perf)
        self.eval_dict[config] = [-_perf, time.time(), trial_state]
        if -_perf > self.incumbent_perf:
            self.incumbent_perf = -_perf
            self.incumbent_config = config
        elapsed_time = time.time() - start_time
        observation = Observation(config, trial_state, constraints, objs, elapsed_time,
                                  worker_info={'ip': get_host_ip(),
                                               'port': self.worker_port})
        # Send result
        self.logger.info("Worker: observation=%s. sending result." % str(observation))
        try:
            self.worker_messager.send_message(observation)
        except Exception as e:
            self.logger.error("Worker send message error: %s." % str(e))
    # 'ready' received: pick the top ~10% (at least one) by score and publish
    # their ensemble predictions.
    eval_list = self.eval_dict.items()
    sorted_list = sorted(eval_list, key=lambda x: x[1][0], reverse=True)
    if len(sorted_list) > 10:
        ensemble_dict = dict(sorted_list[:int(len(sorted_list) / 10)])
    else:
        ensemble_dict = dict(sorted_list[:1])
    self.best_configs = list(ensemble_dict.keys())
    preds = self.fetch_ensemble_pred()
    self.receiver_messager.send_message(preds)
    # Relay loop: echo non-control messages back until the master says 'over'.
    while True:
        time.sleep(5)
        msg = None  # same unbound-on-error guard as above
        try:
            msg = self.receiver_messager.receive_message()
        except Exception as e:
            self.logger.error("Worker receive message error: %s." % str(e))
        if msg == 'over':
            break
        elif msg is not None:
            self.receiver_messager.send_message(msg)