def get_bo_candidates(self, num_configs):
    """Return up to ``num_configs`` configurations ranked by the acquisition function.

    The incumbent is taken as the minimum of the std-normalized objective
    values observed at the largest resource level (``self.iterate_r[-1]``);
    the acquisition function is refreshed with the current weighted
    surrogate before the optimizer searches for challengers.

    :param num_configs: maximum number of candidate configurations to return.
    :return: a list of challenger configurations (best first).
    """
    # TODO: parallel methods
    full_resource = self.iterate_r[-1]
    # Incumbent value in the same normalized space the surrogate was trained in.
    best_normalized = np.min(std_normalization(self.target_y[full_resource]))

    # Refresh the acquisition function with the current model/incumbent state.
    self.acquisition_function.update(
        model=self.weighted_surrogate,
        eta=best_normalized,
        num_data=len(self.history_container.data),
    )

    ranked = self.acq_optimizer.maximize(
        runhistory=self.history_container,
        num_points=5000,
    )
    return ranked.challengers[:num_configs]
def update_observation(self, config, perf, n_iteration):
    """Record one finished evaluation and refit the surrogate model.

    Marks the matching job in the current bracket rung as COMPLETED,
    appends (config, perf) to the per-resource observation lists, and
    retrains the surrogate — imputing still-running configurations in
    this rung with the median of the already-observed objective values.

    :param config: the evaluated configuration.
    :param perf: observed objective value (lower is better).
    :param n_iteration: resource level (budget) the evaluation ran at;
        may arrive as a float and is cast to int below.
    """
    rung_id = self.get_rung_id(self.bracket, n_iteration)

    # Locate this config's job in the rung and flip it RUNNING -> COMPLETED,
    # storing the observed performance in-place (job layout:
    # [status, config, perf, extra_conf]).
    updated = False
    for job in self.bracket[rung_id]['jobs']:
        _job_status, _config, _perf, _extra_conf = job
        if _config == config:
            assert _job_status == RUNNING
            job[0] = COMPLETED
            job[2] = perf
            updated = True
            break
    # The config must have been scheduled in this rung.
    assert updated
    # print('=== bracket after update_observation:', self.get_bracket_status(self.bracket))

    # Configs scheduled in this rung whose results have not arrived yet.
    configs_running = list()
    for _config in self.bracket[rung_id]['configs']:
        if _config not in self.target_x[n_iteration]:
            configs_running.append(_config)
    # Imputation value for running configs, computed BEFORE appending the
    # new observation so it reflects only previously completed results.
    # NOTE(review): if this is the first result at this resource level,
    # np.median of an empty list yields nan — presumably never hit in
    # practice; confirm against the scheduler.
    value_imputed = np.median(self.target_y[n_iteration])

    n_iteration = int(n_iteration)
    self.target_x[n_iteration].append(config)
    self.target_y[n_iteration].append(perf)

    if n_iteration == self.R:
        # Full-resource evaluation: track it as an incumbent candidate.
        self.incumbent_configs.append(config)
        self.incumbent_perfs.append(perf)
        # Update history container.
        self.history_container.add(config, perf)

    # Refit the ensemble surrogate model: observed results plus imputed
    # placeholders for the configs still running in this rung.
    configs_train = self.target_x[n_iteration] + configs_running
    results_train = self.target_y[n_iteration] + [value_imputed] * len(configs_running)
    results_train = np.array(std_normalization(results_train), dtype=np.float64)
    if not self.use_bohb_strategy:
        # Multi-fidelity surrogate: train the component for this resource level.
        self.surrogate.train(convert_configurations_to_array(configs_train),
                             results_train, r=n_iteration)
    else:
        # BOHB strategy: only full-resource observations train the surrogate.
        if n_iteration == self.R:
            self.surrogate.train(convert_configurations_to_array(configs_train),
                                 results_train)
def iterate(self, skip_last=0):
    """Run one full Hyperband-style sweep over all brackets.

    For each bracket ``s`` (largest first): sample an initial batch of
    configurations, run successive halving — evaluating at increasing
    resource levels and keeping the best ``1/eta`` fraction each round —
    then refit the weighted surrogate on all touched resource levels.

    :param skip_last: number of final successive-halving rounds to skip
        in every bracket (0 = run all rounds).
    """
    for s in reversed(range(self.s_max + 1)):
        # Periodically refresh the surrogate ensemble weights once enough
        # brackets have been seen.
        if self.update_enable and self.weight_update_id > self.s_max:
            self.update_weight()
        self.weight_update_id += 1

        # Set initial number of configurations
        n = int(ceil(self.B / self.R / (s + 1) * self.eta ** s))
        # initial number of iterations per config
        r = int(self.R * self.eta ** (-s))

        # Choose a batch of configurations in different mechanisms.
        start_time = time.time()
        T = self.choose_next(n)
        time_elapsed = time.time() - start_time
        self.logger.info("[%s] Choosing next configurations took %.2f sec."
                         % (self.method_name, time_elapsed))

        extra_info = None
        # Resource already consumed by the surviving configs; used to run
        # only the incremental budget when restarts are not needed.
        last_run_num = None

        for i in range((s + 1) - int(skip_last)):  # changed from s + 1
            # Run each of the n configs for <iterations>
            # and keep best (n_configs / eta) configurations
            n_configs = n * self.eta ** (-i)
            n_iteration = r * self.eta ** (i)

            n_iter = n_iteration
            if last_run_num is not None and not self.restart_needed:
                # Continue from checkpoints: evaluate only the extra budget.
                n_iter -= last_run_num
            last_run_num = n_iteration

            self.logger.info("%s: %d configurations x %d iterations each"
                             % (self.method_name, int(n_configs), int(n_iteration)))

            ret_val, early_stops = self.run_in_parallel(T, n_iter, extra_info)
            val_losses = [item['loss'] for item in ret_val]
            ref_list = [item['ref_id'] for item in ret_val]

            # Record observations at this resource level.
            self.target_x[int(n_iteration)].extend(T)
            self.target_y[int(n_iteration)].extend(val_losses)

            if int(n_iteration) == self.R:
                # Full-resource results become incumbent candidates.
                self.incumbent_configs.extend(T)
                self.incumbent_perfs.extend(val_losses)
                # Update history container.
                for _config, _perf in zip(T, val_losses):
                    self.history_container.add(_config, _perf)

            # Select a number of best configurations for the next loop.
            # Filter out early stops, if any.
            indices = np.argsort(val_losses)
            if len(T) == sum(early_stops):
                # Every config early-stopped: abandon this bracket.
                break
            if len(T) >= self.eta:
                # Drop early-stopped configs, then keep the best 1/eta.
                indices = [i for i in indices if not early_stops[i]]
                T = [T[i] for i in indices]
                extra_info = [ref_list[i] for i in indices]
                reduced_num = int(n_configs / self.eta)
                T = T[0:reduced_num]
                extra_info = extra_info[0:reduced_num]
            else:
                # Too few configs to halve: keep only the single best.
                T = [T[indices[0]]]  # todo: confirm no filter early stops?
                extra_info = [ref_list[indices[0]]]
            # Losses re-ordered to match the surviving configs (sorted).
            val_losses = [val_losses[i] for i in indices][0:len(T)]  # update: sorted
            incumbent_loss = val_losses[0]
            self.add_stage_history(self.stage_id,
                                   min(self.global_incumbent, incumbent_loss))
            self.stage_id += 1
        # self.remove_immediate_model()

        # Refit the weighted surrogate on every resource level touched in
        # this bracket (from the bracket's starting resource r upward).
        for item in self.iterate_r[self.iterate_r.index(r):]:
            # NORMALIZE Objective value: normalization
            normalized_y = std_normalization(self.target_y[item])
            self.weighted_surrogate.train(
                convert_configurations_to_array(self.target_x[item]),
                np.array(normalized_y, dtype=np.float64), r=item)