import numpy as np
import matplotlib.pyplot as plt
from pybnn import DNGO

# Toy 1-D objective (the original snippet left f undefined; this is an
# assumed stand-in, any smooth 1-D function works here).
def f(x):
    return np.sinc(x * 10 - 5)

rng = np.random.RandomState(42)
x = rng.rand(20)
y = f(x)

grid = np.linspace(0, 1, 100)
fvals = f(grid)

plt.plot(grid, fvals, "k--")
plt.plot(x, y, "ro")
plt.grid()
plt.xlim(0, 1)
plt.show()

# -- Train Model ---
model = DNGO(do_mcmc=False)
model.train(x[:, None], y, do_optimize=True)

# -- Predict with Model ---
m, v = model.predict(grid[:, None])

plt.plot(x, y, "ro")
plt.grid()
plt.plot(grid, fvals, "k--")
plt.plot(grid, m, "blue")
plt.fill_between(grid, m + np.sqrt(v), m - np.sqrt(v), color="orange", alpha=0.8)
# The original snippet was truncated after this point; a wider 2-sigma band
# is a plausible completion of the predictive-uncertainty plot.
plt.fill_between(grid, m + 2 * np.sqrt(v), m - 2 * np.sqrt(v), color="orange", alpha=0.6)
plt.xlim(0, 1)
plt.show()
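The snippets here all pass do_mcmc=False for speed. A variant sketch of the same fit with MCMC marginalization over the Bayesian linear-regression hyperparameters enabled (reusing x, y, and grid from above; the chain settings shown are pybnn's defaults):

# Variant sketch: marginalize the Bayesian linear-regression
# hyperparameters by MCMC instead of using point estimates.
model_mcmc = DNGO(do_mcmc=True, chain_length=2000, burnin_steps=2000)
model_mcmc.train(x[:, None], y, do_optimize=True)
m_mcmc, v_mcmc = model_mcmc.predict(grid[:, None])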
import numpy as np

def dngo_search(search_space,
                num_init=10,
                k=10,
                loss='val_loss',
                total_queries=150,
                encoding_type='path',
                cutoff=40,
                acq_opt_type='mutation',
                explore_type='ucb',
                deterministic=True,
                verbose=True):
    import torch
    from pybnn import DNGO
    from pybnn.util.normalization import zero_mean_unit_var_normalization, zero_mean_unit_var_denormalization
    from acquisition_functions import acq_fn

    def fn(arch):
        return search_space.query_arch(arch, deterministic=deterministic)[loss]

    # set up initial data
    data = search_space.generate_random_dataset(num=num_init,
                                                encoding_type=encoding_type,
                                                cutoff=cutoff,
                                                deterministic_loss=deterministic)

    query = num_init + k
    while query <= total_queries:
        # set up data
        x = np.array([d['encoding'] for d in data])
        y = np.array([d[loss] for d in data])

        # get a set of candidate architectures
        candidates = search_space.get_candidates(data,
                                                 acq_opt_type=acq_opt_type,
                                                 encoding_type=encoding_type,
                                                 cutoff=cutoff,
                                                 deterministic_loss=deterministic)
        xcandidates = np.array([d['encoding'] for d in candidates])

        # train the model
        model = DNGO(do_mcmc=False)
        model.train(x, y, do_optimize=True)
        predictions = model.predict(xcandidates)

        candidate_indices = acq_fn(np.array(predictions), explore_type)

        # add the k arches with the minimum acquisition function values
        for i in candidate_indices[:k]:
            arch_dict = search_space.query_arch(candidates[i]['spec'],
                                                encoding_type=encoding_type,
                                                cutoff=cutoff,
                                                deterministic=deterministic)
            data.append(arch_dict)

        if verbose:
            top_5_loss = sorted([(d[loss], d['epochs']) for d in data], key=lambda d: d[0])[:min(5, len(data))]
            print('dngo, query {}, top 5 (val loss, epochs): {}'.format(query, top_5_loss))
            recent_10_loss = [(d[loss], d['epochs']) for d in data[-10:]]
            print('dngo, query {}, most recent 10 (val loss, epochs): {}'.format(query, recent_10_loss))

        query += k

    return data
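acq_fn is imported from an acquisition_functions module that is not shown here. A minimal sketch of what it might look like, assuming predictions is the (mean, variance) pair returned by DNGO.predict (or a mean array when stds is passed separately) and that lower validation loss is better:

import numpy as np

def acq_fn(predictions, explore_type='ucb', stds=None, explore_factor=0.5):
    # Hypothetical stand-in for the imported acquisition_functions.acq_fn.
    if stds is None:
        # predictions is a stacked (mean, variance) array from DNGO.predict
        mean, var = predictions
        stds = np.sqrt(var)
    else:
        mean = predictions
    if explore_type == 'ucb':
        # Optimistic lower bound for minimizing validation loss.
        acq = mean - explore_factor * stds
    else:
        raise NotImplementedError(explore_type)
    # Indices sorted so the smallest acquisition values come first,
    # matching the "minimum acquisition function values" comment above.
    return np.argsort(acq)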
import gc
import numpy as np

# DEFAULT_K, DEFAULT_LOSS, and DEFAULT_TOTAL_QUERIES are module-level
# constants defined elsewhere in the source file.
def pybnn_search(search_space,
                 model_type,
                 num_init=20,
                 k=DEFAULT_K,
                 loss=DEFAULT_LOSS,
                 total_queries=DEFAULT_TOTAL_QUERIES,
                 predictor_encoding='adj',
                 cutoff=0,
                 acq_opt_type='mutation',
                 explore_type='ucb',
                 deterministic=True,
                 verbose=True):
    import torch
    from pybnn import DNGO
    from pybnn.bohamiann import Bohamiann
    from pybnn.util.normalization import zero_mean_unit_var_normalization, zero_mean_unit_var_denormalization
    from acquisition_functions import acq_fn

    def fn(arch):
        return search_space.query_arch(arch, deterministic=deterministic)[loss]

    # set up initial data
    data = search_space.generate_random_dataset(num=num_init,
                                                predictor_encoding=predictor_encoding,
                                                cutoff=cutoff,
                                                deterministic_loss=deterministic)

    query = num_init + k
    while query <= total_queries:
        # set up data
        x = np.array([d['encoding'] for d in data])
        y = np.array([d[loss] for d in data])
        scaled_y = np.array([elt / 30 for elt in y])

        # get a set of candidate architectures
        candidates = search_space.get_candidates(data,
                                                 acq_opt_type=acq_opt_type,
                                                 predictor_encoding=predictor_encoding,
                                                 cutoff=cutoff,
                                                 deterministic_loss=deterministic)
        xcandidates = np.array([d['encoding'] for d in candidates])

        # train the model on the scaled targets so the common rescaling
        # below is valid for both model types
        if model_type == 'dngo':
            model = DNGO(do_mcmc=False)
            model.train(x, scaled_y, do_optimize=True)
        elif model_type == 'bohamiann':
            model = Bohamiann()
            model.train(x, scaled_y,
                        num_steps=10000,
                        num_burn_in_steps=1000,
                        keep_every=50,
                        lr=1e-2)

        predictions, var = model.predict(xcandidates)
        predictions = np.array([pred * 30 for pred in predictions])
        # variance scales with the square of the target scaling factor
        stds = np.sqrt(np.array([v * 30 ** 2 for v in var]))

        candidate_indices = acq_fn(np.array(predictions), explore_type, stds=stds)

        # free the model before the next round
        model = None
        gc.collect()

        # add the k arches with the minimum acquisition function values
        for i in candidate_indices[:k]:
            arch_dict = search_space.query_arch(candidates[i]['spec'],
                                                epochs=0,
                                                predictor_encoding=predictor_encoding,
                                                cutoff=cutoff,
                                                deterministic=deterministic)
            data.append(arch_dict)

        if verbose:
            top_5_loss = sorted([d[loss] for d in data])[:min(5, len(data))]
            print('{}, query {}, top 5 val losses: {}'.format(model_type, query, top_5_loss))

        query += k

    return data
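A hypothetical invocation, assuming a naszilla-style search-space object that supplies generate_random_dataset, get_candidates, and query_arch (the class name and keys below are illustrative, not part of the snippet above):

# Illustrative usage; `Nasbench101` is a stand-in for whatever object
# implements the search-space interface used by pybnn_search.
search_space = Nasbench101()
results = pybnn_search(search_space, model_type='dngo', total_queries=150)
best = min(d['val_loss'] for d in results)
print('best val loss found:', best)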
import numpy as np
import matplotlib.pyplot as plt
from pybnn import DNGO
# get_function comes from the surrounding benchmark utilities
# (its import was not shown in the original snippet)

func_name = 'camelback-2d'
f, bounds, _, true_fmin = get_function(func_name)
d = bounds.shape[0]
n_init = 40
var_noise = 1.0e-10

np.random.seed(3)
x = np.random.uniform(bounds[:, 0], bounds[:, 1], (n_init, d))
y = f(x)

x1, x2 = np.mgrid[-1:1:50j, -1:1:50j]
grid = np.vstack((x1.flatten(), x2.flatten())).T
fvals = f(grid)

# -- Train Model ---
model = DNGO(do_mcmc=False)
model.train(x, y.flatten(), do_optimize=True)

# -- Predict with Model ---
m, v = model.predict(grid)

figure, axes = plt.subplots(2, 1, figsize=(6, 18))
sub1 = axes[0].contourf(x1, x2, fvals.reshape(50, 50))
axes[0].plot(x[:, 0], x[:, 1], 'rx')
axes[0].set_title('objective func')
sub2 = axes[1].contourf(x1, x2, m.reshape(50, 50))
axes[1].plot(x[:, 0], x[:, 1], 'rx')
axes[1].set_title('prediction by DNGO')
plt.show()
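A quick numerical check on fit quality follows directly from the arrays already computed above; this small sketch is an addition, not part of the original script:

# Added sketch: compare the DNGO posterior mean against the true
# function values on the plotting grid.
rmse = np.sqrt(np.mean((m - fvals.flatten()) ** 2))
mean_std = np.mean(np.sqrt(v))
print(f'grid RMSE: {rmse:.4f}, mean predictive std: {mean_std:.4f}')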
import time
from pybnn import DNGO
from pybnn.bohamiann import Bohamiann
# MCDROP and MCCONCRETEDROP come from the surrounding codebase
# (their imports were not shown in the original snippet); x, y, and grid
# are reused from the setup above.

start_time = time.time()

# -- Train and Prediction with MC Dropout Model ---
T = 100
model_mcdrop = MCDROP(T=T)
model_mcdrop.train(x, y.flatten())
m_mcdrop, v_mcdrop = model_mcdrop.predict(grid)
mcdrop_complete_time = time.time()

# -- Train and Prediction with MC Concrete Dropout Model ---
model_mcconcdrop = MCCONCRETEDROP(T=T)
model_mcconcdrop.train(x, y.flatten())
m_mcconcdrop, v_mcconcdrop = model_mcconcdrop.predict(grid)
mcconcdrop_complete_time = time.time()

# -- Train and Prediction with DNGO Model ---
model_dngo = DNGO(do_mcmc=False)
model_dngo.train(x, y.flatten(), do_optimize=True)
m_dngo, v_dngo = model_dngo.predict(grid)
dngo_complete_time = time.time()

# -- Train and Prediction with Bohamiann Model ---
model_boham = Bohamiann(print_every_n_steps=1000)
model_boham.train(x, y.flatten(),
                  num_steps=6000,
                  num_burn_in_steps=2000,
                  keep_every=50,
                  lr=1e-2,
                  verbose=True)
m_boham, v_boham = model_boham.predict(grid)
boham_complete_time = time.time()
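The timestamps above are recorded but never reported; a small added sketch turning them into per-model wall-clock times:

# Added sketch: report per-model training + prediction time from the
# timestamps recorded above.
print(f'MC dropout:          {mcdrop_complete_time - start_time:.1f}s')
print(f'MC concrete dropout: {mcconcdrop_complete_time - mcdrop_complete_time:.1f}s')
print(f'DNGO:                {dngo_complete_time - mcconcdrop_complete_time:.1f}s')
print(f'Bohamiann:           {boham_complete_time - dngo_complete_time:.1f}s')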
import logging
import warnings

import numpy as np
import torch
from torch.distributions import Normal
from pybnn import DNGO

from nni.tuner import Tuner
from nni.common.hpo_utils import validate_search_space
# _random_config is a module-level helper (defined alongside this class,
# not shown here) that draws one random configuration from the search space.

_logger = logging.getLogger(__name__)


class DNGOTuner(Tuner):

    def __init__(self, optimize_mode='maximize', sample_size=1000,
                 trials_per_update=20, num_epochs_per_training=500):
        self.searchspace_json = None
        self.random_state = None
        self.model = DNGO(do_mcmc=False, num_epochs=num_epochs_per_training)
        self._model_initialized = False
        self.sample_size = sample_size
        self.trials_per_update = trials_per_update
        self.optimize_mode = optimize_mode

        self.x = []
        self.y = []

    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
        self.x.append(parameters)
        self.y.append(self._get_default_value(value))
        if len(self.y) % self.trials_per_update == 0:
            self._update_model()

    def generate_parameters(self, parameter_id, **kwargs):
        if not self._model_initialized:
            return _random_config(self.searchspace_json, self.random_state)
        else:
            # random samples and pick best with model
            candidate_x = [_random_config(self.searchspace_json, self.random_state)
                           for _ in range(self.sample_size)]

            # The model has a NaN issue when all the candidates are the same;
            # we can also save prediction time when this happens
            if all(x == candidate_x[0] for x in candidate_x):
                return candidate_x[0]

            x_test = np.array([np.array(list(xi.values())) for xi in candidate_x])
            m, v = self.model.predict(x_test)

            # The model has a NaN issue when all the candidates are very close
            if np.isnan(m).any() or np.isnan(v).any():
                return candidate_x[0]

            # expected improvement against a fixed incumbent target of 0.95;
            # DNGO.predict returns predictive variances, so take the square
            # root to get sigma
            mean = torch.Tensor(m)
            sigma = torch.sqrt(torch.Tensor(v))
            u = (mean - torch.Tensor([0.95]).expand_as(mean)) / sigma
            normal = Normal(torch.zeros_like(u), torch.ones_like(u))
            ucdf = normal.cdf(u)
            updf = torch.exp(normal.log_prob(u))
            ei = sigma * (updf + u * ucdf)

            if self.optimize_mode == 'maximize':
                ind = torch.argmax(ei)
            else:
                ind = torch.argmin(ei)
            new_x = candidate_x[ind]
            return new_x

    def update_search_space(self, search_space):
        validate_search_space(search_space, ['choice', 'randint', 'uniform',
                                             'quniform', 'loguniform', 'qloguniform'])
        self.searchspace_json = search_space
        self.random_state = np.random.RandomState()

    def import_data(self, data):
        for d in data:
            self.x.append(d['parameter'])
            self.y.append(self._get_default_value(d['value']))
        self._update_model()

    def _update_model(self):
        _logger.info('Updating model on %d samples', len(self.x))
        x_arr = []
        for x in self.x:
            x_arr.append([x[k] for k in sorted(x.keys())])
        try:
            self.model.train(np.array(x_arr), np.array(self.y), do_optimize=True)
        except np.linalg.LinAlgError as e:
            warnings.warn(f'numpy linalg error encountered in DNGO model training: {e}')
        self._model_initialized = True

    def _get_default_value(self, value):
        if isinstance(value, dict) and 'default' in value:
            return value['default']
        elif isinstance(value, float):
            return value
        else:
            raise ValueError(f'Unsupported value: {value}')
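A minimal standalone driving loop for the tuner, assuming a toy search space and objective (both illustrative; in a real NNI experiment the framework calls these methods itself):

# Illustrative standalone use of DNGOTuner; the search space and the toy
# score function below are assumptions, not part of the class above.
tuner = DNGOTuner(optimize_mode='maximize', trials_per_update=5)
tuner.update_search_space({
    'x': {'_type': 'uniform', '_value': [0, 1]},
    'y': {'_type': 'uniform', '_value': [0, 1]},
})

for pid in range(30):
    params = tuner.generate_parameters(pid)
    # toy objective: peak at (0.3, 0.7)
    score = float(-(params['x'] - 0.3) ** 2 - (params['y'] - 0.7) ** 2)
    tuner.receive_trial_result(pid, params, score)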