def suggest(self, n_suggestions=1, fix_input=None):
    if self.X.shape[0] < self.rand_sample:
        # Not enough observations yet: fall back to random sampling, respecting any fixed inputs
        sample = self.space.sample(n_suggestions)
        if fix_input is not None:
            for k, v in fix_input.items():
                sample[k] = v
        return sample
    else:
        X, Xe = self.space.transform(self.X)
        y     = torch.FloatTensor(self.y)
        num_uniqs = None if Xe.shape[1] == 0 else [len(self.space.paras[name].categories) for name in self.space.enum_names]
        self.model = get_model(self.model_name, X.shape[1], Xe.shape[1], y.shape[1], num_uniqs = num_uniqs, **self.model_conf)
        self.model.fit(X, Xe, y)

        acq = GeneralAcq(
                self.model,
                self.num_obj,
                self.num_constr,
                kappa     = self.kappa,
                c_kappa   = self.c_kappa,
                use_noise = self.use_noise)
        opt     = EvolutionOpt(self.space, acq, pop = 100, iters = 200)
        suggest = opt.optimize()
        with torch.no_grad():
            py, ps2 = self.model.predict(*self.space.transform(suggest))
            largest_uncert_id = np.argmax(np.log(ps2).sum(axis = 1))

        if suggest.shape[0] >= n_suggestions:
            # Randomly sub-sample the recommendations, but always keep the most uncertain point
            selected_id = np.random.choice(suggest.shape[0], n_suggestions).tolist()
            if largest_uncert_id not in selected_id:
                selected_id[0] = largest_uncert_id
            return suggest.iloc[selected_id]
        else:
            # Pad with random samples if the optimiser returned fewer points than requested
            rand_samp = self.space.sample(n_suggestions - suggest.shape[0])
            suggest   = pd.concat([suggest, rand_samp], axis = 0, ignore_index = True)
            return suggest
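# A minimal driving loop for the multi-objective suggest() above. This is a sketch only:
# the owning class (called GeneralBO here, following the GeneralAcq / num_obj attributes
# used above), its constructor arguments and its observe() signature are assumptions,
# not taken from this file.
import numpy as np
import pandas as pd
from hebo.design_space.design_space import DesignSpace

def two_objectives(params: pd.DataFrame) -> np.ndarray:
    # Toy bi-objective function; returns one column per objective
    x = params['x1'].values
    return np.stack([x ** 2, (x - 1.0) ** 2], axis = 1)

space = DesignSpace().parse([{'name': 'x1', 'type': 'num', 'lb': -3.0, 'ub': 3.0}])
opt   = GeneralBO(space, num_obj = 2)          # assumed constructor
for _ in range(10):
    rec = opt.suggest(n_suggestions = 2)
    opt.observe(rec, two_objectives(rec))      # assumed observe() API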
def test_opt():
    space = DesignSpace().parse([
        {'name': 'x1', 'type': 'num', 'lb': -3.0, 'ub': 3.0}
    ])
    acq = ToyExample()
    opt = EvolutionOpt(space, acq, pop=10)
    rec = opt.optimize(initial_suggest=space.sample(3))
    x, xe = space.transform(rec)
    assert approx(1.0, 1e-3) == acq(x, xe)[:, 0].squeeze().item()
def test_mo():
    space = DesignSpace().parse([
        {'name': 'x1', 'type': 'num', 'lb': -3.0, 'ub': 3.0},
        {'name': 'x2', 'type': 'int', 'lb': -3.0, 'ub': 3.0}
    ])
    acq = ToyExampleMO()
    opt = EvolutionOpt(space, acq, pop=10)
    rec = opt.optimize()
    assert rec.shape[0] == 10
def test_opt_int():
    space = DesignSpace().parse([
        {'name': 'x1', 'type': 'num', 'lb': -3.0, 'ub': 3.0},
        {'name': 'x2', 'type': 'int', 'lb': -3.0, 'ub': 3.0}
    ])
    acq = ToyExample()
    opt = EvolutionOpt(space, acq, pop=10)
    rec = opt.optimize()
    assert approx(1.0, 1e-3) == acq(*space.transform(rec))[:, 0].squeeze().item()
def test_opt_fix():
    space = DesignSpace().parse([
        {'name': 'x1', 'type': 'num', 'lb': -3.0, 'ub': 3.0},
        {'name': 'x2', 'type': 'num', 'lb': -3.0, 'ub': 3.0}
    ])
    acq = ToyExample()
    opt = EvolutionOpt(space, acq, pop=10)
    rec = opt.optimize(fix_input={'x1': 1.0})
    print(rec)
    assert rec['x1'].values == approx(1.0, 1e-3)
def suggest(self, n_suggestions=1, fix_input=None):
    assert n_suggestions == 1
    if self.X.shape[0] < self.rand_sample:
        # Not enough observations yet: sample randomly, respecting any fixed inputs
        sample = self.space.sample(n_suggestions)
        if fix_input is not None:
            for k, v in fix_input.items():
                sample[k] = v
        return sample
    else:
        X, Xe = self.space.transform(self.X)
        y     = torch.FloatTensor(self.y)
        num_uniqs = None if Xe.shape[1] == 0 else [len(self.space.paras[name].categories) for name in self.space.enum_names]
        model = get_model(self.model_name, X.shape[1], Xe.shape[1], y.shape[1], num_uniqs = num_uniqs, warp = False)
        model.fit(X, Xe, y)

        # Minimise an LCB acquisition with evolutionary search,
        # warm-started from the best observation so far
        acq     = LCB(model, kappa = 2.0)
        opt     = EvolutionOpt(self.space, acq, pop = 100, iters = 100)
        suggest = self.X.iloc[[np.argmin(self.y.reshape(-1))]]
        return opt.optimize(initial_suggest = suggest, fix_input = fix_input)
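# Example driver for the single-suggestion LCB variant above (n_suggestions must be 1).
# A minimal sketch: the owning class (called BO here) is assumed to be constructed from a
# DesignSpace and to expose observe(X, y); names other than suggest() itself are not taken
# from this file.
import numpy as np
import pandas as pd
from hebo.design_space.design_space import DesignSpace

def obj(params: pd.DataFrame) -> np.ndarray:
    return ((params[['x1']].values - 0.37) ** 2).sum(axis = 1, keepdims = True)

space = DesignSpace().parse([{'name': 'x1', 'type': 'num', 'lb': -3.0, 'ub': 3.0}])
opt   = BO(space)                                # assumed constructor
for _ in range(20):
    rec = opt.suggest(n_suggestions = 1)         # this variant only supports one point per call
    opt.observe(rec, obj(rec))                   # assumed observe() API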
def suggest(self, n_suggestions=1, fix_input=None):
    if self.X.shape[0] < self.rand_sample:
        # Warm-up phase: quasi-random sampling until enough data has been collected
        sample = self.quasi_sample(n_suggestions, fix_input)
        return sample
    else:
        X, Xe = self.space.transform(self.X)
        try:
            # Power-transform the observations so they are closer to Gaussian
            if self.y.min() <= 0:
                y = torch.FloatTensor(power_transform(self.y / self.y.std(), method = 'yeo-johnson'))
            else:
                y = torch.FloatTensor(power_transform(self.y / self.y.std(), method = 'box-cox'))
                if y.std() < 0.5:
                    y = torch.FloatTensor(power_transform(self.y / self.y.std(), method = 'yeo-johnson'))
            if y.std() < 0.5:
                raise RuntimeError('Power transformation failed')
            model = get_model(self.model_name, self.space.num_numeric, self.space.num_categorical, 1, **self.model_config)
            model.fit(X, Xe, y)
        except Exception:
            # Fall back to the untransformed observations if the transformation fails
            y     = torch.FloatTensor(self.y).clone()
            model = get_model(self.model_name, self.space.num_numeric, self.space.num_categorical, 1, **self.model_config)
            model.fit(X, Xe, y)

        best_id = np.argmin(self.y.squeeze())
        best_x  = self.X.iloc[[best_id]]
        best_y  = y.min()
        py_best, ps2_best = model.predict(*self.space.transform(best_x))
        py_best = py_best.detach().numpy().squeeze()
        ps_best = ps2_best.sqrt().detach().numpy().squeeze()

        # Exploration weight that grows with the (estimated) iteration count
        iter  = max(1, self.X.shape[0] // n_suggestions)
        upsi  = 0.5
        delta = 0.01
        # kappa = np.sqrt(upsi * 2 * np.log(iter ** (2.0 + self.X.shape[1] / 2.0) * 3 * np.pi**2 / (3 * delta)))
        kappa = np.sqrt(upsi * 2 * ((2.0 + self.X.shape[1] / 2.0) * np.log(iter) + np.log(3 * np.pi**2 / (3 * delta))))

        acq = MACE(model, py_best, kappa = kappa)  # LCB < py_best
        mu  = Mean(model)
        sig = Sigma(model, linear_a = -1.)

        opt = EvolutionOpt(self.space, acq, pop = 100, iters = 100, verbose = False)
        rec = opt.optimize(initial_suggest = best_x, fix_input = fix_input).drop_duplicates()
        rec = rec[self.check_unique(rec)]

        # Pad with quasi-random samples when the optimiser returns fewer unique points than requested
        cnt = 0
        while rec.shape[0] < n_suggestions:
            rand_rec = self.quasi_sample(n_suggestions - rec.shape[0], fix_input)
            rand_rec = rand_rec[self.check_unique(rand_rec)]
            rec      = pd.concat([rec, rand_rec], axis = 0, ignore_index = True)
            cnt += 1
            if cnt > 3:
                # Sometimes the design space is so small that duplicated sampling is unavoidable
                break
        if rec.shape[0] < n_suggestions:
            rand_rec = self.quasi_sample(n_suggestions - rec.shape[0], fix_input)
            rec      = pd.concat([rec, rand_rec], axis = 0, ignore_index = True)

        select_id = np.random.choice(rec.shape[0], n_suggestions, replace = False).tolist()
        x_guess   = []
        with torch.no_grad():
            py_all       = mu(*self.space.transform(rec)).squeeze().numpy()
            ps_all       = -1 * sig(*self.space.transform(rec)).squeeze().numpy()
            best_pred_id = np.argmin(py_all)
            best_unce_id = np.argmax(ps_all)
            # Ensure the most uncertain and the best predicted points are in the batch
            if best_unce_id not in select_id and n_suggestions > 2:
                select_id[0] = best_unce_id
            if best_pred_id not in select_id and n_suggestions > 2:
                select_id[1] = best_pred_id
            rec_selected = rec.iloc[select_id].copy()
        return rec_selected
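# End-to-end usage of the MACE-based suggest() above, assuming the surrounding class is
# HEBO from hebo.optimizers.hebo; this mirrors the library's documented suggest/observe
# loop, and the exact import paths should be treated as an assumption if this code lives
# elsewhere.
import numpy as np
import pandas as pd
from hebo.design_space.design_space import DesignSpace
from hebo.optimizers.hebo import HEBO

def obj(params: pd.DataFrame) -> np.ndarray:
    # Minimise a simple quadratic; the optimiser expects an (n, 1) array of objective values
    return ((params.values - 0.37) ** 2).sum(axis = 1).reshape(-1, 1)

space = DesignSpace().parse([{'name': 'x', 'type': 'num', 'lb': -3.0, 'ub': 3.0}])
opt   = HEBO(space)
for i in range(8):
    rec = opt.suggest(n_suggestions = 4)   # batch suggestions via the padding/selection logic above
    opt.observe(rec, obj(rec))
    print('Iter %d, best objective so far: %.3f' % (i, opt.y.min()))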