def forward(self, X, X2=None):
    # Linear kernel: k(x, x') = variance * <x / l, x' / l>.
    if X2 is None:
        X2 = X
    l = transform_forward(self.lengthscale)
    return transform_forward(self.variance) * torch.mm(X / l, (X2 / l).t())
def forward(self, X, X2=None):
    # RBF kernel: k(x, x') = variance * exp(-0.5 * ||x/l - x'/l||^2).
    # sqdist is assumed to return pairwise squared Euclidean distances.
    if X2 is None:
        X2 = X
    l = transform_forward(self.lengthscale)
    return transform_forward(self.variance) * (-0.5 * sqdist(X / l, X2 / l)).exp()
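# Hedged usage sketch (not part of the original source): how the two kernel
# forward passes above might be exercised. The class name `RBF` and its
# constructor signature are assumptions; only the `forward` bodies are given.
import torch

X = torch.randn(10, 3)   # 10 inputs in 3 dimensions
X2 = torch.randn(4, 3)   # 4 query inputs
k = RBF(3)               # hypothetical constructor taking the input dimension
K = k.forward(X)         # 10 x 10 Gram matrix k(X, X)
Kx = k.forward(X, X2)    # 10 x 4 cross-covariance k(X, X2)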
def f_callback(m, v, it, t):
    # Track the noise variance, per-dimension log-probability, and cumulative
    # wall-clock time at each iteration; optionally checkpoint the model.
    varn_list.append(transform_forward(m.variance).item())
    logpr_list.append(m().item() / m.D)
    if it == 1:
        t_list.append(t)
    else:
        t_list.append(t_list[-1] + t)
    if save_checkpoint and not (it % checkpoint_period):
        torch.save(m.state_dict(), fn_checkpoint + '_it%d.pt' % it)
    print('it=%d, f=%g, varn=%g, t: %g'
          % (it, logpr_list[-1], varn_list[-1], t_list[-1]))
def f_callback(model, negative_log_likelihood, iteration, t):
    # Track the variance, per-dimension log probability, and cumulative
    # wall-clock time at each iteration; optionally checkpoint the model.
    variances.append(transform_forward(model.variance).item())
    log_probabilities.append(model().item() / model.D)
    if iteration == 1:
        t_list.append(t)
    else:
        t_list.append(t_list[-1] + t)
    if save_checkpoint and not (iteration % checkpoint_period):
        torch.save(model.state_dict(), fn_checkpoint + '_it%d.pt' % iteration)
    print('iteration=%d, log probability=%g, variance=%g, t: %g'
          % (iteration, log_probabilities[-1], variances[-1], t_list[-1]))
def forward(self, X, X2=None):
    # Constant (bias) kernel: every entry of the Gram matrix is the variance.
    if X2 is None:
        shape = [X.size()[0], X.size()[0]]
    else:
        shape = [X.size()[0], X2.size()[0]]
    return transform_forward(self.variance) * torch.ones(shape[0], shape[1])
def f_callback(m, v, it, t):
    # Track the noise variance, per-dimension log-probability, and cumulative
    # wall-clock time; checkpoint the model and its latent embedding, and
    # append the status line to a persistent log file.
    varn_list.append(transform_forward(m.variance).item())
    logpr_list.append(m().item() / m.D)
    if it == 1:
        t_list.append(t)
    else:
        t_list.append(t_list[-1] + t)
    if save_checkpoint and not (it % checkpoint_period):
        torch.save(m.state_dict(), fn_checkpoint + '_it%d.pt' % it)
        # Snapshot the current latent embedding alongside the model weights.
        with open(EXPERIMENT + "/X" + '_it%d.pkl' % it, "wb") as f:
            pickle.dump(m.X.detach().numpy(), f)
    log_string = 'it=%d, f=%g, varn=%g, t: %g' % (
        it, logpr_list[-1], varn_list[-1], t_list[-1])
    print(log_string)
    with open(EXPERIMENT + "/log.txt", "a+") as f:
        f.write(log_string + '\n')
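# Hedged usage sketch (not part of the original source): a minimal training
# loop driving a callback with the signature used above. The loop body, the
# optimizer step, and the timing are assumptions; only the callback contract
# (model, objective value, iteration, elapsed time) comes from the snippets.
import time

def train(m, optimizer, n_iters, f_callback=None):
    for it in range(1, n_iters + 1):
        t0 = time.time()
        optimizer.zero_grad()
        loss = m()          # assumed: calling the model returns the objective
        loss.backward()
        optimizer.step()
        if f_callback is not None:
            f_callback(m, loss, it, time.time() - t0)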
def bo_search(m, bo_n_init, bo_n_iters, Ytrain, Ftrain, ftest, ytest,
              do_print=False):
    """
    Initializes BO with L1 warm-start (using dataset features).

    Returns a numpy array of length bo_n_iters holding the best performance
    attained so far per iteration (including initialization). bo_n_iters
    includes initialization iterations, i.e., after warm-start, BO will run
    for bo_n_iters - bo_n_init iterations.
    """
    preds = bo.BO(m.dim, m.kernel, bo.ei,
                  variance=transform_forward(m.variance))
    ix_evaled = []
    ix_candidates = np.where(np.invert(np.isnan(ytest)))[0].tolist()
    ybest_list = []

    ix_init = bo.init_l1(Ytrain, Ftrain, ftest).tolist()
    for l in range(bo_n_init):
        ix = ix_init[l]
        if not np.isnan(ytest[ix]):
            preds.add(m.X[ix], ytest[ix])
            ix_evaled.append(ix)
            ix_candidates.remove(ix)
        yb = preds.ybest
        if yb is None:
            yb = np.nan
        ybest_list.append(yb)
        if do_print:
            print('Iter: %d, %g [%d], Best: %g' % (l, ytest[ix], ix, yb))

    for l in range(bo_n_init, bo_n_iters):
        ix = ix_candidates[preds.next(m.X[ix_candidates])]
        preds.add(m.X[ix], ytest[ix])
        ix_evaled.append(ix)
        ix_candidates.remove(ix)
        ybest_list.append(preds.ybest)
        if do_print:
            print('Iter: %d, %g [%d], Best: %g'
                  % (l, ytest[ix], ix, preds.ybest))

    return np.asarray(ybest_list)
def bayesian_optimization_search(model, bo_n_init, bo_n_iterations, Ytrain,
                                 Ftrain, ftest, ytest, do_print=False):
    """
    Initializes BayesianOptimization with L1 warm-start (using dataset
    features).

    Returns a numpy array of length bo_n_iterations holding the best
    performance attained so far per iteration (including initialization).
    bo_n_iterations includes initialization iterations, i.e., after
    warm-start, BayesianOptimization will run for
    bo_n_iterations - bo_n_init iterations.
    """
    predictions = bayesian_optimization.BayesianOptimization(
        model.dim, model.kernel, bayesian_optimization.expected_improvement,
        variance=transform_forward(model.variance))
    ix_evaluated = []
    ix_candidates = np.where(np.invert(np.isnan(ytest)))[0].tolist()
    ybest_list = []

    def _process_ix(ix, predictions, model, ytest, ix_evaluated,
                    ix_candidates):
        predictions.add(model.X[ix], ytest[ix])
        ix_evaluated.append(ix)
        ix_candidates.remove(ix)

    def _print_status(ix, bo_iteration, ytest, ybest, do_print):
        if do_print:
            print('Iteration: %d, %g [%d], Best: %g'
                  % (bo_iteration, ytest[ix], ix, ybest))

    ix_init = bayesian_optimization.init_l1(Ytrain, Ftrain, ftest).tolist()
    for bo_iteration in range(bo_n_init):
        ix = ix_init[bo_iteration]
        if not np.isnan(ytest[ix]):
            _process_ix(ix, predictions, model, ytest, ix_evaluated,
                        ix_candidates)
        ybest = predictions.ybest
        if ybest is None:
            ybest = np.nan
        ybest_list.append(ybest)
        _print_status(ix, bo_iteration, ytest, ybest, do_print)

    for bo_iteration in range(bo_n_init, bo_n_iterations):
        ix = ix_candidates[predictions.next(model.X[ix_candidates])]
        _process_ix(ix, predictions, model, ytest, ix_evaluated,
                    ix_candidates)
        ybest = predictions.ybest
        ybest_list.append(ybest)
        _print_status(ix, bo_iteration, ytest, ybest, do_print)

    return np.asarray(ybest_list)
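# Hedged usage sketch (not part of the original source): invoking the search
# above for a single held-out dataset. The shapes and contents of `Ytrain`,
# `Ftrain`, `ftest`, and `ytest` are assumptions inferred from how the
# function indexes them.
ybest = bayesian_optimization_search(
    model, bo_n_init=5, bo_n_iterations=50,
    Ytrain=Ytrain, Ftrain=Ftrain, ftest=ftest, ytest=ytest,
    do_print=True)
# ybest[i] is the best observed performance after i + 1 evaluations.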
def forward(self, X, X2=None):
    # White-noise kernel: diagonal variance on X itself; the cross-covariance
    # is zero (the scalar 0. broadcasts when summed with other kernels).
    if X2 is None:
        return torch.eye(X.size()[0]) * transform_forward(self.variance)
    else:
        return 0.
def variance(self):
    # Combined variance of a sum kernel: add the constrained (positive)
    # variances of both components, then map back to the unconstrained space.
    return transform_backward(transform_forward(self.k1.variance)
                              + transform_forward(self.k2.variance))
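# Hedged sketch (not part of the original source): one plausible definition of
# the `transform_forward` / `transform_backward` pair these snippets rely on.
# A softplus keeps variances and lengthscales positive while the raw
# parameters remain unconstrained; the original definitions may differ.
import torch

def transform_forward(x):
    # unconstrained -> positive (softplus)
    return torch.nn.functional.softplus(x)

def transform_backward(y):
    # positive -> unconstrained (inverse softplus): log(exp(y) - 1)
    return torch.log(torch.expm1(y))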