def __init__(self, target_size, input, inducing_size, device='cpu'):
    """Build one independent sparse GP regressor per output dimension.

    Args:
        target_size: number of output dimensions; one GP is created per dim.
        input: training inputs, 1-d or (N, D) array-like; used here only to
            construct the per-output sparse models and infer the input size.
        inducing_size: number of inducing points for each sparse GP.
        device: 'gpu' to request CUDA (used only when available);
            anything else selects the CPU.
    """
    # Fall back to CPU unless CUDA was explicitly requested AND is present.
    if device == 'gpu' and torch.cuda.is_available():
        self.device = torch.device('cuda:0')
    else:
        self.device = torch.device('cpu')

    # A 1-d input array is treated as a single feature column.
    self.input_size = 1 if input.ndim == 1 else input.shape[-1]

    self.target_size = target_size
    self.inducing_size = inducing_size

    # One independent sparse GP per target dimension.
    models = [SparseGPRegressor(input, inducing_size)
              for _ in range(self.target_size)]

    # Unpack the lists directly instead of copying them through an
    # identity comprehension (was: *[_model for _model in _list]).
    self.model = IndependentModelList(*models)
    self.likelihood = LikelihoodList(*[m.likelihood for m in models])

    # Set by init_preprocess() before fitting; None until then.
    self.input_trans = None
    self.target_trans = None
def test_get_fantasy_model(self):
    """Fantasize a two-model IndependentModelList and query the result."""
    submodels = [self.create_model() for _ in range(2)]
    model_list = IndependentModelList(*submodels)
    model_list.eval()
    # Forward pass in eval mode primes the prediction caches.
    model_list(torch.rand(3), torch.rand(3))
    # Per-model fantasy data of differing sizes.
    fantasy_inputs = [torch.randn(2), torch.randn(3)]
    fantasy_targets = [torch.randn(2), torch.randn(3)]
    fantasized = model_list.get_fantasy_model(fantasy_inputs, fantasy_targets)
    # The fantasized model must still accept fresh test inputs.
    fantasized(torch.randn(4))
def test_get_fantasy_model_fixed_noise(self):
    """Fantasize a fixed-noise IndependentModelList with per-point noise."""
    submodels = [self.create_model(fixed_noise=True) for _ in range(2)]
    model_list = IndependentModelList(*submodels)
    model_list.eval()
    # Forward pass in eval mode primes the prediction caches.
    model_list(torch.rand(3), torch.rand(3))
    # Per-model fantasy data of differing sizes, plus matching noise levels.
    fantasy_inputs = [torch.randn(2), torch.randn(3)]
    fantasy_targets = [torch.randn(2), torch.randn(3)]
    fantasy_noise = [0.1 * torch.ones(2), 0.1 * torch.ones(3)]
    fantasized = model_list.get_fantasy_model(
        fantasy_inputs, fantasy_targets, noise=fantasy_noise)
    # The fantasized model must still accept fresh test inputs.
    fantasized(torch.randn(4))
def test_simple_model_list_gp_regression(self, cuda=False):
    """End-to-end train/eval smoke test for a two-model list regression."""
    # Two noisy 1-d datasets of different sizes.
    inputs_a = torch.linspace(0, 0.95, 25) + 0.05 * torch.rand(25)
    inputs_b = torch.linspace(0, 0.95, 15) + 0.05 * torch.rand(15)
    targets_a = torch.sin(inputs_a * (2 * math.pi)) + 0.2 * torch.randn_like(inputs_a)
    targets_b = torch.cos(inputs_b * (2 * math.pi)) + 0.2 * torch.randn_like(inputs_b)

    # One exact GP (with its own likelihood) per dataset.
    lik_a = GaussianLikelihood()
    gp_a = ExactGPModel(inputs_a, targets_a, lik_a)
    lik_b = GaussianLikelihood()
    gp_b = ExactGPModel(inputs_b, targets_b, lik_b)

    model = IndependentModelList(gp_a, gp_b)
    likelihood = LikelihoodList(gp_a.likelihood, gp_b.likelihood)
    if cuda:
        model = model.cuda()

    # Short joint optimization of the summed marginal log-likelihood.
    model.train()
    likelihood.train()
    mll = SumMarginalLogLikelihood(likelihood, model)
    optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=0.1)
    for _ in range(10):
        optimizer.zero_grad()
        joint_output = model(*model.train_inputs)
        loss = -mll(joint_output, model.train_targets)
        loss.backward()
        optimizer.step()

    model.eval()
    likelihood.eval()

    with torch.no_grad(), gpytorch.settings.fast_pred_var():
        device = torch.device("cuda") if cuda else torch.device("cpu")
        test_x = torch.linspace(0, 1, 10, device=device)
        outputs_f = model(test_x, test_x)
        predictions_obs_noise = likelihood(*outputs_f)

    # One latent output and one noisy prediction per submodel.
    self.assertIsInstance(outputs_f, list)
    self.assertEqual(len(outputs_f), 2)
    self.assertIsInstance(predictions_obs_noise, list)
    self.assertEqual(len(predictions_obs_noise), 2)
class SparseGPListRegressor:
    """Multi-output regressor backed by a list of independent sparse GPs.

    One `SparseGPRegressor` is created per target dimension and collected in
    a GPyTorch `IndependentModelList` / `LikelihoodList` pair. Inputs and
    targets are standardized via scikit-learn-style scalers before fitting.
    """

    @ensure_args_torch_floats
    def __init__(self, target_size, input, inducing_size, device='cpu'):
        """Create `target_size` independent sparse GPs over `input`.

        Args:
            target_size: number of output dimensions (one GP per dimension).
            input: training inputs, 1-d or (N, D); used here to build the
                per-output sparse models and infer the input size.
            inducing_size: number of inducing points per model.
            device: 'gpu' to request CUDA when available; otherwise CPU.
        """
        # Fall back to CPU unless CUDA was explicitly requested and exists.
        if device == 'gpu' and torch.cuda.is_available():
            self.device = torch.device('cuda:0')
        else:
            self.device = torch.device('cpu')

        # A 1-d input array is treated as a single feature column.
        if input.ndim == 1:
            self.input_size = 1
        else:
            self.input_size = input.shape[-1]

        self.target_size = target_size
        self.inducing_size = inducing_size

        # One independent sparse GP per output dimension.
        _list = [
            SparseGPRegressor(input, inducing_size)
            for _ in range(self.target_size)
        ]
        self.model = IndependentModelList(*[_model for _model in _list])
        self.likelihood = LikelihoodList(
            *[_model.likelihood for _model in _list])

        # Fitted by init_preprocess(); None until then.
        self.input_trans = None
        self.target_trans = None

    @ensure_args_torch_floats
    @ensure_res_numpy_floats
    def predict(self, input):
        """Predict mean outputs for `input`, shape (M,) or (M, target_size).

        Input is standardized, pushed through every GP in the list, and the
        stacked means are mapped back to the original target scale.
        """
        # NOTE(review): prediction is forced onto the CPU even if the model
        # was trained on GPU — confirm this is intentional.
        self.device = torch.device('cpu')
        self.model.eval().to(self.device)
        self.likelihood.eval().to(self.device)

        # Standardize with the scaler fitted during fit().
        input = transform(input.reshape((-1, self.input_size)),
                          self.input_trans)

        # Fast approximate predictive variances; no gradients needed.
        with max_preconditioner_size(10), torch.no_grad():
            with max_root_decomposition_size(30), fast_pred_var():
                # Every submodel receives the same (standardized) input.
                _input = [input for _ in range(self.target_size)]
                predictions = self.likelihood(*self.model(*_input))

        # Stack per-model means into (M, target_size), then undo scaling.
        output = torch.stack([_pred.mean for _pred in predictions]).T
        output = inverse_transform(output, self.target_trans).squeeze()
        return output

    def init_preprocess(self, target, input):
        """Fit fresh standardization scalers on the raw target/input data."""
        self.target_trans = StandardScaler()
        self.input_trans = StandardScaler()

        self.target_trans.fit(target)
        self.input_trans.fit(input)

    @ensure_args_torch_floats
    @ensure_args_atleast_2d
    def fit(self, target, input, nb_iter=100, lr=1e-1,
            verbose=True, preprocess=True):
        """Train all GPs jointly by maximizing the summed marginal log-lik.

        Args:
            target: training targets, (N, target_size) after the 2-d guard.
            input: training inputs, (N, input_size) after the 2-d guard.
            nb_iter: number of Adam optimization steps.
            lr: Adam learning rate.
            verbose: print the loss each iteration.
            preprocess: fit scalers and standardize target/input first.
        """
        if preprocess:
            self.init_preprocess(target, input)
            target = transform(target, self.target_trans)
            input = transform(input, self.input_trans)

        # update inducing points
        # The same randomly chosen subset of training inputs is written
        # in-place into every submodel's inducing points.
        inducing_idx = np.random.choice(len(input), self.inducing_size,
                                        replace=False)
        for i, _model in enumerate(self.model.models):
            _model.covar_module.inducing_points.data = input[inducing_idx, ...]

        target = target.to(self.device)
        input = input.to(self.device)

        # Each submodel trains on the shared inputs and its own target column.
        for i, _model in enumerate(self.model.models):
            _model.set_train_data(input, target[:, i], strict=False)

        self.model.train().to(self.device)
        self.likelihood.train().to(self.device)

        optimizer = Adam([{'params': self.model.parameters()}], lr=lr)
        # Sum of the per-model exact marginal log-likelihoods.
        mll = SumMarginalLogLikelihood(self.likelihood, self.model)

        for i in range(nb_iter):
            optimizer.zero_grad()
            _output = self.model(*self.model.train_inputs)
            # Negated MLL: minimize to maximize the marginal likelihood.
            loss = -mll(_output, self.model.train_targets)
            loss.backward()
            if verbose:
                print('Iter %d/%d - Loss: %.3f' % (i + 1, nb_iter,
                                                   loss.item()))
            optimizer.step()

        if torch.cuda.is_available():
            torch.cuda.empty_cache()
def test_forward_eval_fixed_noise(self):
    """A fixed-noise model list should evaluate cleanly in eval mode."""
    submodels = [self.create_model(fixed_noise=True) for _ in range(2)]
    model_list = IndependentModelList(*submodels)
    model_list.eval()
    # Single eval-mode forward pass; just checks it runs without error.
    model_list(torch.rand(3))