# Imports assumed by the methods below; transform / inverse_transform and the
# self.* attributes are project-local and defined elsewhere.
import numpy as np
import torch
from torch.optim import Adam

from gpytorch.mlls import ExactMarginalLogLikelihood, SumMarginalLogLikelihood
from gpytorch.settings import (max_preconditioner_size,
                               max_root_decomposition_size,
                               fast_pred_var)


# Fit a single-output exact GP: optionally preprocess, set the training data,
# then optimize the exact marginal log-likelihood with Adam.
def fit(self, target, input, nb_iter=100, lr=1e-1, verbose=True, preprocess=True):
    if preprocess:
        self.init_preprocess(target, input)
        target = transform(target, self.target_trans)
        input = transform(input, self.input_trans)

    target = target.to(self.device)
    input = input.to(self.device)

    self.model.set_train_data(input, target, strict=False)

    self.model.train().to(self.device)
    self.likelihood.train().to(self.device)

    optimizer = Adam([{'params': self.parameters()}], lr=lr)
    mll = ExactMarginalLogLikelihood(self.likelihood, self.model)

    for i in range(nb_iter):
        optimizer.zero_grad()
        _output = self.model(input)
        loss = -mll(_output, target)
        loss.backward()
        if verbose:
            print('Iter %d/%d - Loss: %.3f' % (i + 1, nb_iter, loss.item()))
        optimizer.step()

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
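# --- Hypothetical context (assumption, not from the source): a minimal sketch of the
# single-output exact GP that the fit() above presumes for self.model / self.likelihood.
# The class name and the RBF mean/kernel choice are illustrative; only the GPyTorch
# API calls themselves are real.
import torch
from gpytorch.models import ExactGP
from gpytorch.means import ConstantMean
from gpytorch.kernels import ScaleKernel, RBFKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.distributions import MultivariateNormal


class _ExactGPModel(ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = ConstantMean()
        self.covar_module = ScaleKernel(RBFKernel())

    def forward(self, x):
        # posterior/prior multivariate normal at the query points
        return MultivariateNormal(self.mean_module(x), self.covar_module(x))


# illustrative construction with dummy shapes
_train_x = torch.randn(100, 4)
_train_y = torch.randn(100)
_likelihood = GaussianLikelihood()
_model = _ExactGPModel(_train_x, _train_y, _likelihood)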
# Fit a list of independent sparse GPs (one per target dimension): resample the
# inducing points, set per-output training data, then optimize the summed
# marginal log-likelihood with Adam.
def fit(self, target, input, nb_iter=100, lr=1e-1, verbose=True, preprocess=True):
    if preprocess:
        self.init_preprocess(target, input)
        target = transform(target, self.target_trans)
        input = transform(input, self.input_trans)

    # update inducing points
    inducing_idx = np.random.choice(len(input), self.inducing_size, replace=False)
    for i, _model in enumerate(self.model.models):
        _model.covar_module.inducing_points.data = input[inducing_idx, ...]

    target = target.to(self.device)
    input = input.to(self.device)

    for i, _model in enumerate(self.model.models):
        _model.set_train_data(input, target[:, i], strict=False)

    self.model.train().to(self.device)
    self.likelihood.train().to(self.device)

    optimizer = Adam([{'params': self.model.parameters()}], lr=lr)
    mll = SumMarginalLogLikelihood(self.likelihood, self.model)

    for i in range(nb_iter):
        optimizer.zero_grad()
        _output = self.model(*self.model.train_inputs)
        loss = -mll(_output, self.model.train_targets)
        loss.backward()
        if verbose:
            print('Iter %d/%d - Loss: %.3f' % (i + 1, nb_iter, loss.item()))
        optimizer.step()

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
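# --- Hypothetical context (assumption): the multi-output fit() above expects self.model
# to be an IndependentModelList of per-output sparse GPs whose covariance is an
# InducingPointKernel, and self.likelihood to be the matching LikelihoodList.
# A minimal sketch of that construction; class and helper names are illustrative.
import torch
from gpytorch.models import ExactGP, IndependentModelList
from gpytorch.means import ConstantMean
from gpytorch.kernels import ScaleKernel, RBFKernel, InducingPointKernel
from gpytorch.likelihoods import GaussianLikelihood, LikelihoodList
from gpytorch.distributions import MultivariateNormal


class _SparseGPModel(ExactGP):
    def __init__(self, train_x, train_y, likelihood, inducing_points):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = ConstantMean()
        base_kernel = ScaleKernel(RBFKernel())
        self.covar_module = InducingPointKernel(base_kernel,
                                                inducing_points=inducing_points,
                                                likelihood=likelihood)

    def forward(self, x):
        return MultivariateNormal(self.mean_module(x), self.covar_module(x))


def _build_model_list(train_x, train_y, inducing_points):
    # one independent sparse GP per target dimension
    models, likelihoods = [], []
    for i in range(train_y.shape[1]):
        lik = GaussianLikelihood()
        models.append(_SparseGPModel(train_x, train_y[:, i], lik, inducing_points))
        likelihoods.append(lik)
    return IndependentModelList(*models), LikelihoodList(*likelihoods)


# illustrative construction with dummy shapes
_train_x = torch.randn(200, 4)
_train_y = torch.randn(200, 2)
_model_list, _likelihood_list = _build_model_list(_train_x, _train_y, _train_x[:20])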
# Predict the mean of a single-output GP; with the incremental flag the model
# output is treated as a delta and added onto the leading input dimensions.
def predict(self, input):
    input = transform(input.reshape((-1, self.input_size)), self.input_trans)

    with max_preconditioner_size(10), torch.no_grad():
        with max_root_decomposition_size(30), fast_pred_var():
            output = self.likelihood(self.model(input)).mean

    output = inverse_transform(output, self.target_trans)

    if self.incremental:
        return input[..., :self.target_size] + output
    else:
        return output
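# --- Hypothetical helpers (assumption): transform / inverse_transform are not shown in
# the source; one plausible implementation, assuming sklearn-style fitted scalers
# (e.g. StandardScaler) stored in self.input_trans / self.target_trans.
import numpy as np
import torch


def transform(arr, trans):
    # apply a fitted scaler and return a float tensor; pass through if no scaler is set
    if trans is None:
        return torch.as_tensor(arr, dtype=torch.float32)
    data = arr.detach().cpu().numpy() if torch.is_tensor(arr) else np.asarray(arr)
    return torch.as_tensor(trans.transform(data), dtype=torch.float32)


def inverse_transform(arr, trans):
    # undo the scaling on model outputs
    if trans is None:
        return torch.as_tensor(arr, dtype=torch.float32)
    data = arr.detach().cpu().numpy() if torch.is_tensor(arr) else np.asarray(arr)
    return torch.as_tensor(trans.inverse_transform(data), dtype=torch.float32)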
def predict(self, input):
    self.device = torch.device('cpu')
    self.model.eval().to(self.device)
    self.likelihood.eval().to(self.device)

    input = transform(input.reshape((-1, self.input_size)), self.input_trans)

    with max_preconditioner_size(10), torch.no_grad():
        with max_root_decomposition_size(30), fast_pred_var():
            output = self.likelihood(self.model(input)).mean

    output = inverse_transform(output, self.target_trans).squeeze()
    return output
# Predict with the model list: evaluate every per-output GP on the same inputs
# and stack the per-output posterior means column-wise.
def predict(self, input):
    self.device = torch.device('cpu')
    self.model.eval().to(self.device)
    self.likelihood.eval().to(self.device)

    input = transform(input.reshape((-1, self.input_size)), self.input_trans)

    with max_preconditioner_size(10), torch.no_grad():
        with max_root_decomposition_size(30), fast_pred_var():
            _input = [input for _ in range(self.target_size)]
            predictions = self.likelihood(*self.model(*_input))
            output = torch.stack([_pred.mean for _pred in predictions]).T

    output = inverse_transform(output, self.target_trans).squeeze()
    return output
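# --- Hypothetical extension (assumption): the predict() above returns only means; a
# sketch of pulling per-output means and variances from the same model-list setup,
# reusing the gpytorch settings imported earlier. The function name is illustrative.
import torch
from gpytorch.settings import fast_pred_var


def predict_with_variance(model_list, likelihood_list, input, target_size):
    # evaluate every per-output GP on the same test inputs
    model_list.eval()
    likelihood_list.eval()
    with torch.no_grad(), fast_pred_var():
        inputs = [input for _ in range(target_size)]
        predictions = likelihood_list(*model_list(*inputs))
        mean = torch.stack([p.mean for p in predictions]).T
        var = torch.stack([p.variance for p in predictions]).T
    return mean, var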