def compute_loss(self, *args, **kwargs):
    if self.num_data == 0:
        # No data yet: return a zero loss that still participates in
        # autograd, so callers can unconditionally call .backward().
        loss = TensorType([0.0])
        loss.requires_grad_(True)
        return loss
    else:
        return super().loss(*args, **kwargs)
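# --- Illustrative sketch (not from the original source) ---
# Why the empty-data branch above is safe, assuming TensorType is an alias
# for torch.Tensor (an assumption; this fragment does not define it): a
# single-element leaf tensor with requires_grad=True supports .backward(),
# so a generic training loop runs unchanged even before any data arrives.
import torch

TensorType = torch.Tensor  # assumed alias

loss = TensorType([0.0])
loss.requires_grad_(True)
loss.backward()  # works: a one-element tensor gets an implicit gradient of 1.0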
def func_and_grad(x):
    # Evaluate the acquisition function and its gradient at a single point.
    # Assumes numpy is imported as np at module scope.
    x = TensorType(np.atleast_2d(x))
    x.requires_grad_(True)
    m, v = self.predict_function(x)
    s = v.sqrt()
    if not self.y:
        # No current data: use mean ("everything is an improvement")
        f = m
    else:
        f = _expected_improvement(m, s, min(self.y), mode="min")
    if f.requires_grad:
        f.backward()
        g = x.grad.detach().cpu().numpy().flatten()
    else:
        # Acquisition is constant w.r.t. x (no graph): gradient is zero.
        g = 0.0 * x.detach().cpu().numpy().flatten()
    f = f.detach().cpu().item()
    return f, g
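# --- Usage sketch (illustrative, not from the original source) ---
# Because func_and_grad returns a (value, gradient) pair, it plugs directly
# into scipy.optimize.minimize with jac=True to find the acquisition optimum.
# The dimensionality d and starting point x0 are assumptions for illustration.
import numpy as np
from scipy.optimize import minimize

d = 2             # assumed input dimensionality
x0 = np.zeros(d)  # assumed starting point in the search space
result = minimize(func_and_grad, x0, jac=True, method="L-BFGS-B")
x_next = result.x  # candidate point to evaluate next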