def fvp(self, xs, g, **kwargs):
    """ Return the product between a vector g (in the same format as
    self.variable) and the Fisher information matrix defined by the
    average over xs. """
    gs = unflatten(g, shapes=self.var_shapes)
    ts_fvp = self.ts_fvp(array_to_ts(xs), array_to_ts(gs), **kwargs)
    return flatten([v.numpy() for v in ts_fvp])
def fvp0(self, xs, ys, g, **kwargs):
    """ Compute F(self.pi)*g, where F is the Fisher information matrix
    (defined by the average over xs) and g is a np.ndarray in the same
    format as self.variable. """
    gs = unflatten(g, shapes=self.var_shapes)
    ts_fvp = self.ts_fvp0(array_to_ts(xs), array_to_ts(ys),
                          array_to_ts(gs), **kwargs)
    return flatten([v.numpy() for v in ts_fvp])
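# Usage sketch (illustrative only, not part of the class): this assumes an
# already-constructed instance `oracle` whose `variable` attribute is a flat
# np.ndarray, and batches `xs`, `ys` in whatever format ts_fvp / ts_fvp0
# expect. The names `oracle`, `xs`, and `ys` are hypothetical.
#
#     g = np.zeros_like(oracle.variable)   # flat vector, same layout as variable
#     Fg = oracle.fvp(xs, g)               # Fisher-vector product averaged over xs
#     Fg0 = oracle.fvp0(xs, ys, g)         # variant that also takes ys
#
# Both calls return a flat np.ndarray with the same layout as `variable`.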
def logp_grad(self, xs, ys, fs, **kwargs):
    # Evaluate ts_logp_grad on tensor versions of the inputs and flatten the
    # per-variable gradients into a single np.ndarray.
    ts_grad = self.ts_logp_grad(array_to_ts(xs), array_to_ts(ys),
                                array_to_ts(fs), **kwargs)
    return flatten([v.numpy() for v in ts_grad])
def exp_grad(self, xs, As, bs, cs, canonical=True, diagonal_A=True):
    """ See exp_fun. """
    ts_grad = self.ts_exp_grad(array_to_ts(xs), array_to_ts(As),
                               array_to_ts(bs), array_to_ts(cs),
                               canonical, diagonal_A)
    return flatten([v.numpy() for v in ts_grad])
def mean_variable(self):
    # Flattened numpy copy of the parent's TensorFlow variables.
    return flatten(ts_to_array(super().ts_variables))

def grad(self, x, **kwargs):
    # Gradient evaluated at x, returned as a flat np.ndarray.
    return flatten(ts_to_array(self.ts_grad(array_to_ts(x), **kwargs)))
def flatten(self, vs):
    return flatten(vs)

def compute_grad(self):
    if self._args is None:
        raise ValueError('Oracle has not been initialized')
    grads = self._compute_grad(*self._args)
    return flatten(grads)

def flatten(self, vals):
    return flatten(vals)