def kfra(self):
    with backpack(new_ext.KFRA()):
        _, _, loss = self.problem.forward_pass()
        loss.backward()
    kfra = [p.kfra for p in self.problem.model.parameters()]
    return kfra
def test_interface_kfra():
    interface_test(new_ext.KFRA())


def test_interface_kfra_conv():
    interface_test(new_ext.KFRA(), use_conv=True)
# We can now evaluate the loss and do a backward pass with Backpack
# -----------------------------------------------------------------
loss = lossfunc(model(X), y)

with backpack(
    extensions.BatchGrad(),
    extensions.Variance(),
    extensions.SumGradSquared(),
    extensions.BatchL2Grad(),
    extensions.DiagGGNMC(mc_samples=1),
    extensions.DiagGGNExact(),
    extensions.DiagHessian(),
    extensions.KFAC(mc_samples=1),
    extensions.KFLR(),
    extensions.KFRA(),
):
    loss.backward()

# %%
# And here are the results
# -----------------------------------------------------------------
for name, param in model.named_parameters():
    print(name)
    print(".grad.shape: ", param.grad.shape)
    print(".grad_batch.shape: ", param.grad_batch.shape)
    print(".variance.shape: ", param.variance.shape)
    print(".sum_grad_squared.shape: ", param.sum_grad_squared.shape)
    print(".batch_l2.shape: ", param.batch_l2.shape)
    print(".diag_ggn_mc.shape: ", param.diag_ggn_mc.shape)
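# %%
# An illustrative sketch (assumptions, not prescribed by the example above):
# the Kronecker-factored quantities (kfac, kflr, kfra) are stored as a list
# of factors per parameter rather than a single tensor. A dense curvature
# block for a parameter can be formed as the Kronecker product of its
# factors, e.g. with torch.kron:
import torch

for name, param in model.named_parameters():
    print(name)
    print(".kfra factor shapes: ", [f.shape for f in param.kfra])
    # Kronecker-multiply the factors into one dense block (can be large).
    dense_block = param.kfra[0]
    for factor in param.kfra[1:]:
        dense_block = torch.kron(dense_block, factor)
    print("dense KFRA block shape: ", dense_block.shape)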
def kfra(self) -> List[List[Tensor]]:  # noqa:D102
    with backpack(new_ext.KFRA()):
        _, _, loss = self.problem.forward_pass()
        loss.backward()
    return self.problem.collect_data("kfra")