Example #1
def test_interface_sum_grad_squared():
    interface_test(new_ext.SumGradSquared())
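For context, a minimal sketch of what an interface test like this plausibly checks: after a backward pass inside the backpack context, every parameter should carry a sum_grad_squared attribute shaped like the parameter itself. The model, data, and helper below are illustrative assumptions, not the suite's actual interface_test:

import torch
from torch.nn import CrossEntropyLoss, Linear, Sequential

from backpack import backpack, extend, extensions


def check_sum_grad_squared_interface():
    # Hypothetical stand-in for interface_test, not the suite's real helper.
    model = extend(Sequential(Linear(10, 5)))
    lossfunc = extend(CrossEntropyLoss())
    X, y = torch.randn(8, 10), torch.randint(0, 5, (8,))

    with backpack(extensions.SumGradSquared()):
        lossfunc(model(X), y).backward()

    for param in model.parameters():
        assert param.sum_grad_squared.shape == param.shape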
Example #2
from torch.nn import CrossEntropyLoss, Flatten, Linear, Sequential

from backpack import backpack, extend, extensions
from backpack.utils.examples import load_mnist_data

B = 4
X, y = load_mnist_data(B)

model = Sequential(Flatten(), Linear(784, 10))
lossfunc = CrossEntropyLoss()

model = extend(model)
lossfunc = extend(lossfunc)

# %%
# We can now evaluate the loss and do a backward pass with BackPACK
# -----------------------------------------------------------------

loss = lossfunc(model(X), y)

with backpack(
    extensions.BatchGrad(),
    extensions.Variance(),
    extensions.SumGradSquared(),
    extensions.BatchL2Grad(),
    extensions.DiagGGNMC(mc_samples=1),
    extensions.DiagGGNExact(),
    extensions.DiagHessian(),
    extensions.KFAC(mc_samples=1),
    extensions.KFLR(),
    extensions.KFRA(),
):
    loss.backward()

# %%
# And here are the results
# -----------------------------------------------------------------

for name, param in model.named_parameters():
    print(name)
    print(".grad.shape:             ", param.grad.shape)
    print(".grad_batch.shape:       ", param.grad_batch.shape)
    print(".variance.shape:         ", param.variance.shape)
    print(".sum_grad_squared.shape: ", param.sum_grad_squared.shape)
    print(".batch_l2.shape:         ", param.batch_l2.shape)
    print(".diag_ggn_mc.shape:      ", param.diag_ggn_mc.shape)
    print(".diag_ggn_exact.shape:   ", param.diag_ggn_exact.shape)
    print(".diag_h.shape:           ", param.diag_h.shape)
    print(".kfac (shapes):          ", [k.shape for k in param.kfac])
    print(".kflr (shapes):          ", [k.shape for k in param.kflr])
    print(".kfra (shapes):          ", [k.shape for k in param.kfra])
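Since BatchGrad and SumGradSquared are both requested above, the second moment can be sanity-checked against the individual gradients. A minimal sketch, assuming BackPACK's convention that grad_batch stores the per-sample gradients with the same scaling used by sum_grad_squared:

import torch

for name, param in model.named_parameters():
    # Summing squared individual gradients over the batch dimension
    # should reproduce what SumGradSquared stored.
    recomputed = (param.grad_batch ** 2).sum(dim=0)
    print(name, torch.allclose(param.sum_grad_squared, recomputed))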
Example #3
def test_interface_sum_grad_squared_conv():
    interface_test(new_ext.SumGradSquared(), use_conv=True)
Example #4
import pytest

from backpack import extensions

# utils, flatten, and DiagGGNFlatten are provided by the surrounding
# benchmark code this snippet was extracted from.
model, lossfunc, make_loader = utils.data_prep_cifar10()

backpack_secondorder_extension_classes = {
    #    "KFRA": extensions.KFRA,
    "KFLR": extensions.KFLR,
    "KFAC": extensions.KFAC,
    "DiagGGNExact": extensions.DiagGGNExact,
    "DiagGGNMC": extensions.DiagGGNMC,
    "DiagH": extensions.DiagHessian,
}

backpack_extensions = {
    "Var": extensions.Variance(),
    "BatchGrad": extensions.BatchGrad(),
    "BatchL2": extensions.BatchL2Grad(),
    "SecondMoment": extensions.SumGradSquared(),
}

for name, ext_class in backpack_secondorder_extension_classes.items():
    # Register Flatten support for each second-order extension class,
    # then instantiate it alongside the first-order extensions.
    ext_class.add_module_extension(flatten, DiagGGNFlatten())
    backpack_extensions[name] = ext_class()

combined_parameters = []
combined_names = []
for n in utils.Ns:
    for name, ext in backpack_extensions.items():
        combined_parameters.append((n, ext))
        combined_names.append(str(n) + "-" + name)


@pytest.mark.parametrize("N, ext", combined_parameters, ids=combined_names)
Example #5
def sgs(self):
    with backpack(new_ext.SumGradSquared()):
        _, _, loss = self.problem.forward_pass()
        loss.backward()
        sgs = [p.sum_grad_squared for p in self.problem.model.parameters()]
    return sgs
Example #6
def sgs(self):
    with backpack(new_ext.SumGradSquared()):
        self.loss().backward()
        sgs = [p.sum_grad_squared for p in self.model.parameters()]
    return sgs
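Examples #5 and #6 follow the same pattern: run one backward pass under the extension, then read sum_grad_squared off each parameter. A minimal standalone helper capturing that pattern; the function name and signature are illustrative, not part of either harness:

from typing import List

from torch import Tensor

from backpack import backpack, extensions


def compute_sum_grad_squared(model, loss) -> List[Tensor]:
    # Returns one tensor per parameter, each shaped like the parameter.
    with backpack(extensions.SumGradSquared()):
        loss.backward()
    return [p.sum_grad_squared for p in model.parameters()]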
Example #7
"""

from torch.nn import CrossEntropyLoss, Flatten, Linear, Sequential

from backpack import backpack, extend, extensions
from backpack.utils.examples import load_mnist_data

B = 4
X, y = load_mnist_data(B)

print("# Gradient with PyTorch, gradient 2nd moment with BackPACK | B =", B)

model = Sequential(
    Flatten(),
    Linear(784, 10),
)
lossfunc = CrossEntropyLoss()

model = extend(model)
lossfunc = extend(lossfunc)

loss = lossfunc(model(X), y)

with backpack(extensions.SumGradSquared()):
    loss.backward()

for name, param in model.named_parameters():
    print(name)
    print(".grad.shape:             ", param.grad.shape)
    print(".sum_grad_squared.shape: ", param.sum_grad_squared.shape)
Example #8
def sgs(self) -> List[Tensor]:  # noqa:D102
    with backpack(new_ext.SumGradSquared()):
        _, _, loss = self.problem.forward_pass()
        loss.backward()
    return self.problem.collect_data("sum_grad_squared")
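Here collect_data presumably gathers the named attribute from every parameter of the problem's model, mirroring the list comprehensions in Examples #5 and #6. A hedged sketch of that behavior; the function below is illustrative, not the harness's actual implementation:

from typing import List

from torch import Tensor


def collect_data(model, attribute: str) -> List[Tensor]:
    # Read the BackPACK-written attribute, e.g. "sum_grad_squared",
    # off every parameter.
    return [getattr(p, attribute) for p in model.parameters()]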