Example #1
    def test_l2_regularization(self):
        model = torch.nn.Sequential(torch.nn.Linear(5, 10),
                                    torch.nn.Linear(10, 5))
        # Set every weight and bias to 0.5.
        initializer = InitializerApplicator(
            default_initializer=lambda tensor: constant(tensor, 0.5))
        initializer(model)
        # The empty regex "" matches every parameter, so the L2 penalty covers
        # all 5*10 + 10 + 10*5 + 5 = 115 parameters: 1.0 * 115 * 0.5**2 = 28.75.
        value = RegularizerApplicator({"": L2Regularizer(1.0)})(model)
        assert value.data.numpy() == 28.75
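These snippets read like tests from an older AllenNLP release (dict-based RegularizerApplicator, pre-underscore torch.nn.init.constant). To run one standalone, imports along the following lines are needed; the module paths are assumptions based on the class names used above, not verified against the exact release:

import torch
from torch.nn.init import constant  # assumed: the old, pre-constant_ initializer

from allennlp.common import Params
from allennlp.nn import InitializerApplicator
from allennlp.nn.regularizers import L1Regularizer, L2Regularizer, RegularizerApplicator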
Example #2
    def test_from_params(self):
        # A regularizer can be given by name alone ("l1") or as a dict with a
        # type and its arguments.
        params = Params({"regularizers": {"conv": "l1", "linear": {"type": "l2", "alpha": 10}}})
        regularizer_applicator = RegularizerApplicator.from_params(params)
        regularizers = regularizer_applicator._regularizers  # pylint: disable=protected-access

        assert isinstance(regularizers["conv"], L1Regularizer)
        assert isinstance(regularizers["linear"], L2Regularizer)
        assert regularizers["linear"].alpha == 10
Example #3
    def test_l1_regularization(self):
        model = torch.nn.Sequential(torch.nn.Linear(5, 10),
                                    torch.nn.Linear(10, 5))
        # Set every weight and bias to -1.
        initializer = InitializerApplicator(
            default_initializer=lambda tensor: constant(tensor, -1))
        initializer(model)
        value = RegularizerApplicator({"": L1Regularizer(1.0)})(model)
        # 115 because the biases count too: 5*10 + 10 + 10*5 + 5 = 115
        # parameters, each with absolute value 1.
        assert value.data.numpy() == 115.0
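The expected values follow from what each regularizer computes over the matched parameters. A minimal standalone sketch of the two penalties (plain PyTorch; the helper names are illustrative, not AllenNLP's API):

import torch

def l1_penalty(parameters, alpha=1.0):
    # Illustrative helper: alpha times the sum of absolute values.
    return alpha * sum(p.abs().sum() for p in parameters)

def l2_penalty(parameters, alpha=1.0):
    # Illustrative helper: alpha times the sum of squares.
    return alpha * sum(p.pow(2).sum() for p in parameters)

# With every one of the 115 parameters at -1, l1_penalty gives 115.0;
# with every parameter at 0.5, l2_penalty gives 28.75, matching the asserts above.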
Example #4
    def test_regularizer_applicator_respects_regex_matching(self):
        model = torch.nn.Sequential(
                torch.nn.Linear(5, 10),
                torch.nn.Linear(10, 5)
        )
        # Set every weight and bias to 1.
        initializer = InitializerApplicator(default_initializer=lambda tensor: constant(tensor, 1.))
        initializer(model)
        # "weight" matches the 100 weight entries (L2: 0.5 * 100 * 1**2 = 50),
        # "bias" matches the 15 bias entries (L1: 1.0 * 15 * |1| = 15).
        value = RegularizerApplicator({"weight": L2Regularizer(0.5),
                                       "bias": L1Regularizer(1.0)})(model)
        assert value.data.numpy() == 65.0
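For reference, the regexes in the last example are matched against the model's parameter names. A Sequential of two Linear layers exposes exactly four named parameters (plain PyTorch, independent of the AllenNLP classes above):

import torch

model = torch.nn.Sequential(torch.nn.Linear(5, 10), torch.nn.Linear(10, 5))
# Prints 0.weight (10, 5), 0.bias (10,), 1.weight (5, 10), 1.bias (5,);
# these are the names the "weight" and "bias" regexes match.
for name, param in model.named_parameters():
    print(name, tuple(param.shape))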