Example #1
    def test_step(self):
        model = Model()
        sparsifier = WeightNormSparsifier(sparsity_level=0.5)
        sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])
        for g in sparsifier.groups:
            # Before the step the mask is all ones, i.e. the sparsity level is 0
            module = g['module']
            assert (1.0 - module.parametrizations['weight'][0].mask.mean()) == 0
        sparsifier.enable_mask_update = True
        sparsifier.step()
        self.assertAlmostEqual(
            model.linear.parametrizations['weight'][0].mask.mean().item(),
            0.5,
            places=2)
        for g in sparsifier.groups:
            # After the step the sparsity level is above 0
            module = g['module']
            assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0
        # Check that the mask does not collapse when the weights are
        # repeatedly randomized between steps
        iters_before_collapse = 1000
        for _ in range(iters_before_collapse):
            model.linear.weight.data = torch.randn(model.linear.weight.shape)
            sparsifier.step()
        for g in sparsifier.groups:
            # After many steps the sparsity level still has not collapsed to 0
            module = g['module']
            assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0
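A freshly prepared mask is all ones (mean 1.0), so 1.0 - mask.mean() equals the fraction of zeroed weights, i.e. the current sparsity level. step() recomputes the masks toward the configured sparsity_level, and the enable_mask_update flag gates whether a step actually updates them.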
Example #2
    def test_constructor(self):
        model = Model()
        sparsifier = WeightNormSparsifier()
        sparsifier.prepare(model, config=None)
        for g in sparsifier.module_groups:
            assert isinstance(g['module'], nn.Linear)
            # The module_groups are unordered
            assert g['fqn'] in ('seq.0', 'linear', 'head')
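The snippets reference a Model class that is not defined here (they also mix two revisions of the sparsifier API: sparsifier.groups in Examples #1 and #5 versus sparsifier.module_groups elsewhere). Below is a minimal sketch consistent with the FQNs asserted above ('seq.0', 'linear', 'head'); the layer sizes and the torch.ao.sparsity import path are assumptions, not taken from the snippets:

    import torch
    from torch import nn
    from torch.ao.sparsity import WeightNormSparsifier  # assumed import path

    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            # Submodule FQNs match Example #2: 'seq.0', 'linear', 'head'
            self.seq = nn.Sequential(nn.Linear(16, 16))
            self.linear = nn.Linear(16, 16)
            self.head = nn.Linear(16, 4)  # sizes are placeholders

        def forward(self, x):
            return self.head(self.linear(self.seq(x)))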
Example #3
    def test_mask_squash(self):
        model = Model()
        sparsifier = WeightNormSparsifier()
        sparsifier.prepare(model, config=None)
        sparsifier.squash_mask()
        for g in sparsifier.module_groups:
            module = g['module']
            assert not is_parametrized(module, 'weight')
            assert not hasattr(module, 'mask')
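squash_mask() folds the mask into the weight and removes the parametrization, leaving a plain dense tensor with the masked entries zeroed out; hence neither the parametrization nor the mask attribute survives.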
Example #4
    def test_lambda_scheduler(self):
        model = nn.Sequential(nn.Linear(16, 16))
        sparsifier = WeightNormSparsifier()
        sparsifier.prepare(model, config=None)
        assert sparsifier.module_groups[0]['sparsity_level'] == 0.5
        scheduler = LambdaSL(sparsifier, lambda epoch: epoch * 10)
        assert sparsifier.module_groups[0]['sparsity_level'] == 0.0  # Epoch 0
        scheduler.step()
        assert sparsifier.module_groups[0]['sparsity_level'] == 5.0  # Epoch 1
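LambdaSL mirrors torch.optim.lr_scheduler.LambdaLR: each base sparsity level is multiplied by the lambda evaluated at the current epoch. Constructing the scheduler applies epoch 0, giving 0.5 * (0 * 10) = 0.0, and the first step() advances to epoch 1, giving 0.5 * (1 * 10) = 5.0; note that the result is not clamped to [0, 1].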
Example #5
    def test_constructor(self):
        model = nn.Sequential(nn.Linear(16, 16))
        sparsifier = WeightNormSparsifier()
        sparsifier.prepare(model, config=None)
        scheduler = ImplementedScheduler(sparsifier)

        assert scheduler.sparsifier is sparsifier
        assert scheduler._step_count == 1
        assert scheduler.base_sl == [sparsifier.groups[0]['sparsity_level']]
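ImplementedScheduler is likewise not defined in the snippets. A minimal sketch consistent with Example #8, where the sparsity level halves after the first scheduler step; the BaseScheduler import path is taken from the filename checked in Example #12, and the get_sl() hook is an assumption about its API:

    from torch.ao.sparsity.scheduler.base_scheduler import BaseScheduler

    class ImplementedScheduler(BaseScheduler):
        def get_sl(self):
            # Keep the base sparsity levels at epoch 0, halve them afterwards
            if self.last_epoch > 0:
                return [sl * 0.5 for sl in self.base_sl]
            else:
                return list(self.base_sl)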
Example #6
    def test_step(self):
        model = Model()
        sparsifier = WeightNormSparsifier(sparsity_level=0.5)
        sparsifier.prepare(model, config=[model.linear])
        sparsifier.enable_mask_update = True
        sparsifier.step()
        self.assertAlmostEqual(
            model.linear.parametrizations['weight'][0].mask.mean().item(),
            0.5,
            places=2)
Example #7
    def test_prepare(self):
        model = Model()
        sparsifier = WeightNormSparsifier()
        sparsifier.prepare(model, config=None)
        for g in sparsifier.module_groups:
            module = g['module']
            # Check that the mask exists
            assert hasattr(module.parametrizations['weight'][0], 'mask')
            # Check that the parametrization exists and is correct
            assert is_parametrized(module, 'weight')
            assert type(module.parametrizations.weight[0]) == FakeSparsity
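FakeSparsity is the parametrization attached to the weight: its forward pass multiplies the original weight element-wise by the stored mask, so the module keeps its dense weight while the mask simulates sparsity during the forward and backward passes.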
Example #8
    def test_step(self):
        model = nn.Sequential(nn.Linear(16, 16))
        sparsifier = WeightNormSparsifier()
        sparsifier.prepare(model, config=None)
        assert sparsifier.module_groups[0]['sparsity_level'] == 0.5
        scheduler = ImplementedScheduler(sparsifier)
        assert sparsifier.module_groups[0]['sparsity_level'] == 0.5

        sparsifier.step()
        scheduler.step()
        assert sparsifier.module_groups[0]['sparsity_level'] == 0.25
Example #9
    def test_sparsity_levels(self):
        sparsity_levels = [-1.0, 0.0, 0.5, 1.0, 2.0]
        sparse_block_shapes = [(1, 1), (1, 4), (2, 2), (4, 1)]
        zeros_per_blocks = [0, 1, 2, 3, 4]

        # tee() yields two independent copies of the testcase iterator:
        # one to build the model and config, one to verify the results
        testcases = itertools.tee(itertools.product(sparsity_levels,
                                                    sparse_block_shapes,
                                                    zeros_per_blocks))
        # Create a config and model with all the testcases
        model = nn.Sequential()
        sparsifier = WeightNormSparsifier()

        sparsity_per_layer_config = []
        p = re.compile(r'[-\.\s]')
        for sl, sbs, zpb in testcases[0]:
            # Skip cases where the requested zeros per block exceed the
            # number of elements in a block
            if zpb > sbs[0] * sbs[1]:
                continue
            layer_name = f'{sl}_{sbs}_{zpb}'
            layer_name = p.sub('_', layer_name)

            layer = nn.Linear(12, 12, bias=False)
            layer.weight = nn.Parameter(torch.ones(12, 12))
            model.add_module(layer_name, layer)
            config = {
                'fqn': layer_name,
                'sparsity_level': sl,
                'sparse_block_shape': sbs,
                'zeros_per_block': zpb
            }
            sparsity_per_layer_config.append(config)

        sparsifier.prepare(model, sparsity_per_layer_config)
        sparsifier.step()
        sparsifier.squash_mask()
        model.eval()

        for sl, sbs, zpb in testcases[1]:
            if zpb > sbs[0] * sbs[1]:
                continue
            layer_name = f'{sl}_{sbs}_{zpb}'
            layer_name = p.sub('_', layer_name)
            layer = getattr(model, layer_name)

            # Level of sparsity is achieved
            sparse_mask = (layer.weight == 0).float()
            if zpb == 0:
                assert sparse_mask.mean() == 0
            else:
                # Ratio of individual zeros in the tensor
                true_sl = min(max(sl, 0.0), 1.0)
                true_sl = true_sl * zpb / sbs[0] / sbs[1]
                assert sparse_mask.mean() == true_sl
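As a worked example of the expected value: for sl=0.5, sparse_block_shape=(2, 2) and zeros_per_block=2, the clamped level 0.5 is scaled by 2 zeros out of 4 elements per block, so true_sl = 0.5 * 2 / 4 = 0.25 of the tensor's entries should be zero.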
Example #10
    def test_step(self):
        model = Model()
        sparsifier = WeightNormSparsifier(sparsity_level=0.5)
        sparsifier.prepare(model, config=[model.linear])
        for g in sparsifier.module_groups:
            # Before the step the mask is all ones, i.e. the sparsity level is 0
            module = g['module']
            assert (1.0 - module.parametrizations['weight'][0].mask.mean()) == 0
        sparsifier.enable_mask_update = True
        sparsifier.step()
        self.assertAlmostEqual(
            model.linear.parametrizations['weight'][0].mask.mean().item(),
            0.5,
            places=2)
        for g in sparsifier.module_groups:
            # After the step the sparsity level is above 0
            module = g['module']
            assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0
Example #11
    def test_step_2_of_4(self):
        model = Model()
        sparsifier = WeightNormSparsifier(sparsity_level=1.0,
                                          sparse_block_shape=(1, 4),
                                          zeros_per_block=2)
        sparsifier.prepare(model, config=[model.linear])
        sparsifier.step()
        # Make sure the sparsity level is approximately 50%
        self.assertAlmostEqual(
            model.linear.parametrizations['weight'][0].mask.mean().item(),
            0.5,
            places=2)
        # Make sure each block has exactly 50% zeros
        module = sparsifier.module_groups[0]['module']
        mask = module.parametrizations['weight'][0].mask
        for row in mask:
            for idx in range(0, len(row), 4):
                block = row[idx:idx + 4]
                block, _ = block.sort()
                assert (block[:2] == 0).all()
                assert (block[2:] != 0).all()
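Sorting each (1, 4) block makes the check independent of where the zeros fall: the two smallest entries must be exactly zero and the other two non-zero, i.e. every block carries exactly 2 zeros out of 4, which is what produces the roughly 50% overall sparsity asserted above.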
Example #12
    def test_order_of_steps(self):
        """Checks if the warning is thrown if the scheduler step is called
        before the sparsifier step"""

        model = nn.Sequential(nn.Linear(16, 16))
        sparsifier = WeightNormSparsifier()
        sparsifier.prepare(model, config=None)
        scheduler = ImplementedScheduler(sparsifier)

        # Sparsifier step is not called
        with self.assertWarns(UserWarning):
            scheduler.step()

        # The correct order should produce no scheduler warnings.
        # Note: unrelated warnings may still be recorded here, so we only
        # check that none originate from the base scheduler module.
        with warnings.catch_warnings(record=True) as w:
            sparsifier.step()
            scheduler.step()
            # Make sure there is no warning related to the base_scheduler
            for warning in w:
                fname = warning.filename
                fname = '/'.join(fname.split('/')[-5:])
                assert fname != 'torch/ao/sparsity/scheduler/base_scheduler.py'