def test_step(self):
    """Verify WeightNormSparsifier.step() drives sparsity toward the 0.5 target.

    Three phases are checked:
      1. After prepare(), every mask is fully dense (sparsity level 0).
      2. One step() brings the linear weight mask to ~50% sparsity.
      3. Repeated steps with re-randomized weights never collapse the
         mask to all zeros.

    Uses unittest assertion methods throughout (bare ``assert`` is
    stripped under ``python -O`` and gives poorer failure messages).
    """
    model = Model()
    sparsifier = WeightNormSparsifier(sparsity_level=0.5)
    sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])
    # Before step: every mask is all-ones, i.e. sparsity level is 0.
    for g in sparsifier.groups:
        module = g['module']
        mask = module.parametrizations['weight'][0].mask
        self.assertEqual((1.0 - mask.mean()).item(), 0)
    sparsifier.enable_mask_update = True
    sparsifier.step()
    self.assertAlmostEqual(
        model.linear.parametrizations['weight'][0].mask.mean().item(),
        0.5, places=2)
    # After step: sparsity level has increased above 0.
    for g in sparsifier.groups:
        module = g['module']
        mask = module.parametrizations['weight'][0].mask
        self.assertGreater((1.0 - mask.mean()).item(), 0)
    # Stress test: the mask must not collapse to all zeros even when the
    # underlying weights are re-randomized between many consecutive steps.
    iters_before_collapse = 1000
    for _ in range(iters_before_collapse):
        model.linear.weight.data = torch.randn(model.linear.weight.shape)
        sparsifier.step()
    for g in sparsifier.groups:
        module = g['module']
        mask = module.parametrizations['weight'][0].mask
        self.assertGreater((1.0 - mask.mean()).item(), 0,
                           msg='mask collapsed to all zeros')
def test_step(self):
    """A single step() should leave the linear weight mask ~50% sparse."""
    model = Model()
    sparsifier = WeightNormSparsifier(sparsity_level=0.5)
    sparsifier.prepare(model, config=[model.linear])
    sparsifier.enable_mask_update = True
    sparsifier.step()
    # The mask lives on the first weight parametrization of the layer.
    mask = model.linear.parametrizations['weight'][0].mask
    self.assertAlmostEqual(mask.mean().item(), 0.5, places=2)
def test_step(self):
    """Verify step() moves mask sparsity from 0 toward the 0.5 target.

    Before step() every group's mask must be fully dense; after one
    step() the linear weight mask should be ~50% sparse and each group's
    sparsity strictly positive.

    Uses unittest assertion methods throughout (bare ``assert`` is
    stripped under ``python -O`` and gives poorer failure messages).
    """
    model = Model()
    sparsifier = WeightNormSparsifier(sparsity_level=0.5)
    sparsifier.prepare(model, config=[model.linear])
    # Before step: every mask is all-ones, i.e. sparsity level is 0.
    for g in sparsifier.module_groups:
        module = g['module']
        mask = module.parametrizations['weight'][0].mask
        self.assertEqual((1.0 - mask.mean()).item(), 0)
    sparsifier.enable_mask_update = True
    sparsifier.step()
    self.assertAlmostEqual(
        model.linear.parametrizations['weight'][0].mask.mean().item(),
        0.5, places=2)
    # After step: sparsity level has increased above 0.
    for g in sparsifier.module_groups:
        module = g['module']
        mask = module.parametrizations['weight'][0].mask
        self.assertGreater((1.0 - mask.mean()).item(), 0)