Example #1
0
def get_basic_sparsity_config(model_size=4,
                              input_sample_size=(1, 1, 4, 4),
                              sparsity_init=0.02,
                              sparsity_target=0.5,
                              sparsity_steps=2,
                              sparsity_training_steps=3):
    """Build a test Config for the polynomial-schedule RB sparsity algorithm.

    Only the "conv" layer is marked for sparsification; all schedule knobs
    are exposed as keyword arguments with test-friendly defaults.
    """
    schedule_params = {
        "schedule": "polynomial",
        "sparsity_init": sparsity_init,
        "sparsity_target": sparsity_target,
        "sparsity_steps": sparsity_steps,
        "sparsity_training_steps": sparsity_training_steps
    }
    compression_section = {
        "algorithm": "rb_sparsity",
        "params": schedule_params,
        "layers": {
            "conv": {
                "sparsify": True
            },
        }
    }
    config = Config()
    config.update({
        "model": "basic_sparse_conv",
        "model_size": model_size,
        "input_info": {
            "sample_size": input_sample_size,
        },
        "compression": compression_section
    })
    return config
def get_empty_config(model_size=4, input_sample_size=(1, 1, 4, 4)):
    """Build a minimal test Config (model + sample size, no compression)."""
    cfg = Config()
    cfg.update(dict(model="basic_sparse_conv",
                    model_size=model_size,
                    input_sample_size=input_sample_size))
    return cfg
Example #3
0
 def __init__(self, binarization_algo, config=None):
     """Scheduler state for a binarization algorithm.

     :param binarization_algo: binarization compression algorithm controller
         this scheduler drives.
     :param config: optional Config holding a 'params' section; a fresh empty
         Config is created when omitted.
     """
     super().__init__()
     if config is None:
         config = Config()
     # NOTE(review): 'params' is read with [] even on a freshly created empty
     # Config — presumably Config auto-creates missing sections (addict-style)
     # rather than raising KeyError; confirm Config semantics.
     c = config['params']
     self.config = config
     self.algo = binarization_algo
     # Epochs at which activation/weight binarization become active
     # (both default to epoch 1).
     self.activations_bin_start_epoch = c.get('activations_bin_start_epoch',
                                              1)
     self.weights_bin_start_epoch = c.get('weights_bin_start_epoch', 1)
     self._set_binarization_status()
Example #4
0
def get_basic_magnitude_sparsity_config(input_sample_size=(1, 1, 4, 4)):
    """Build a test Config enabling magnitude sparsity with default params."""
    compression_section = {"algorithm": "magnitude_sparsity", "params": {}}
    config = Config()
    config.update({
        "model": "basic_sparse_conv",
        "input_sample_size": input_sample_size,
        "compression": compression_section
    })
    return config
Example #5
0
def ssd_vgg300():
    """Construct an SSD300 detector with a VGG backbone (21 classes).

    The Config carries the prior-box (anchor) generation parameters for the
    six feature maps of the 300x300 input resolution.
    """
    prior_box_params = {
        "clip": False,
        "variance": [0.1, 0.1, 0.2, 0.2],
        "max_sizes": [60, 111, 162, 213, 264, 315],
        "min_sizes": [30, 60, 111, 162, 213, 264],
        "steps": [8, 16, 32, 64, 100, 300],
        "aspect_ratios": [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
        "flip": True,
    }
    return SSD_VGG(Config(prior_box_params), 300, 21, True)
Example #6
0
def ssd_mobilenet():
    """Construct a MobileNet-backbone SSD detector for 21 classes.

    The Config carries the prior-box (anchor) generation parameters for the
    six detection feature maps.
    """
    prior_box_params = {
        "variance": [0.1, 0.1, 0.2, 0.2],
        "max_sizes": [60, 111, 162, 213, 264, 315],
        "min_sizes": [30, 60, 111, 162, 213, 264],
        "steps": [16, 32, 64, 100, 150, 300],
        "aspect_ratios": [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
        "clip": False,
        "flip": True,
        "top_k": 200,
    }
    return MobileNetSSD(21, Config(prior_box_params))
Example #7
0
def get_basic_sparsity_plus_quantization_config(input_sample_size=(1, 1, 4,
                                                                   4)):
    """Build a test Config stacking RB sparsity and quantization.

    "compression" is a list, i.e. both algorithms are applied together.
    """
    algorithms = [
        {"algorithm": "rb_sparsity"},
        {"algorithm": "quantization"},
    ]
    config = Config()
    config.update({
        "input_sample_size": input_sample_size,
        "compression": algorithms
    })
    return config
Example #8
0
def get_squeezenet_quantization_config(model_size=32):
    """Build a quantization test Config for a custom SqueezeNet 1.1.

    Initialization is skipped (num_init_steps == 0) to keep tests fast.
    """
    compression_section = {
        "algorithm": "quantization",
        "initializer": {"num_init_steps": 0}
    }
    config = Config()
    config.update({
        "model": "squeezenet1_1_custom",
        "model_size": model_size,
        # NOTE(review): sample size is (3, 3, H, W) — a batch of 3 RGB
        # images; confirm the batch dimension of 3 is intentional.
        "input_sample_size": (3, 3, model_size, model_size),
        "compression": compression_section
    })
    return config
Example #9
0
def get_basic_quantization_config(model_size=4):
    """Build a quantization test Config for the basic conv model.

    The sample input is one single-channel square image of side model_size;
    initialization is skipped (num_init_steps == 0).
    """
    compression_section = {
        "algorithm": "quantization",
        "initializer": {"num_init_steps": 0},
        "params": {}
    }
    config = Config()
    config.update({
        "model": "basic_quant_conv",
        "model_size": model_size,
        "input_sample_size": (1, 1, model_size, model_size),
        "compression": compression_section
    })
    return config
Example #10
0
def get_basic_pruning_config(input_sample_size=(1, 1, 4, 4)):
    """Build a test Config skeleton for pruning with empty params."""
    cfg = Config()
    cfg.update({
        "model": "pruning_conv_model",
        "input_info": {"sample_size": input_sample_size},
        # NOTE(review): no "algorithm" key in the compression section —
        # presumably the calling test fills it in; confirm.
        "compression": {"params": {}}
    })
    return cfg
    def __init__(self, pruning_algo, params: Config = None):
        """Scheduler for a pruning algorithm.

        :param pruning_algo: pruning compression algorithm controller to drive.
        :param params: optional Config with scheduling parameters; an empty
            Config is substituted when omitted.
        """
        super().__init__()
        self._params = Config() if params is None else params
        self.algo = pruning_algo

        # Warm-up epochs before pruning starts, then length of the pruning phase.
        self.num_init_steps = self._params.get('num_init_steps', 0)
        self.pruning_steps = self._params.get('pruning_steps', 100)

        # Starting and final pruning rates.
        self.initial_pruning = self._params.get('pruning_init', 0)
        self.pruning_target = self._params.get('pruning_target', 0.5)
Example #12
0
def test_scheduler_can_do_epoch_step__with_rb_algo():
    """Check the polynomial RB-sparsity schedule over epoch steps: the target
    rate ramps 0.2 -> 0.4 -> 0.6, then stays frozen, and the loss is disabled
    once the sparsity_training_steps budget is exhausted."""
    config = Config()
    config['input_info'] = [{"sample_size": [1, 1, 32, 32]}]
    # NOTE(review): assigns into config['compression'] without creating it
    # first — Config presumably auto-creates nested sections (addict-style).
    config['compression']['algorithm'] = 'rb_sparsity'

    # Linear ramp (power=1) from init 0.2 to target 0.6 over 2 steps; loss
    # stays active through 4 training epochs before being disabled.
    config['compression']["params"] = {
        'schedule': 'polynomial',
        'power': 1,
        'sparsity_steps': 2,
        'sparsity_init': 0.2,
        'sparsity_target': 0.6,
        'sparsity_training_steps': 4
    }

    _, compression_ctrl = create_compressed_model_and_algo_for_test(
        BasicConvTestModel(), config)
    scheduler = compression_ctrl.scheduler
    loss = compression_ctrl.loss

    # Epoch 0: target equals sparsity_init, loss active.
    assert pytest.approx(loss.target_sparsity_rate) == 0.2
    assert not loss.disabled

    # All sparsified modules start with sparsification enabled.
    for module_info in compression_ctrl.sparsified_module_info:
        assert module_info.operand.sparsify
    scheduler.epoch_step()
    # Epoch 1: halfway up the linear ramp.
    assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.4
    assert pytest.approx(loss().item(), abs=1e-3) == 64
    assert not loss.disabled

    scheduler.epoch_step()
    # Epoch 2: ramp complete — target reaches sparsity_target.
    assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.6
    assert pytest.approx(loss().item(), abs=1e-3) == 144
    assert not loss.disabled

    scheduler.epoch_step()
    # Epoch 3: still inside sparsity_training_steps — values frozen, loss active.
    assert not loss.disabled
    assert loss.target_sparsity_rate == 0.6
    assert loss().item() == 144

    scheduler.epoch_step()
    # Epoch 4: training budget spent — loss disabled, returns 0, and the
    # per-module sparsify flags are cleared.
    assert loss.disabled
    assert loss.target_sparsity_rate == 0.6
    assert loss() == 0
    for module_info in compression_ctrl.sparsified_module_info:
        assert not module_info.operand.sparsify
Example #13
0
def test_get_default_weight_decay(algo, ref_weight_decay):
    """Check that each compression algorithm yields its expected default
    weight decay."""
    cfg = Config()
    cfg.update({"compression": {"algorithm": algo}})
    assert get_default_weight_decay(cfg) == ref_weight_decay