Example #1
def test_staged_scheduler_with_empty_quantization():
    config = get_squeezenet_quantization_config()
    config['compression'].update({
        'params': {
            "activations_quant_start_epoch": 1,
            "weights_quant_start_epoch": 2,
        }
    })
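    # Staged schedule: activation quantizers turn on at epoch 1, weight quantizers at epoch 2.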
    model = squeezenet1_1_custom(num_classes=10, pretrained=False, dropout=0)

    model, algo = create_compressed_model_and_algo_for_test(model, config)
    scheduler = algo.scheduler
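    # Before the first epoch_step, every quantizer should still be disabled.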
    for module in algo.all_quantizations.values():
        assert not module.is_enabled_quantization()

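    # First epoch_step: only the activation quantizers are expected to be enabled.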
    scheduler.epoch_step()
    for module in algo.all_quantizations.values():
        if module.is_weights:
            assert not module.is_enabled_quantization()
        else:
            assert module.is_enabled_quantization()

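    # Second epoch_step: the weight quantizers should now be enabled as well.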
    scheduler.epoch_step()
    for module in algo.all_quantizations.values():
        assert module.is_enabled_quantization()
Example #2
def test_hawq_on_single_conv_without_quantizers(_seed, dataset_dir, tmp_path,
                                                params: HAWQTestParams):
    config = get_squeezenet_quantization_config(batch_size=params.batch_size)
    iter_number = params.iter_number
    tolerance = 4e-4

    model = squeezenet1_1_custom(num_classes=10, pretrained=False, dropout=0)
    from examples.common.models.classification.squeezenet import model_urls
    load_state(model, model_zoo.load_url(model_urls['squeezenet1_1']))
    model = model.cuda()

    criterion = nn.CrossEntropyLoss().cuda()

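    # Fall back to a temporary directory when no dataset path is supplied.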
    if not dataset_dir:
        dataset_dir = str(tmp_path)
    data_loader, _ = create_test_dataloaders(config.get("model_size"),
                                             dataset_dir, params.batch_size)
    device = next(model.parameters()).device

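    # Freeze everything except the first convolution's weights, so the Hessian
    # trace is estimated with respect to that single Conv2d layer only.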
    for _, param in model.named_parameters():
        param.requires_grad = False
    first_conv = next(iter(get_all_modules_by_type(model, 'Conv2d').values()))
    first_conv.weight.requires_grad = True

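    # Estimate the average Hessian trace and compare it against the reference value.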
    trace_estimator = HessianTraceEstimator(model, criterion, device,
                                            data_loader,
                                            params.num_data_points)
    actual_state = trace_estimator.get_average_traces(max_iter=iter_number,
                                                      tolerance=tolerance)
    assert math.isclose(actual_state.item(), params.ref_trace, rel_tol=1e-09)
Example #3
def test_staged_scheduler_with_hawq():
    config = get_squeezenet_quantization_config()
    config['compression'].update({
        'params': {
            "activations_quant_start_epoch": 1,
            "weights_quant_start_epoch": 2,
        },
        'initializer': {
            'range': {
                'num_init_samples': 1
            },
            'precision': {
                "type": "hawq",
                "num_data_points": 1,
                "iter_number": 1,
                "tolerance": 1
            }
        }
    })
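    # Same staged schedule as above, combined with range and HAWQ precision initializers.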
    num_classes = 10
    model = squeezenet1_1(num_classes=num_classes, dropout=0)

    input_infos_list = create_input_infos(config)
    input_sample_size = input_infos_list[0].shape
    data_loader = DataLoader(
        HawqDatasetMock(input_sample_size[1:], num_classes),
        batch_size=1,
        num_workers=0,  # Workaround for PyTorch MultiprocessingDataLoader issues
        shuffle=False)
    criterion = nn.CrossEntropyLoss().cuda()
    config = register_default_init_args(config, data_loader, criterion)

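    # Building the compressed model runs the registered range and HAWQ precision initialization.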
    model, algo = create_compressed_model_and_algo_for_test(model, config)
    scheduler = algo.scheduler

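    # All quantizers start disabled; with this setup the first epoch_step keeps
    # them disabled, the second enables activations, and the third enables weights.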
    for module in algo.all_quantizations.values():
        assert not module.is_enabled_quantization()

    scheduler.epoch_step()
    for module in algo.all_quantizations.values():
        assert not module.is_enabled_quantization()

    scheduler.epoch_step()
    for module in algo.all_quantizations.values():
        if module.is_weights:
            assert not module.is_enabled_quantization()
        else:
            assert module.is_enabled_quantization()

    scheduler.epoch_step()
    for module in algo.all_quantizations.values():
        assert module.is_enabled_quantization()
Example #4
def test_staged_scheduler_with_range_init():
    config = get_squeezenet_quantization_config()
    config['compression'].update({
        'params': {
            "activations_quant_start_epoch": 1,
            "weights_quant_start_epoch": 2,
        },
        'initializer': {
            'range': {
                'num_init_samples': 1
            }
        }
    })
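    # Staged schedule as in the previous examples, plus range initialization over a single sample.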
    model = squeezenet1_1(num_classes=10, dropout=0)

    input_infos_list = create_input_infos(config)
    input_sample_size = input_infos_list[0].shape
    data_loader = DataLoader(
        OnesDatasetMock(input_sample_size[1:]),
        batch_size=1,
        num_workers=0,  # Workaround for PyTorch MultiprocessingDataLoader issues
        shuffle=False)
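    # Hand the mock data loader to the range initializer.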
    config.register_extra_structs([QuantizationRangeInitArgs(data_loader)])

    model, algo = create_compressed_model_and_algo_for_test(model, config)
    scheduler = algo.scheduler

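    # Expectations mirror the HAWQ test above: everything stays disabled through the
    # first epoch_step, activations enable after the second, weights after the third.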
    for module in algo.all_quantizations.values():
        assert not module.is_enabled_quantization()

    scheduler.epoch_step()
    for module in algo.all_quantizations.values():
        assert not module.is_enabled_quantization()

    scheduler.epoch_step()
    for module in algo.all_quantizations.values():
        if module.is_weights:
            assert not module.is_enabled_quantization()
        else:
            assert module.is_enabled_quantization()

    scheduler.epoch_step()
    for module in algo.all_quantizations.values():
        assert module.is_enabled_quantization()
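
# Shared helper for the HAWQ tests: builds a SqueezeNet quantization config with
# a HAWQ precision initializer (candidate bit widths 4-8) and a range initializer.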
def create_hawq_test_config(batch_size, num_data_points):
    config = get_squeezenet_quantization_config()
    config['batch_size'] = batch_size
    config['compression'].update({
        'initializer': {
            'precision': {
                "type": "hawq",
                "bits": [
                    4,
                    8,
                    6,
                    7,
                    5
                ],
                "num_data_points": num_data_points,
                "iter_number": 1,
                "tolerance": 1e-2
            },
            'range': {
                'num_init_samples': 1
            }
        }})
    return config
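
A minimal usage sketch for the helper above, assuming the same test utilities as in the earlier examples (create_input_infos, HawqDatasetMock, register_default_init_args, create_compressed_model_and_algo_for_test); it is illustrative, not part of the original test suite:

def run_hawq_config_sketch():
    # Build the HAWQ config; all helper names mirror the examples above.
    config = create_hawq_test_config(batch_size=1, num_data_points=1)
    model = squeezenet1_1(num_classes=10, dropout=0)

    input_infos_list = create_input_infos(config)
    input_sample_size = input_infos_list[0].shape
    data_loader = DataLoader(
        HawqDatasetMock(input_sample_size[1:], 10),
        batch_size=1,
        num_workers=0,
        shuffle=False)
    criterion = nn.CrossEntropyLoss().cuda()
    config = register_default_init_args(config, data_loader, criterion)

    # Range and HAWQ precision initialization run inside this call.
    return create_compressed_model_and_algo_for_test(model, config)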