Code Example #1
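A pytest sanity test for HAWQ precision initialization: it builds a sample command line, mocks out the training loop of the selected example (classification, semantic segmentation, or object detection), spies on HAWQPrecisionInitializer.set_chosen_config, runs the sample's main, and checks that a bit width was chosen for every weight to be initialized and that both 4-bit and 8-bit assignments appear. The snippet assumes module-level imports (shlex, SampleType) and the hawq_config fixture provided by the surrounding test module.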
def test_hawq_init(hawq_config, tmp_path, mocker):
    args = {
        "--data": str(hawq_config.dataset_dir),
        "--config": str(hawq_config.config_path),
        "--log-dir": tmp_path,
        "--batch-size": hawq_config.batch_size,
        "--workers":
        0,  # Workaround for the PyTorch MultiProcessingDataLoader issue
    }

    command_line = " ".join(f'{key} {val}' for key, val in args.items())
    if hawq_config.sample_type == SampleType.CLASSIFICATION:
        import examples.classification.main as sample
        mocker.patch(
            "examples.classification.staged_quantization_worker.train_staged")
        mocker.patch("examples.classification.main.train")
    elif hawq_config.sample_type == SampleType.SEMANTIC_SEGMENTATION:
        import examples.semantic_segmentation.main as sample
        mocker.patch("examples.semantic_segmentation.main.train")
    elif hawq_config.sample_type == SampleType.OBJECT_DETECTION:
        import examples.object_detection.main as sample
        mocker.patch("examples.object_detection.main.train")

    from nncf.quantization.init_precision import HAWQPrecisionInitializer
    set_chosen_config_spy = mocker.spy(HAWQPrecisionInitializer,
                                       "set_chosen_config")

    sample.main(shlex.split(command_line))

    bitwidth_list = set_chosen_config_spy.call_args[0][1]
    assert len(bitwidth_list) == hawq_config.num_weights_to_init
    assert 4 in bitwidth_list
    assert 8 in bitwidth_list
Code Example #2
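A generalized variant of the previous test driven by a TestCaseDescriptor: the descriptor supplies the config, dataset, spy setup (setup_spy), and validation (validate_spy), while the test mocks the train functions and SafeMLFLow for the chosen sample type before invoking sample.main. ConfigFactory, SampleType, shlex, and the desc fixture are assumed to come from the surrounding test module.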
def test_precision_init(desc: TestCaseDescriptor, tmp_path, mocker):
    config_factory = ConfigFactory(desc.config, tmp_path / 'config.json')
    args = {
        "--data": str(desc.dataset_dir),
        "--config": config_factory.serialize(),
        "--log-dir": tmp_path,
        "--batch-size": desc.batch_size,
        "--workers":
        0,  # Workaround for the PyTorch MultiProcessingDataLoader issue
    }
    command_line = " ".join(f'{key} {val}' for key, val in args.items())
    # Mock SafeMLFLow to avoid leaving an unclosed mlflow session: the config and SafeMLFLow
    # objects leak when the train function is mocked.
    if desc.sample_type == SampleType.CLASSIFICATION:
        import examples.classification.main as sample
        mocker.patch(
            "examples.classification.staged_quantization_worker.train_staged")
        mocker.patch("examples.classification.main.train")
        mocker.patch("examples.classification.main.SafeMLFLow")
        mocker.patch(
            "examples.classification.staged_quantization_worker.SafeMLFLow")
    elif desc.sample_type == SampleType.SEMANTIC_SEGMENTATION:
        import examples.semantic_segmentation.main as sample
        mocker.patch("examples.semantic_segmentation.main.train")
        mocker.patch("examples.semantic_segmentation.main.SafeMLFLow")
    elif desc.sample_type == SampleType.OBJECT_DETECTION:
        import examples.object_detection.main as sample
        mocker.patch("examples.object_detection.main.train")
        mocker.patch("examples.object_detection.main.SafeMLFLow")
    desc.setup_spy(mocker)

    sample.main(shlex.split(command_line))

    desc.validate_spy()
Code Example #3
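A sanity check that running a sample with --cpu-only keeps the model on the CPU: the training entry points are mocked or spied per sample type, sample.main is executed, the model is recovered from the recorded call arguments, and every parameter is asserted to be off the GPU. reset_context, ConfigFactory, is_binarization, and the config fixture are assumed to be provided by the surrounding test module; torch and shlex are assumed to be imported at module level.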
def test_cpu_only_mode_produces_cpu_only_model(config, tmp_path, mocker):
    reset_context('orig')
    reset_context('quantized_graphs')
    c = config

    config_factory = ConfigFactory(config['config'], tmp_path / 'config.json')
    args = {
        "--data": c["dataset_path"],
        "--config": config_factory.serialize(),
        "--log-dir": tmp_path,
        "--batch-size": c["batch_size"] * torch.cuda.device_count(),
        "--workers": 1,
        "--epochs": 1,
        "--cpu-only": None
    }

    command_line = " ".join(key if val is None else "{} {}".format(key, val)
                            for key, val in args.items())

    if config["sample_type"] == "classification":
        import examples.classification.main as sample
        if is_binarization(config['config']):
            mocker.patch(
                "examples.classification.binarization_worker.train_epoch_bin")
            mocker.patch(
                "examples.classification.binarization_worker.validate")
            import examples.classification.binarization_worker as bin_worker
            bin_worker.validate.return_value = (0, 0)
        else:
            mocker.patch("examples.classification.main.train_epoch")
            mocker.patch("examples.classification.main.validate")
            sample.validate.return_value = (0, 0)
    elif config["sample_type"] == "semantic_segmentation":
        import examples.semantic_segmentation.main as sample
        import examples.semantic_segmentation.train
        mocker.spy(examples.semantic_segmentation.train.Train, "__init__")
    elif config["sample_type"] == "object_detection":
        import examples.object_detection.main as sample
        mocker.patch("examples.object_detection.main.train")

    sample.main(shlex.split(command_line))

    # pylint: disable=no-member
    if config["sample_type"] == "classification":
        if is_binarization(config['config']):
            import examples.classification.binarization_worker as bin_worker
            model_to_be_trained = bin_worker.train_epoch_bin.call_args[0][2]  # model
        else:
            model_to_be_trained = sample.train_epoch.call_args[0][1]  # model
    elif config["sample_type"] == "semantic_segmentation":
        model_to_be_trained = examples.semantic_segmentation.train.Train.__init__.call_args[0][1]  # model
    elif config["sample_type"] == "object_detection":
        model_to_be_trained = sample.train.call_args[0][0]  # net

    for p in model_to_be_trained.parameters():
        assert not p.is_cuda
Code Example #4
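A later revision of the same --cpu-only test: it additionally mocks SafeMLFLow and print_statistics, handles staged quantization instead of binarization for classification, checks how many times statistics printing was called, and then repeats the CPU-only parameter check. Helper names (ConfigFactory, is_staged_quantization) and the config fixture are assumed to come from the surrounding test module.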
def test_cpu_only_mode_produces_cpu_only_model(config, tmp_path, mocker):
    config_factory = ConfigFactory(config['nncf_config'],
                                   tmp_path / 'config.json')
    args = {
        "--data": config["dataset_path"],
        "--config": config_factory.serialize(),
        "--log-dir": tmp_path,
        "--batch-size": config["batch_size"] * torch.cuda.device_count(),
        "--workers":
        0,  # Workaround for the PyTorch MultiProcessingDataLoader issue
        "--epochs": 1,
        "--cpu-only": None
    }

    # Mock SafeMLFLow to avoid leaving an unclosed mlflow session: the config and SafeMLFLow
    # objects leak when the train function is mocked.
    mocker.patch("examples.common.utils.SafeMLFLow")
    command_line = " ".join(key if val is None else "{} {}".format(key, val)
                            for key, val in args.items())
    if config["sample_type"] == "classification":
        import examples.classification.main as sample
        mocked_printing = mocker.patch(
            'examples.classification.main.print_statistics')
        if is_staged_quantization(config['nncf_config']):
            mocker.patch(
                "examples.classification.staged_quantization_worker.train_epoch_staged"
            )
            mocker.patch(
                "examples.classification.staged_quantization_worker.validate")
            import examples.classification.staged_quantization_worker as staged_worker
            mocked_printing = mocker.patch(
                'examples.classification.staged_quantization_worker.print_statistics'
            )
            staged_worker.validate.return_value = (0, 0)
        else:
            mocker.patch("examples.classification.main.train_epoch")
            mocker.patch("examples.classification.main.validate")
            sample.validate.return_value = (0, 0)
    elif config["sample_type"] == "semantic_segmentation":
        import examples.semantic_segmentation.main as sample
        mocked_printing = mocker.patch(
            'examples.semantic_segmentation.main.print_statistics')
        import examples.semantic_segmentation.train
        mocker.spy(examples.semantic_segmentation.train.Train, "__init__")
    elif config["sample_type"] == "object_detection":
        import examples.object_detection.main as sample
        mocker.spy(sample, "train")
        mocked_printing = mocker.patch(
            'examples.object_detection.main.print_statistics')

    sample.main(shlex.split(command_line))

    if config["sample_type"] == "object_detection":
        assert mocked_printing.call_count == 3
    else:
        assert mocked_printing.call_count == 2

    # pylint: disable=no-member
    if config["sample_type"] == "classification":
        if is_staged_quantization(config['nncf_config']):
            import examples.classification.staged_quantization_worker as staged_worker
            model_to_be_trained = staged_worker.train_epoch_staged.call_args[0][2]  # model
        else:
            model_to_be_trained = sample.train_epoch.call_args[0][1]  # model
    elif config["sample_type"] == "semantic_segmentation":
        model_to_be_trained = examples.semantic_segmentation.train.Train.__init__.call_args[0][1]  # model
    elif config["sample_type"] == "object_detection":
        model_to_be_trained = sample.train.call_args[0][0]  # net

    for p in model_to_be_trained.parameters():
        assert not p.is_cuda