Example #1
def framework_info() -> FrameworkInfo:
    """
    Detect the information for the deepsparse framework such as package versions,
    availability for core actions such as training and inference,
    sparsification support, and inference provider support.

    :return: The framework info for deepsparse
    :rtype: FrameworkInfo
    """
    arch = {}

    if check_deepsparse_install(raise_on_error=False):
        from deepsparse.cpu import cpu_architecture

        arch = cpu_architecture()

    cpu_warnings = []
    if arch and arch.isa != "avx512":
        cpu_warnings.append(
            "AVX512 instruction set not detected, inference performance will be limited"
        )
    if arch and arch.isa not in ("avx512", "avx2"):
        cpu_warnings.append(
            "AVX2 and AVX512 instruction sets not detected, "
            "inference performance will be severely limited"
        )
    if arch and not arch.vnni:
        cpu_warnings.append(
            "VNNI instruction set not detected, "
            "quantized inference performance will be limited"
        )

    cpu_provider = FrameworkInferenceProviderInfo(
        name="cpu",
        description=(
            "Performant CPU provider within DeepSparse specializing in speedup of "
            "sparsified models using AVX and VNNI instruction sets"
        ),
        device="cpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=check_deepsparse_install(raise_on_error=False),
        properties={
            "cpu_architecture": arch,
        },
        warnings=cpu_warnings,
    )

    return FrameworkInfo(
        framework=Framework.deepsparse,
        package_versions={
            "deepsparse": get_version(package_name="deepsparse", raise_on_error=False),
            "sparsezoo": get_version(package_name="sparsezoo", raise_on_error=False),
            "sparseml": get_version(package_name="sparseml", raise_on_error=False),
        },
        sparsification=sparsification_info(),
        inference_providers=[cpu_provider],
        training_available=False,
        sparsification_available=False,
        exporting_onnx_available=False,
        inference_available=True,
    )
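
For context, a minimal sketch of how the returned FrameworkInfo might be inspected; it assumes the framework_info above is in scope, and that fields can be read back under the same names passed to the constructor:

# Illustrative usage of the framework_info() defined above (field access
# assumed to mirror the constructor arguments).
info = framework_info()

print("deepsparse version:", info.package_versions["deepsparse"])
for provider in info.inference_providers:
    print(f"provider={provider.name} available={provider.available}")
    for warning in provider.warnings:
        print("  warning:", warning)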
Example #2
def test_get_version():
    version = get_version("sparseml", raise_on_error=True)
    assert version == __version__

    with pytest.raises(ImportError):
        get_version("unknown", raise_on_error=True)

    assert not get_version("unknown", raise_on_error=False)
Example #3
def test_get_version():
    version = get_version("sparseml",
                          raise_on_error=True,
                          alternate_package_names=["sparseml-nightly"])
    assert version == __version__

    with pytest.raises(ImportError):
        get_version("unknown", raise_on_error=True)

    assert not get_version("unknown", raise_on_error=False)
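
Taken together, these tests pin down get_version's contract: return the installed version string, fall back to any alternate_package_names (e.g. nightly builds), raise ImportError for a missing package when raise_on_error=True, and return a falsy value otherwise. A minimal sketch of that contract using importlib.metadata follows; it is an illustration of the tested behavior, not sparseml's actual implementation:

from importlib.metadata import PackageNotFoundError, version
from typing import List, Optional


def get_version_sketch(
    package_name: str,
    raise_on_error: bool,
    alternate_package_names: Optional[List[str]] = None,
) -> Optional[str]:
    # Sketch of the contract exercised by the tests above; not sparseml's code.
    # Try the primary name first, then any alternates (e.g. nightly builds).
    for name in [package_name] + (alternate_package_names or []):
        try:
            return version(name)
        except PackageNotFoundError:
            continue
    if raise_on_error:
        raise ImportError(f"unable to find a version for {package_name}")
    return None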
Example #4
File: info.py Project: kevinaer/sparseml
def framework_info() -> FrameworkInfo:
    """
    Detect the information for the pytorch framework such as package versions,
    availability for core actions such as training and inference,
    sparsification support, and inference provider support.

    :return: The framework info for pytorch
    :rtype: FrameworkInfo
    """
    cpu_provider = FrameworkInferenceProviderInfo(
        name="cpu",
        description="Base CPU provider within PyTorch",
        device="cpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=check_torch_install(raise_on_error=False),
        properties={},
        warnings=[],
    )
    gpu_provider = FrameworkInferenceProviderInfo(
        name="cuda",
        description="Base GPU CUDA provider within PyTorch",
        device="gpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=(
            check_torch_install(raise_on_error=False) and torch.cuda.is_available()
        ),
        properties={},
        warnings=[],
    )

    return FrameworkInfo(
        framework=Framework.pytorch,
        package_versions={
            "torch": get_version(package_name="torch", raise_on_error=False),
            "torchvision": get_version(package_name="torchvision", raise_on_error=False),
            "onnx": get_version(package_name="onnx", raise_on_error=False),
            "sparsezoo": get_version(package_name="sparsezoo", raise_on_error=False),
            "sparseml": get_version(package_name="sparseml", raise_on_error=False),
        },
        sparsification=sparsification_info(),
        inference_providers=[cpu_provider, gpu_provider],
        properties={},
        training_available=True,
        sparsification_available=True,
        exporting_onnx_available=True,
        inference_available=True,
    )
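
Every example on this page gates availability on a check_*_install(raise_on_error=False) helper. A minimal sketch of that pattern (illustrative only; the real helpers live in each framework's base module and may also enforce version constraints):

def check_install_sketch(module_name: str, raise_on_error: bool = True) -> bool:
    # Illustrative stand-in for the check_*_install(raise_on_error=...) helpers
    # used above: True if the module imports cleanly, False (or raise) otherwise.
    try:
        __import__(module_name)
        return True
    except ImportError:
        if raise_on_error:
            raise
        return False


# e.g. check_install_sketch("torch", raise_on_error=False) -> bool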
Example #5
def framework_info() -> FrameworkInfo:
    """
    Detect the information for the tensorflow framework such as package versions,
    availability for core actions such as training and inference,
    sparsification support, and inference provider support.

    :return: The framework info for tensorflow
    :rtype: FrameworkInfo
    """
    cpu_provider = FrameworkInferenceProviderInfo(
        name="cpu",
        description="Base CPU provider within TensorFlow",
        device="cpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=check_tensorflow_install(raise_on_error=False),
        properties={},
        warnings=[],
    )
    gpu_provider = FrameworkInferenceProviderInfo(
        name="cuda",
        description="Base GPU CUDA provider within TensorFlow",
        device="gpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=(
            check_tensorflow_install(raise_on_error=False)
            and get_version("tensorflow_gpu", raise_on_error=False) is not None
            and tf_compat.test.is_gpu_available()
        ),
        properties={},
        warnings=[],
    )

    return FrameworkInfo(
        framework=Framework.tensorflow_v1,
        package_versions={
            "tensorflow": (
                get_version(package_name="tensorflow", raise_on_error=False)
                or get_version(package_name="tensorflow_gpu", raise_on_error=False)
            ),
            "onnx": get_version(package_name="onnx", raise_on_error=False),
            "tf2onnx": get_version(package_name="tf2onnx", raise_on_error=False),
            "sparsezoo": get_version(
                package_name="sparsezoo",
                raise_on_error=False,
                alternate_package_names=["sparsezoo-nightly"],
            ),
            "sparseml": get_version(
                package_name="sparseml",
                raise_on_error=False,
                alternate_package_names=["sparseml-nightly"],
            ),
        },
        sparsification=sparsification_info(),
        inference_providers=[cpu_provider, gpu_provider],
        properties={},
        training_available=True,
        sparsification_available=True,
        exporting_onnx_available=True,
        inference_available=True,
    )
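
A side note on the GPU gate above: tf_compat.test.is_gpu_available() presumably resolves to tf.test.is_gpu_available(), which is deprecated in TensorFlow 2.x. On modern TensorFlow the equivalent check is to list physical GPU devices:

import tensorflow as tf

# Modern replacement for the deprecated tf.test.is_gpu_available() check.
gpu_available = len(tf.config.list_physical_devices("GPU")) > 0
print("GPU available:", gpu_available)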
Example #6
def framework_info() -> FrameworkInfo:
    """
    Detect the information for the keras framework such as package versions,
    availability for core actions such as training and inference,
    sparsification support, and inference provider support.

    :return: The framework info for keras
    :rtype: FrameworkInfo
    """
    cpu_provider = FrameworkInferenceProviderInfo(
        name="cpu",
        description="Base CPU provider within Keras",
        device="cpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=check_keras_install(raise_on_error=False),
        properties={},
        warnings=[],
    )
    gpu_provider = FrameworkInferenceProviderInfo(
        name="cuda",
        description="Base GPU CUDA provider within Keras",
        device="gpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=(
            check_keras_install(raise_on_error=False)
            and tensorflow.test.is_gpu_available()
        ),
        properties={},
        warnings=[],
    )

    return FrameworkInfo(
        framework=Framework.keras,
        package_versions={
            "keras": (
                get_version(package_name="keras", raise_on_error=False)
                if is_native_keras
                else get_version(package_name="tensorflow", raise_on_error=False)
            ),
            "tensorflow": get_version(package_name="tensorflow", raise_on_error=False),
            "onnx": get_version(package_name="onnx", raise_on_error=False),
            "keras2onnx": get_version(package_name="keras2onnx", raise_on_error=False),
            "tf2onnx": get_version(package_name="tf2onnx", raise_on_error=False),
            "sparsezoo": get_version(package_name="sparsezoo", raise_on_error=False),
            "sparseml": get_version(package_name="sparseml", raise_on_error=False),
        },
        sparsification=sparsification_info(),
        inference_providers=[cpu_provider, gpu_provider],
        properties={
            "is_native_keras": is_native_keras,
        },
        training_available=True,
        sparsification_available=True,
        exporting_onnx_available=True,
        inference_available=True,
    )
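
The is_native_keras flag recorded in properties distinguishes the standalone keras package from the tf.keras bundled with TensorFlow; when native Keras is absent, the "keras" version entry falls back to the tensorflow package version. One plausible way such a flag could be derived (an assumption for illustration; sparseml defines the real flag in its Keras base module):

# Assumption: this mirrors, but is not, sparseml's actual detection logic.
try:
    import keras  # standalone Keras distribution

    is_native_keras = True
except ImportError:
    from tensorflow import keras  # fall back to the bundled tf.keras

    is_native_keras = False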
Example #7
def framework_info() -> FrameworkInfo:
    """
    Detect the information for the onnx/onnxruntime framework such as package versions,
    availability for core actions such as training and inference,
    sparsification support, and inference provider support.

    :return: The framework info for onnx/onnxruntime
    :rtype: FrameworkInfo
    """
    all_providers = []
    available_providers = []
    if check_onnxruntime_install(raise_on_error=False):
        from onnxruntime import get_all_providers, get_available_providers

        available_providers = get_available_providers()
        all_providers = get_all_providers()

    cpu_provider = FrameworkInferenceProviderInfo(
        name="cpu",
        description="Base CPU provider within ONNXRuntime",
        device="cpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=(
            check_onnx_install(raise_on_error=False)
            and check_onnxruntime_install(raise_on_error=False)
            and "CPUExecutionProvider" in available_providers
        ),
        properties={},
        warnings=[],
    )
    gpu_provider = FrameworkInferenceProviderInfo(
        name="cuda",
        description="Base GPU CUDA provider within ONNXRuntime",
        device="gpu",
        supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
        available=(
            check_onnx_install(raise_on_error=False)
            and check_onnxruntime_install(raise_on_error=False)
            and "CUDAExecutionProvider" in available_providers
        ),
        properties={},
        warnings=[],
    )

    return FrameworkInfo(
        framework=Framework.onnx,
        package_versions={
            "onnx": get_version(package_name="onnx", raise_on_error=False),
            "onnxruntime": (
                get_version(package_name="onnxruntime", raise_on_error=False)
            ),
            "sparsezoo": get_version(
                package_name="sparsezoo",
                raise_on_error=False,
                alternate_package_names=["sparsezoo-nightly"],
            ),
            "sparseml": get_version(
                package_name="sparseml",
                raise_on_error=False,
                alternate_package_names=["sparseml-nightly"],
            ),
        },
        sparsification=sparsification_info(),
        inference_providers=[cpu_provider, gpu_provider],
        properties={
            "available_providers": available_providers,
            "all_providers": all_providers,
        },
        training_available=False,
        sparsification_available=True,
        exporting_onnx_available=True,
        inference_available=True,
    )
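
The provider names checked above come straight from ONNX Runtime's public API and can be queried directly:

import onnxruntime

# Every provider this build of ONNX Runtime was compiled with, vs. the
# subset usable on the current machine (CUDAExecutionProvider requires a
# GPU-enabled build plus the CUDA libraries).
print(onnxruntime.get_all_providers())
print(onnxruntime.get_available_providers())
print("CUDAExecutionProvider" in onnxruntime.get_available_providers())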