Example #1
import logging as log
import sys

from openvino.runtime import Core


def param_to_string(parameters) -> str:
    """Convert a list / tuple of parameters returned from the runtime to a string."""
    if isinstance(parameters, (list, tuple)):
        return ', '.join([str(x) for x in parameters])
    return str(parameters)


def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO,
                    stream=sys.stdout)

    # --------------------------- Step 1. Initialize OpenVINO Runtime Core --------------------------------------------
    core = Core()

    # --------------------------- Step 2. Get metrics of available devices --------------------------------------------
    log.info('Available devices:')
    for device in core.available_devices:
        log.info(f'{device} :')
        log.info('\tSUPPORTED_PROPERTIES:')
        for property_key in core.get_property(device, 'SUPPORTED_PROPERTIES'):
            if property_key not in ('SUPPORTED_METRICS',
                                    'SUPPORTED_CONFIG_KEYS',
                                    'SUPPORTED_PROPERTIES'):
                try:
                    property_val = core.get_property(device, property_key)
                except TypeError:
                    property_val = 'UNSUPPORTED TYPE'
                log.info(
                    f'\t\t{property_key}: {param_to_string(property_val)}')
        log.info('')

    # -----------------------------------------------------------------------------------------------------------------
    return 0
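For completeness, OpenVINO samples usually invoke main() through a standard entry point; a minimal sketch following that convention:

if __name__ == '__main__':
    sys.exit(main())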
Example #2
from openvino.runtime import Core


def test_get_property_tuple_of_three_ints():
    ie = Core()
    param = ie.get_property("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS")
    assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' " \
                                     f"metric must be tuple but {type(param)} is returned"
    assert all(isinstance(v, int) for v in param), "Not all of the parameter values for " \
                                                   "'RANGE_FOR_ASYNC_INFER_REQUESTS' metric are integers!"
Example #3
from openvino.runtime import Core


def test_get_property_list_of_str():
    ie = Core()
    param = ie.get_property("CPU", "OPTIMIZATION_CAPABILITIES")
    assert isinstance(param, list), "Parameter value for 'OPTIMIZATION_CAPABILITIES' " \
                                    f"metric must be a list but {type(param)} is returned"
    assert all(isinstance(v, str) for v in param), \
        "Not all of the parameter values for 'OPTIMIZATION_CAPABILITIES' metric are strings!"
Example #4
import pytest
from openvino.runtime import Core


def test_get_property(device):
    core = Core()
    if core.get_property(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
        pytest.skip("Can't run on ARM plugin due-to CPU dependent test")
    func = core.read_model(model=test_net_xml, weights=test_net_bin)  # model paths are provided elsewhere in the test suite
    exec_net = core.compile_model(func, device)
    config = exec_net.get_property("PERF_COUNT")
    assert config == "NO"
Example #5
import numpy as np
import pytest
from openvino.runtime import Core


def test_query_state_write_buffer(device, input_shape, data_type, mode):
    core = Core()
    if device == "CPU":
        if core.get_property(device,
                             "FULL_DEVICE_NAME") == "arm_compute::NEON":
            pytest.skip("Can't run on ARM plugin")

    from openvino.runtime import Tensor
    from openvino.runtime.utils.types import get_dtype

    model = create_model_with_memory(input_shape, data_type)
    compiled = core.compile_model(model=model, device_name=device)
    request = compiled.create_infer_request()
    mem_states = request.query_state()
    mem_state = mem_states[0]

    assert mem_state.name == "var_id_667"
    # TODO: Uncomment after fix 45611:
    #  the CPU plugin returns outputs and memory state in FP32 when the original precision is FP16
    # assert mem_state.state.tensor_desc.precision == data_type

    for i in range(1, 10):
        if mode == "set_init_memory_state":
            # create initial value
            const_init = 5
            init_array = np.full(input_shape,
                                 const_init,
                                 dtype=get_dtype(mem_state.state.element_type))
            tensor = Tensor(init_array)
            mem_state.state = tensor

            res = request.infer({0: np.full(input_shape, 1, dtype=data_type)})
            expected_res = np.full(input_shape,
                                   1 + const_init,
                                   dtype=data_type)
        elif mode == "reset_memory_state":
            # reset initial state of ReadValue to zero
            mem_state.reset()
            res = request.infer({0: np.full(input_shape, 1, dtype=data_type)})
            # always ones
            expected_res = np.full(input_shape, 1, dtype=data_type)
        else:
            res = request.infer({0: np.full(input_shape, 1, dtype=data_type)})
            expected_res = np.full(input_shape, i, dtype=data_type)

        assert np.allclose(res[list(res)[0]], expected_res, atol=1e-6), \
            "Expected values: {} \n Actual values: {} \n".format(expected_res, res)
Example #6
# `core` and `model` are assumed to be initialized as in the previous examples.
# ! [core_compile_model]
compiled_model = core.compile_model(model=model, device_name="MULTI", config=
    {
        "MULTI_DEVICE_PRIORITIES": "GPU,CPU",
        "PERFORMANCE_HINT": "THROUGHPUT",
        "INFERENCE_PRECISION_HINT": "f32"
    })
# ! [core_compile_model]

# ! [compiled_model_set_property]
# turn CPU off for multi-device execution
compiled_model.set_property(properties={"MULTI_DEVICE_PRIORITIES": "GPU"})
# ! [compiled_model_set_property]

# ! [core_get_rw_property]
num_streams = core.get_property("CPU", "NUM_STREAMS")
# ! [core_get_rw_property]

# ! [core_get_ro_property]
full_device_name = core.get_property("CPU", "FULL_DEVICE_NAME")
# ! [core_get_ro_property]

# ! [compiled_model_get_rw_property]
perf_mode = compiled_model.get_property("PERFORMANCE_HINT")
# ! [compiled_model_get_rw_property]

# ! [compiled_model_get_ro_property]
nireq = compiled_model.get_property("OPTIMAL_NUMBER_OF_INFER_REQUESTS")
# ! [compiled_model_get_ro_property]
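RW properties can also be set on the Core before a model is compiled; for example, the stream count read above can be changed first (a minimal sketch, assuming NUM_STREAMS is writable for the CPU plugin):

# set a writable property on the device, then read it back
core.set_property("CPU", {"NUM_STREAMS": 4})
num_streams = core.get_property("CPU", "NUM_STREAMS")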

Example #7
from openvino.runtime import Core


def test_get_property_str():
    ie = Core()
    param = ie.get_property("CPU", "FULL_DEVICE_NAME")
    assert isinstance(param, str), "Parameter value for 'FULL_DEVICE_NAME' " \
                                   f"metric must be string but {type(param)} is returned"
Example #8
from openvino.runtime import Core


def test_get_property():
    ie = Core()
    conf = ie.get_property("CPU", "CPU_BIND_THREAD")
    assert conf == "YES"
Example #9
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from openvino.runtime import Core

#! [part0]
core = Core()
cpu_optimization_capabilities = core.get_property("CPU",
                                                  "OPTIMIZATION_CAPABILITIES")
#! [part0]

# TODO: enable part1 when the property API is supported in Python
#! [part1]
core = Core()
model = core.read_model("model.xml")
compiled_model = core.compile_model(model, "CPU")
inference_precision = core.get_property("CPU", "INFERENCE_PRECISION_HINT")
#! [part1]

#! [part2]
core = Core()
core.set_property("CPU", {"INFERENCE_PRECISION_HINT": "f32"})
#! [part2]
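After part2, the hint can be read back the same way part1 does, to confirm it took effect (a minimal check; the exact return type may vary by version):

precision = core.get_property("CPU", "INFERENCE_PRECISION_HINT")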
Example #10
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

from openvino.runtime import Core

# [get_available_devices]
core = Core()
available_devices = core.available_devices
# [get_available_devices]

# [hetero_priorities]
device_priorities = core.get_property("HETERO", "MULTI_DEVICE_PRIORITIES")
# [hetero_priorities]

# [cpu_device_name]
cpu_device_name = core.get_property("CPU", "FULL_DEVICE_NAME")
# [cpu_device_name]

model = core.read_model(model="sample.xml")
# [compile_model_with_property]
config = {"PERFORMANCE_HINT": "THROUGHPUT", "INFERENCE_PRECISION_HINT": "f32"}
compiled_model = core.compile_model(model, "CPU", config)
# [compile_model_with_property]

# [optimal_number_of_infer_requests]
compiled_model = core.compile_model(model, "CPU")
nireq = compiled_model.get_property("OPTIMAL_NUMBER_OF_INFER_REQUESTS")
# [optimal_number_of_infer_requests]

Example #11
from openvino.runtime import Core

device_name = 'GNA'
xml_path = '/tmp/myModel.xml'
# ! [ov:caching:part0]
core = Core()
core.set_property({'CACHE_DIR': '/path/to/cache/dir'})
model = core.read_model(model=xml_path)
compiled_model = core.compile_model(model=model, device_name=device_name)
# ! [ov:caching:part0]

assert compiled_model

# ! [ov:caching:part1]
core = Core()
compiled_model = core.compile_model(model_path=xml_path, device_name=device_name)
# ! [ov:caching:part1]

assert compiled_model

# ! [ov:caching:part2]
core = Core()
core.set_property({'CACHE_DIR': '/path/to/cache/dir'})
compiled_model = core.compile_model(model_path=xml_path, device_name=device_name)
# ! [ov:caching:part2]

assert compiled_model

# ! [ov:caching:part3]
# Find 'EXPORT_IMPORT' capability in supported capabilities
caching_supported = 'EXPORT_IMPORT' in core.get_property(device_name, 'OPTIMIZATION_CAPABILITIES')
# ! [ov:caching:part3]
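The capability check above is typically used to gate the cache setting; a short sketch combining part2 and part3:

if caching_supported:
    core.set_property({'CACHE_DIR': '/path/to/cache/dir'})
compiled_model = core.compile_model(model_path=xml_path, device_name=device_name)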
Example #12
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

from openvino.runtime import Core

# [get_available_devices]
core = Core()
available_devices = core.available_devices
# [get_available_devices]

# [cpu_device_name]
cpu_device_name = core.get_property("CPU", "FULL_DEVICE_NAME")
# [cpu_device_name]

model = core.read_model(model="sample.xml")
# [compile_model_with_property]
config = {"PERFORMANCE_HINT": "THROUGHPUT", "INFERENCE_PRECISION_HINT": "f32"}
compiled_model = core.compile_model(model, "CPU", config)
# [compile_model_with_property]

# [optimal_number_of_infer_requests]
compiled_model = core.compile_model(model, "CPU")
nireq = compiled_model.get_property("OPTIMAL_NUMBER_OF_INFER_REQUESTS")
# [optimal_number_of_infer_requests]

# [core_set_property_then_compile]
# the LATENCY hint is the default for CPU
core.set_property("CPU", {"PERFORMANCE_HINT": "LATENCY"})
# compiled with latency configuration hint
compiled_model_latency = core.compile_model(model, "CPU")
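The core_set_property_then_compile snippet is truncated here; a plausible continuation that overrides the Core-level hint for a single compilation (names are illustrative):

# compiled with an overridden performance hint value
config = {"PERFORMANCE_HINT": "THROUGHPUT"}
compiled_model_thrp = core.compile_model(model, "CPU", config)
# [core_set_property_then_compile]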
Example #13
import openvino.runtime.properties as properties
from openvino.runtime import Core, Type


def test_properties_core(properties_to_set):
    core = Core()
    core.set_property(properties_to_set)

    # RW properties
    assert core.get_property("CPU", properties.enable_profiling()) is True
    assert core.get_property("CPU", properties.cache_dir()) == "./"
    assert core.get_property("CPU", properties.inference_num_threads()) == 9
    assert core.get_property("CPU", properties.affinity()) == properties.Affinity.NONE
    assert core.get_property("CPU", properties.hint.inference_precision()) == Type.f32
    assert core.get_property("CPU", properties.hint.performance_mode()) == properties.hint.PerformanceMode.LATENCY
    assert core.get_property("CPU", properties.hint.num_requests()) == 12
    assert core.get_property("CPU", properties.streams.num()) == 5

    # RO properties
    assert type(core.get_property("CPU", properties.supported_properties())) == dict
    assert type(core.get_property("CPU", properties.available_devices())) == list
    assert type(core.get_property("CPU", properties.optimal_number_of_infer_requests())) == int
    assert type(core.get_property("CPU", properties.range_for_streams())) == tuple
    assert type(core.get_property("CPU", properties.range_for_async_infer_requests())) == tuple
    assert type(core.get_property("CPU", properties.device.full_name())) == str
    assert type(core.get_property("CPU", properties.device.capabilities())) == list
Example #14
import openvino.runtime.properties as properties
from openvino.runtime import Core


def test_single_property_setting():
    core = Core()
    core.set_property("CPU", properties.streams.num(properties.streams.Num.AUTO))

    assert properties.streams.Num.AUTO.to_integer() == -1
    assert type(core.get_property("CPU", properties.streams.num())) == int