def main():
    """Query every available OpenVINO device and log its metrics and config keys.

    Returns 0 so the sample can be used directly as a process exit code.
    """
    log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)

    # --------------------------- Step 1. Initialize OpenVINO Runtime Core ----
    core = Core()

    # --------------------------- Step 2. Get metrics of available devices ----
    log.info('Available devices:')
    for device_name in core.available_devices:
        log.info(f'{device_name} :')

        # Per-device read-only metrics. The two umbrella keys are skipped:
        # they are the lists being iterated here, not values to print.
        log.info('\tSUPPORTED_METRICS:')
        for metric_name in core.get_metric(device_name, 'SUPPORTED_METRICS'):
            if metric_name in ('SUPPORTED_METRICS', 'SUPPORTED_CONFIG_KEYS'):
                continue
            try:
                value = core.get_metric(device_name, metric_name)
            except TypeError:
                # Some metric values are not convertible to Python types.
                value = 'UNSUPPORTED TYPE'
            log.info(f'\t\t{metric_name}: {param_to_string(value)}')
        log.info('')

        # Per-device configuration keys together with their current defaults.
        log.info('\tSUPPORTED_CONFIG_KEYS (default values):')
        for config_key in core.get_metric(device_name, 'SUPPORTED_CONFIG_KEYS'):
            try:
                value = core.get_config(device_name, config_key)
            except TypeError:
                value = 'UNSUPPORTED TYPE'
            log.info(f'\t\t{config_key}: {param_to_string(value)}')
        log.info('')

    # ------------------------------------------------------------------------
    return 0
def test_get_metric_tuple_of_three_ints():
    """RANGE_FOR_ASYNC_INFER_REQUESTS must be reported as a tuple of ints."""
    core = Core()
    value = core.get_metric("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS")
    assert isinstance(value, tuple), (
        "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' "
        f"metric must be tuple but {type(value)} is returned"
    )
    assert all(isinstance(v, int) for v in value), (
        "Not all of the parameter values for "
        "'RANGE_FOR_ASYNC_INFER_REQUESTS' metric are integers!"
    )
def test_get_metric_tuple_of_two_ints():
    """RANGE_FOR_STREAMS must be reported as a tuple of ints."""
    core = Core()
    value = core.get_metric("CPU", "RANGE_FOR_STREAMS")
    assert isinstance(value, tuple), (
        "Parameter value for 'RANGE_FOR_STREAMS' "
        f"metric must be tuple but {type(value)} is returned"
    )
    assert all(isinstance(v, int) for v in value), (
        "Not all of the parameter values for 'RANGE_FOR_STREAMS' metric are integers!"
    )
def test_get_metric_list_of_str():
    """OPTIMIZATION_CAPABILITIES must be reported as a list of strings."""
    core = Core()
    value = core.get_metric("CPU", "OPTIMIZATION_CAPABILITIES")
    assert isinstance(value, list), (
        "Parameter value for 'OPTIMIZATION_CAPABILITIES' "
        f"metric must be a list but {type(value)} is returned"
    )
    assert all(isinstance(v, str) for v in value), (
        "Not all of the parameter values for 'OPTIMIZATION_CAPABILITIES' metric are strings!"
    )
def test_get_config(device):
    """A freshly compiled model must default PERF_COUNT to "NO"."""
    core = Core()
    # ARM CPU plugin is excluded: this check is x86-CPU dependent.
    if core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
        pytest.skip("Can't run on ARM plugin due-to CPU dependent test")
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    assert compiled.get_config("PERF_COUNT") == "NO"
def test_query_state_write_buffer(device, input_shape, data_type, mode):
    """Check ReadValue/Assign memory-state behavior across repeated inference.

    Runs 9 inferences on a model with one memory state and verifies the output
    per *mode*: explicitly seeded state, reset state, or default accumulation.
    """
    core = Core()
    if device == "CPU":
        # ARM CPU plugin is excluded from this CPU-dependent test.
        if core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
            pytest.skip("Can't run on ARM plugin")

    from openvino.runtime import Tensor
    from openvino.runtime.utils.types import get_dtype

    # create_function_with_memory is a project helper; presumably it builds a
    # model whose single variable is named "var_id_667" (asserted below) and
    # whose output accumulates the state with the input — TODO confirm.
    function = create_function_with_memory(input_shape, data_type)
    exec_net = core.compile_model(model=function, device_name=device)
    request = exec_net.create_infer_request()
    mem_states = request.query_state()
    mem_state = mem_states[0]

    assert mem_state.name == "var_id_667"
    # todo: Uncomment after fix 45611,
    # CPU plugin returns outputs and memory state in FP32 in case of FP16 original precision
    # assert mem_state.state.tensor_desc.precision == data_type

    for i in range(1, 10):
        if mode == "set_init_memory_state":
            # create initial value
            const_init = 5
            # Seed the state with 5s every iteration, so each inference of an
            # all-ones input is expected to yield 1 + 5.
            init_array = np.full(input_shape, const_init, dtype=get_dtype(mem_state.state.element_type))
            tensor = Tensor(init_array)
            mem_state.state = tensor
            res = request.infer({0: np.full(input_shape, 1, dtype=data_type)})
            expected_res = np.full(input_shape, 1 + const_init, dtype=data_type)
        elif mode == "reset_memory_state":
            # reset initial state of ReadValue to zero
            mem_state.reset()
            res = request.infer({0: np.full(input_shape, 1, dtype=data_type)})
            # always ones
            expected_res = np.full(input_shape, 1, dtype=data_type)
        else:
            # Default mode: state is never touched, so the all-ones input is
            # expected to accumulate — output equals the iteration number i.
            res = request.infer({0: np.full(input_shape, 1, dtype=data_type)})
            expected_res = np.full(input_shape, i, dtype=data_type)

        # Compare the first (only) output tensor against the expectation.
        assert np.allclose(res[list(res)[0]], expected_res, atol=1e-6), \
            "Expected values: {} \n Actual values: {} \n".format(expected_res, res)
def test_get_metric_str():
    """FULL_DEVICE_NAME of the CPU plugin must be a string."""
    core = Core()
    value = core.get_metric("CPU", "FULL_DEVICE_NAME")
    assert isinstance(value, str), (
        "Parameter value for 'FULL_DEVICE_NAME' "
        f"metric must be string but {type(value)} is returned"
    )
# ! [compiled_model_get_rw_property] perf_mode = compiled_model.get_property("PERFORMANCE_HINT") # ! [compiled_model_get_rw_property] # ! [compiled_model_get_ro_property] nireq = compiled_model.get_property("OPTIMAL_NUMBER_OF_INFER_REQUESTS") # ! [compiled_model_get_ro_property] from openvino.inference_engine import IECore core = IECore() #! [core_get_metric] full_device_name = core.get_metric("CPU", "FULL_DEVICE_NAME") #! [core_get_metric] #! [core_get_config] num_streams = core.get_config("CPU", "CPU_THROUGHPUT_STREAMS") #! [core_get_config] #! [core_set_config] core.set_config({"PERF_COUNT": "YES"}, "CPU") #! [core_set_config] net = core.read_network("sample.xml") #! [core_load_network] exec_network = core.load_network(net, "MULTI", {"DEVICE_PRIORITIES": "CPU, GPU", "PERFORMANCE_HINT": "THROUGHPUT",