Example #1
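Builds a one-ReLU nGraph Function, compiles it on CPU through the transitional IENetwork/Blob API, and checks the inference result against a NumPy reference.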
# Assumed imports (hypothetical; module paths vary across the transitional
# OpenVINO Python bindings this test targets):
import numpy as np

import openvino.opset8 as ov
from openvino import Blob, Core, IENetwork, TensorDesc
from openvino.impl import Function


def test_core_class():
    input_shape = [1, 3, 4, 4]
    param = ov.parameter(input_shape, np.float32, name="parameter")
    relu = ov.relu(param, name="relu")
    func = Function([relu], [param], "test")
    func.get_ordered_ops()[2].friendly_name = "friendly"

    cnn_network = IENetwork(func)

    core = Core()
    core.set_config({}, device_name="CPU")
    executable_network = core.compile_model(cnn_network, "CPU", {})

    td = TensorDesc("FP32", input_shape, "NCHW")

    request = executable_network.create_infer_request()
    input_data = np.random.rand(*input_shape) - 0.5

    # Reference ReLU result computed with NumPy.
    expected_output = np.maximum(0.0, input_data)

    input_blob = Blob(td, input_data)

    request.set_input({"parameter": input_blob})
    request.infer()

    result = request.get_blob("relu").buffer

    assert np.allclose(result, expected_output)
Example #2
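A thin wrapper around Core that represents an nGraph runtime environment and turns a Node or Function into a callable Computation.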
# Assumed context (hypothetical): Core, Node, Function, and Computation come
# from the OpenVINO/nGraph Python bindings; exact module paths vary by release.
import logging as log
from typing import Dict, Union


class Runtime(object):
    """Represents an nGraph runtime environment."""
    def __init__(self, backend_name: str) -> None:
        self.backend_name = backend_name
        log.debug("Creating Inference Engine for %s", backend_name)
        self.backend = Core()
        assert backend_name in self.backend.available_devices, (
            'The requested device "' + backend_name + '" is not supported!')

    def set_config(self, config: Dict[str, str]) -> None:
        """Set the inference engine configuration."""
        self.backend.set_config(config, device_name=self.backend_name)

    def __repr__(self) -> str:
        return "<Runtime: Backend='{}'>".format(self.backend_name)

    def computation(self, node_or_function: Union[Node, Function],
                    *inputs: Node) -> "Computation":
        """Return a callable Computation object."""
        if isinstance(node_or_function, Node):
            ng_function = Function(node_or_function, inputs,
                                   node_or_function.name)
            return Computation(self, ng_function)
        elif isinstance(node_or_function, Function):
            return Computation(self, node_or_function)
        else:
            raise TypeError(
                "Runtime.computation must be called with an nGraph Function object "
                "or an nGraph node object and, optionally, Parameter node objects. "
                "Called with: %s" % node_or_function
            )
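
A minimal usage sketch for Runtime.computation, assuming the nGraph opset helpers are importable as ng and a CPU device is available (the graph built here is illustrative, not part of the original test suite):

import numpy as np
import ngraph as ng

runtime = Runtime("CPU")

# Build a trivial graph: y = relu(x).
param = ng.parameter([2, 2], dtype=np.float32, name="x")
model = ng.relu(param)

# Wrap the node (plus its Parameter) in a callable Computation and run it.
relu_fn = runtime.computation(model, param)
result = relu_fn(np.array([[-1.0, 2.0], [3.0, -4.0]], dtype=np.float32))
print(result)  # expected: [[0. 2.] [3. 0.]]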
Example #3
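A pytest case that feeds one input by positional index and another by tensor name in the same infer() call; it relies on the test module's read_image() helper and test network files.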
def test_infer_mixed_keys(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    model = core.compile_model(func, device)

    img = read_image()
    tensor = Tensor(img)

    data2 = np.ones(shape=img.shape, dtype=np.float32)
    tensor2 = Tensor(data2)

    request = model.create_infer_request()
    # Inputs may be addressed by positional index and by tensor name in one call.
    res = request.infer({0: tensor2, "data": tensor})
    assert np.argmax(res) == 2
Example #4
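Runs inference with PERF_COUNT enabled and inspects the per-node profiling records returned by get_profiling_info().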
def test_get_profiling_info(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    exec_net = core.compile_model(func, device)
    img = read_image()
    request = exec_net.create_infer_request()
    request.infer({0: img})
    assert request.latency > 0
    prof_info = request.get_profiling_info()
    # "fc_out" resolves to a Softmax node that the plugin optimized away,
    # hence OPTIMIZED_OUT rather than EXECUTED.
    soft_max_node = next(node for node in prof_info if node.node_name == "fc_out")
    assert soft_max_node.node_type == "Softmax"
    assert soft_max_node.status == ProfilingInfo.Status.OPTIMIZED_OUT
    assert isinstance(soft_max_node.real_time, datetime.timedelta)
    assert isinstance(soft_max_node.cpu_time, datetime.timedelta)
    assert isinstance(soft_max_node.exec_type, str)
Example #5
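A negative variant of the mixed-keys test: passing the output name "fc_out" as an input key must raise a TypeError.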
def test_infer_mixed_keys(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    exec_net = core.compile_model(func, device)

    img = read_image()
    tensor = Tensor(img)

    data2 = np.ones(shape=(1, 10), dtype=np.float32)
    tensor2 = Tensor(data2)

    request = exec_net.create_infer_request()
    # "fc_out" names an output, not an input, so infer() must reject it.
    with pytest.raises(TypeError) as e:
        request.infer({0: tensor, "fc_out": tensor2})
    assert "incompatible function arguments!" in str(e.value)
Example #6
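The performance-counter check written against the legacy API (read_network/load_network, Blob, get_perf_counts).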
def test_get_perf_counts(device):
    ie_core = Core()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    ie_core.set_config({"PERF_COUNT": "YES"}, device)
    exec_net = ie_core.load_network(net, device)
    img = read_image()
    request = exec_net.create_infer_request()
    td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
    input_blob = Blob(td, img)
    request.set_input({"data": input_blob})
    request.infer()
    pc = request.get_perf_counts()
    assert pc["29"]["status"] == "EXECUTED"
    assert pc["29"]["layer_type"] == "FullyConnected"
    del exec_net
    del ie_core
    del net
Example #7
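Enables dynamic batching, loads the network with batch_size = 10, then restricts inference to a single batch with set_batch(1) and checks the first result.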
def test_set_batch_size(device):
    ie_core = Core()
    ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
    net = ie_core.read_network(test_net_xml, test_net_bin)
    net.batch_size = 10
    data = np.ones(shape=net.input_info["data"].input_data.shape)
    exec_net = ie_core.load_network(net, device)
    data[0] = read_image()[0]
    request = exec_net.create_infer_request()
    # Process only the first element of the 10-element batch.
    request.set_batch(1)
    # The blob descriptor must match the batch-10 data shape set above.
    td = TensorDesc("FP32", [10, 3, 32, 32], "NCHW")
    input_blob = Blob(td, data)
    request.set_input({"data": input_blob})
    request.infer()
    assert np.allclose(int(round(request.output_blobs["fc_out"].buffer[0][2])), 1), \
        "Incorrect data for 1st batch"
    del exec_net
    del ie_core
    del net