Example #1
import numpy as np
import openvino.runtime as ov
import openvino.runtime.opset8 as ops
from openvino.runtime import Model, Type
from openvino.preprocess import PrePostProcessor, ResizeAlgorithm


def test_ngraph_preprocess_set_from_tensor():
    shape = [1, 224, 224, 3]
    inp_shape = [1, 480, 640, 3]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    parameter_a.set_layout(ov.Layout("NHWC"))
    model = parameter_a
    function = Model(model, [parameter_a], "TestFunction")

    input_data = ov.Tensor(Type.i32, inp_shape)
    p = PrePostProcessor(function)
    inp = p.input()
    inp.tensor().set_from(input_data)
    inp.preprocess().resize(ResizeAlgorithm.RESIZE_LINEAR)
    function = p.build()
    assert function.input().shape == ov.Shape(inp_shape)
    assert function.input().element_type == Type.i32
    assert function.output().shape == ov.Shape(shape)
    assert function.output().element_type == Type.f32
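
    # Hypothetical extra checks (not in the original test): the layout declared
    # on the parameter and the type/shape that set_from() copied from the tensor
    # can also be verified directly.
    assert parameter_a.get_layout() == ov.Layout("NHWC")
    assert input_data.shape == ov.Shape(inp_shape)
    assert input_data.element_type == Type.i32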
Example #2
    pass

if model.output(0).partial_shape[1].is_dynamic():
    # dimension 1 of the output is dynamic
    pass
#! [detect_dynamic]

executable = core.compile_model(model)
infer_request = executable.create_infer_request()

#! [set_input_tensor]
# The first inference call

# Create a tensor compatible with the model input
# Shape [1, 128] is compatible with the reshape statements made in the previous examples
input_tensor1 = ov.Tensor(model.input().element_type, [1, 128])
# ... write values to input_tensor1

# Set the tensor as an input for the infer request
infer_request.set_input_tensor(input_tensor1)

# Do the inference
infer_request.infer()

# Or pass the tensor directly to infer() to set it as the model input and run inference in one call
infer_request.infer([input_tensor1])

# Or pass a numpy array, which is wrapped into a tensor and set as the input of the infer request
input_data = np.ones(shape=[1, 128])
infer_request.infer([input_data])
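
# A minimal follow-on sketch (not in the original snippet): after infer(), the
# result can be read back through the request's output tensor; .data is a
# numpy view over the tensor's memory.
output_tensor = infer_request.get_output_tensor()
print(output_tensor.data)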
Example #3
input_a = ov.opset8.parameter([8])
res = ov.opset8.absolute(input_a)
model = ov.Model(res, [input_a])
compiled = core.compile_model(model, "CPU")

print(model.inputs)
print(model.outputs)

print(compiled.inputs)
print(compiled.outputs)
#! [properties_example]

#! [tensor_basics]
data_float64 = np.ones(shape=(2, 8))

tensor = ov.Tensor(data_float64)
assert tensor.element_type == ov.Type.f64

data_int32 = np.ones(shape=(2, 8), dtype=np.int32)

tensor = ov.Tensor(data_int32)
assert tensor.element_type == ov.Type.i32
#! [tensor_basics]

#! [tensor_shared_mode]
data_to_share = np.ones(shape=(2, 8))

shared_tensor = ov.Tensor(data_to_share, shared_memory=True)

# Editing the numpy array also changes the Tensor's data, since the memory is shared
data_to_share[0][2] = 6.0
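
# For contrast, a hypothetical addition (not in the original snippet): in the
# default copy mode the Tensor owns its own buffer, so later edits to the numpy
# array are not visible through the Tensor.
copied_tensor = ov.Tensor(data_to_share)  # shared_memory=False by default
data_to_share[0][3] = 7.0
assert shared_tensor.data[0][3] == 7.0  # shared tensor sees the new value
assert copied_tensor.data[0][3] == 1.0  # copied tensor keeps the original value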
Example #5
    res = ov.opset8.result(data)
    return ov.Model([res], [data], "model")


model = create_model()
compiled_model = core.compile_model(model, "AUTO")
#! [part2_4]

#! [part3]
infer_request = compiled_model.create_infer_request()
#! [part3]

memory = np.array([1, 2, 3, 4])
#! [part4]
# Create tensor from external memory
input_tensor = ov.Tensor(array=memory, shared_memory=True)
# Set input tensor for model with one input
infer_request.set_input_tensor(input_tensor)
#! [part4]

#! [part5]
infer_request.start_async()
infer_request.wait()
#! [part5]

#! [part6]
# Get output tensor for model with one output
output = infer_request.get_output_tensor()
output_buffer = output.data
# output_buffer[...] provides access to the output tensor data
#! [part6]
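
# A small follow-on note (not in the original snippet): output_buffer is a view
# over memory owned by the runtime, so copy it if the values must survive the
# next inference call.
result = np.array(output_buffer, copy=True)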
Example #6
output_tensor = infer_request.get_output_tensor()
#! [get_set_one_tensor]

#! [get_set_index_tensor]
input_tensor = infer_request.get_input_tensor(0)
output_tensor = infer_request.get_output_tensor(1)
#! [get_set_index_tensor]

#! [get_set_name_tensor]
input_tensor = infer_request.get_tensor("input_name")
output_tensor = infer_request.get_tensor("output_name")
#! [get_set_name_tensor]

#! [get_set_tensor]
tensor1 = infer_request.get_tensor("tensor_name1")
tensor2 = ov.Tensor()
infer_request.set_tensor("tensor_name2", tensor2)
#! [get_set_tensor]

#! [get_set_tensor_by_port]
input_port = model.input(0)
output_port = model.output("tensor_name")
input_tensor = ov.Tensor()
infer_request.set_tensor(input_port, input_tensor)
output_tensor = infer_request.get_tensor(output_port)
#! [get_set_tensor_by_port]

infer_request1 = compiled_model.create_infer_request()
infer_request2 = compiled_model.create_infer_request()

#! [cascade_models]
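# A minimal sketch of cascading two models (hypothetical, assuming the two
# requests belong to models with compatible ports): feed the first request's
# output tensor directly into the second request as its input.
output = infer_request1.get_output_tensor(0)
infer_request2.set_input_tensor(0, output)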
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import openvino.runtime as ov

#! [dynamic_batch]
core = ov.Core()

C = 3
H = 224
W = 224

model = core.read_model("model.xml")
model.reshape([(1, 10), C, H, W])

# compile model and create infer request
compiled_model = core.compile_model(model, "GPU")
infer_request = compiled_model.create_infer_request()

# create input tensor with specific batch size
input_tensor = ov.Tensor(model.input().element_type, [2, C, H, W])

# ...

infer_request.infer([input_tensor])

#! [dynamic_batch]
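
# A hypothetical follow-up (not in the original snippet): because the batch
# dimension was reshaped to the dynamic range (1, 10), a later inference may
# use a different batch size with the same compiled model.
input_tensor_b4 = ov.Tensor(model.input().element_type, [4, C, H, W])
infer_request.infer([input_tensor_b4])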