Example #1
def test_tflite_parser_with_optional_options_out_of_scope(shared_data_folder):
    parser = create_with_opt()
    network = parser.CreateNetworkFromBinaryFile(
        os.path.join(shared_data_folder, "mock_model.tflite"))

    graphs_count = parser.GetSubgraphCount()
    graph_id = graphs_count - 1

    input_names = parser.GetSubgraphInputTensorNames(graph_id)
    input_binding_info = parser.GetNetworkInputBindingInfo(
        graph_id, input_names[0])

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)

    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(),
                                         ann.OptimizerOptions())
    assert 0 == len(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)
    assert "" == messages
Example #2
def test_optimize_owned_by_python(network_file, get_runtime):
    preferred_backends = get_runtime[0]
    network = get_runtime[1]
    runtime = get_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    assert opt_network.thisown
Example #3
def mock_model_runtime(shared_data_folder):
    parser = ann.ITfLiteParser()
    network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite'))
    graph_id = 0

    input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, "input_1")

    input_tensor_data = np.load(os.path.join(shared_data_folder, 'tflite_parser/input_lite.npy'))

    preferred_backends = [ann.BackendId('CpuRef')]

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    print(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)

    print(messages)

    input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)
    outputs_binding_info = []

    for output_name in output_names:
        outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(graph_id, output_name))

    output_tensors = ann.make_output_tensors(outputs_binding_info)

    yield runtime, net_id, input_tensors, output_tensors
Example #4
def test_optimize_executes_successfully_for_neon_backend_only(network_file, get_runtime):
    preferred_backends = [ann.BackendId('CpuAcc')]
    network = get_runtime[1]
    runtime = get_runtime[2]

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    assert 0 == len(messages)
    assert opt_network
Example #5
def test_optimize_executes_successfully(network_file, get_runtime):
    preferred_backends = [ann.BackendId('CpuRef')]
    network = get_runtime[1]
    runtime = get_runtime[2]

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    assert len(messages) == 0, 'With only CpuRef, there should be no warnings regardless of architecture.'
    assert opt_network
Example #6
def test_optimize_fails_for_no_backends_specified(network_file, get_runtime):
    empty_backends = []
    network = get_runtime[1]
    runtime = get_runtime[2]

    with pytest.raises(RuntimeError) as err:
        ann.Optimize(network, empty_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    expected_error_message = "Invoked Optimize with no backends specified"
    assert expected_error_message in str(err.value)
Example #7
def test_optimize_fails_for_invalid_backends(network_file, get_runtime):
    invalid_backends = [ann.BackendId('Unknown')]
    network = get_runtime[1]
    runtime = get_runtime[2]

    with pytest.raises(RuntimeError) as err:
        ann.Optimize(network, invalid_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    expected_error_message = "None of the preferred backends [Unknown ] are supported."
    assert expected_error_message in str(err.value)
Example #8
def test_python_disowns_network(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]
    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    runtime.LoadNetwork(opt_network)

    assert not opt_network.thisown
Example #9
def test_caffe_parser_end_to_end(shared_data_folder):
    parser = ann.ICaffeParser()

    # Load the network specifying the inputs and outputs
    input_name = "Placeholder"
    tensor_shape = {input_name: ann.TensorShape((1, 1, 28, 28))}
    requested_outputs = ["output"]

    network = parser.CreateNetworkFromBinaryFile(
        os.path.join(shared_data_folder, 'mock_model.caffemodel'),
        tensor_shape, requested_outputs)

    # Specify preferred backend
    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]

    input_binding_info = parser.GetNetworkInputBindingInfo(input_name)

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(),
                                         ann.OptimizerOptions())

    assert 0 == len(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)

    assert "" == messages

    # Load test image data stored in input_caffe.npy
    input_tensor_data = np.load(
        os.path.join(shared_data_folder,
                     'caffe_parser/input_caffe.npy')).astype(np.float32)
    input_tensors = ann.make_input_tensors([input_binding_info],
                                           [input_tensor_data])

    # Load output binding info and create the output tensors
    outputs_binding_info = []
    for output_name in requested_outputs:
        outputs_binding_info.append(
            parser.GetNetworkOutputBindingInfo(output_name))
    output_tensors = ann.make_output_tensors(outputs_binding_info)

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output_vectors = ann.workload_tensors_to_ndarray(output_tensors)

    # Load golden output file for result comparison.
    expected_output = np.load(
        os.path.join(shared_data_folder,
                     'caffe_parser/golden_output_caffe.npy'))

    # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this)
    np.testing.assert_almost_equal(output_vectors[0], expected_output, 4)
Example #10
def test_tflite_parser_end_to_end(shared_data_folder):
    parser = ann.ITfLiteParser()

    network = parser.CreateNetworkFromBinaryFile(
        os.path.join(shared_data_folder, "mock_model.tflite"))

    graphs_count = parser.GetSubgraphCount()
    graph_id = graphs_count - 1

    input_names = parser.GetSubgraphInputTensorNames(graph_id)
    input_binding_info = parser.GetNetworkInputBindingInfo(
        graph_id, input_names[0])

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)

    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(),
                                         ann.OptimizerOptions())
    assert 0 == len(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)
    assert "" == messages

    # Load test image data stored in input_lite.npy
    input_tensor_data = np.load(
        os.path.join(shared_data_folder, 'tflite_parser/input_lite.npy'))
    input_tensors = ann.make_input_tensors([input_binding_info],
                                           [input_tensor_data])

    output_tensors = []
    for index, output_name in enumerate(output_names):
        out_bind_info = parser.GetNetworkOutputBindingInfo(
            graph_id, output_name)
        out_tensor_info = out_bind_info[1]
        out_tensor_id = out_bind_info[0]
        output_tensors.append((out_tensor_id, ann.Tensor(out_tensor_info)))

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output_vectors = []
    for index, out_tensor in enumerate(output_tensors):
        output_vectors.append(out_tensor[1].get_memory_area())

    # Load golden output file for result comparison.
    expected_outputs = np.load(
        os.path.join(shared_data_folder,
                     'tflite_parser/golden_output_lite.npy'))

    # Check that output matches golden output
    assert (expected_outputs == output_vectors[0]).all()
Example #11
def create_network(model_file: str, backends: list):
    """
    Creates a network based on the model file and a list of backends.

    Args:
        model_file: User-specified model file.
        backends: List of backends to optimize the network for.

    Returns:
        net_id: Unique ID of the network to run.
        runtime: Runtime context for executing inference.
        input_binding_info: Contains essential information about the model input.
        output_binding_info: Used to map output tensor and its memory.
    """
    if not os.path.exists(model_file):
        raise FileNotFoundError(f'Model file not found for: {model_file}')

    # Determine which parser to create based on model file extension
    parser = None
    _, ext = os.path.splitext(model_file)
    if ext == '.tflite':
        parser = ann.ITfLiteParser()
    elif ext == '.pb':
        parser = ann.ITfParser()
    elif ext == '.onnx':
        parser = ann.IOnnxParser()
    assert (parser is not None)
    network = parser.CreateNetworkFromBinaryFile(model_file)

    # Specify backends to optimize network
    preferred_backends = []
    for b in backends:
        preferred_backends.append(ann.BackendId(b))

    # Select appropriate device context and optimize the network for that device
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)
    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(),
                                         ann.OptimizerOptions())
    print(f'Preferred backends: {backends}\n{runtime.GetDeviceSpec()}\n'
          f'Optimization warnings: {messages}')

    # Load the optimized network onto the Runtime device
    net_id, _ = runtime.LoadNetwork(opt_network)

    # Get input and output binding information
    graph_id = parser.GetSubgraphCount() - 1
    input_names = parser.GetSubgraphInputTensorNames(graph_id)
    input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])
    output_names = parser.GetSubgraphOutputTensorNames(graph_id)
    output_binding_info = []
    for output_name in output_names:
        outBindInfo = parser.GetNetworkOutputBindingInfo(graph_id, output_name)
        output_binding_info.append(outBindInfo)
    return net_id, runtime, input_binding_info, output_binding_info
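
A minimal usage sketch for create_network above; the model path and backend names here are illustrative assumptions, not taken from the example:

# Hypothetical call to create_network(); 'my_model.tflite' and the
# backend names are placeholders.
net_id, runtime, input_binding_info, output_binding_info = create_network(
    'my_model.tflite', ['CpuAcc', 'CpuRef'])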
Example #12
def test_load_network(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    net_id, messages = runtime.LoadNetwork(opt_network)
    assert "" == messages
    assert net_id == 0
Example #13
    def run(self):
        self.start()

        image = cv2.imread(self.image)
        image = cv2.resize(image, (128, 128))
        image = np.array(image, dtype=np.float32) / 255.0

        # ONNX, Caffe and TF parsers also exist.
        parser = ann.ITfLiteParser()
        network = parser.CreateNetworkFromBinaryFile(self.model)

        graph_id = 0
        input_names = parser.GetSubgraphInputTensorNames(graph_id)
        input_binding_info = parser.GetNetworkInputBindingInfo(
            graph_id, input_names[0])
        input_tensor_id = input_binding_info[0]
        input_tensor_info = input_binding_info[1]

        # Create a runtime object that will perform inference.
        options = ann.CreationOptions()
        runtime = ann.IRuntime(options)

        # Backend choices earlier in the list have higher preference.
        preferredBackends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
        opt_network, messages = ann.Optimize(network, preferredBackends,
                                             runtime.GetDeviceSpec(),
                                             ann.OptimizerOptions())

        # Load the optimized network into the runtime.
        net_id, _ = runtime.LoadNetwork(opt_network)
        # Create an inputTensor for inference.
        input_tensors = ann.make_input_tensors([input_binding_info], [image])

        # Get output binding information for an output layer by using the layer
        # name.
        output_names = parser.GetSubgraphOutputTensorNames(graph_id)
        output_binding_info = parser.GetNetworkOutputBindingInfo(
            0, output_names[0])
        output_tensors = ann.make_output_tensors([output_binding_info])

        start = timer()
        runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
        end = timer()
        print('Elapsed time is ', (end - start) * 1000, 'ms')

        output, output_tensor_info = ann.from_output_tensor(
            output_tensors[0][1])
        print(f"Output tensor info: {output_tensor_info}")
        print(output)
        j = np.argmax(output)
        if j == 0:
            print("Non-Fire")
        else:
            print("Fire")
Example #14
def test_enqueue_workload(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]
    input_tensors = random_runtime[3]
    output_tensors = random_runtime[4]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    net_id, _ = runtime.LoadNetwork(opt_network)
    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
Example #15
def test_unload_network_fails_for_invalid_net_id(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]

    ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    with pytest.raises(RuntimeError) as err:
        runtime.UnloadNetwork(9)

    expected_error_message = "Failed to unload network."
    assert expected_error_message in str(err.value)
Example #16
def test_load_network_properties_provided(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    properties = ann.INetworkProperties(True, True)
    net_id, messages = runtime.LoadNetwork(opt_network, properties)
    assert "" == messages
    assert net_id == 0
Example #17
def test_deserializer_end_to_end(shared_data_folder):
    parser = ann.IDeserializer()

    network = parser.CreateNetworkFromBinary(
        os.path.join(shared_data_folder, "mock_model.armnn"))

    # use 0 as a dummy value for layer_id, which is unused in the actual implementation
    layer_id = 0
    input_name = 'input_1'
    output_name = 'dense/Softmax'

    input_binding_info = parser.GetNetworkInputBindingInfo(
        layer_id, input_name)

    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(),
                                         ann.OptimizerOptions())
    assert 0 == len(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)
    assert "" == messages

    # Load test image data stored in input_lite.npy
    input_tensor_data = np.load(
        os.path.join(shared_data_folder, 'deserializer/input_lite.npy'))
    input_tensors = ann.make_input_tensors([input_binding_info],
                                           [input_tensor_data])

    output_tensors = []
    out_bind_info = parser.GetNetworkOutputBindingInfo(layer_id, output_name)
    out_tensor_info = out_bind_info[1]
    out_tensor_id = out_bind_info[0]
    output_tensors.append((out_tensor_id, ann.Tensor(out_tensor_info)))

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output_vectors = []
    for index, out_tensor in enumerate(output_tensors):
        output_vectors.append(out_tensor[1].get_memory_area())

    # Load golden output file for result comparison.
    expected_outputs = np.load(
        os.path.join(shared_data_folder,
                     'deserializer/golden_output_lite.npy'))

    # Check that output matches golden output
    assert (expected_outputs == output_vectors[0]).all()
Example #18
def test_load_network_properties_provided(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    inputSource = ann.MemorySource_Malloc
    outputSource = ann.MemorySource_Malloc
    properties = ann.INetworkProperties(False, inputSource, outputSource)
    net_id, messages = runtime.LoadNetwork(opt_network, properties)
    assert "" == messages
    assert net_id == 0
Example #19
def test_enqueue_workload_fails_with_empty_input_tensors(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]
    input_tensors = []
    output_tensors = random_runtime[4]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    net_id, _ = runtime.LoadNetwork(opt_network)
    with pytest.raises(RuntimeError) as err:
        runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    expected_error_message = "Number of inputs provided does not match network."
    assert expected_error_message in str(err.value)
Example #20
def test_serialize_to_dot(network_file, get_runtime, tmpdir):
    preferred_backends = get_runtime[0]
    network = get_runtime[1]
    runtime = get_runtime[2]
    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
    dot_file_path = os.path.join(tmpdir, 'mock_model.dot')
    """Check that serialized file does not exist at the start, gets created after SerializeToDot and is not empty"""
    assert not os.path.exists(dot_file_path)
    opt_network.SerializeToDot(dot_file_path)

    assert os.path.exists(dot_file_path)

    with open(dot_file_path) as res_file:
        expected_data = res_file.read()
        assert len(expected_data) > 1
        assert '[label=< [1,28,28,1] >]' in expected_data
Example #21
def __create_network(model_file: str, backends: list, parser=None):
    """Creates a network based on a file and parser type.

    Args:
        model_file (str): Path of the model file.
        backends (list): List of backends to use when running inference.
        parser: Parser instance (pyarmnn.ITfLiteParser/pyarmnn.IOnnxParser...).
            If None, the parser is deduced from the model file extension.

    Returns:
        int: Network ID.
        IParser: Parser instance used to create the network.
        IRuntime: Runtime object instance.
    """
    args = parse_command_line()
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    if parser is None:
        # try to determine what parser to create based on model extension
        _, ext = os.path.splitext(model_file)
        if ext == ".onnx":
            parser = ann.IOnnxParser()
        elif ext == ".tflite":
            parser = ann.ITfLiteParser()
    assert (parser is not None)

    network = parser.CreateNetworkFromBinaryFile(model_file)

    preferred_backends = []
    for b in backends:
        preferred_backends.append(ann.BackendId(b))

    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(),
                                         ann.OptimizerOptions())
    if args.verbose:
        for m in messages:
            warnings.warn(m)

    net_id, w = runtime.LoadNetwork(opt_network)
    if args.verbose and w:
        warnings.warn(w)

    return net_id, parser, runtime
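
A minimal usage sketch for __create_network above, leaving the parser to be deduced from the file extension; the model path and backend names are illustrative assumptions:

# Hypothetical call to __create_network(); the path and backend names
# are placeholders.
net_id, parser, runtime = __create_network('model.tflite', ['CpuAcc', 'CpuRef'])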
Example #22
def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir):
    preferred_backends = get_runtime[0]
    network = get_runtime[1]
    runtime = get_runtime[2]
    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
    """Create file, write to it and change mode to read-only"""
    dot_file_path = os.path.join(tmpdir, 'mock_model.dot')
    f = open(dot_file_path, "w+")
    f.write("test")
    f.close()
    os.chmod(dot_file_path, stat.S_IREAD)
    assert os.path.exists(dot_file_path)

    with pytest.raises(RuntimeError) as err:
        opt_network.SerializeToDot(dot_file_path)

    expected_error_message = "Failed to open dot file"
    assert expected_error_message in str(err.value)
Example #23
def test_network_properties_constructor(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    inputSource = ann.MemorySource_Undefined
    outputSource = ann.MemorySource_Undefined
    properties = ann.INetworkProperties(True, inputSource, outputSource)
    assert properties.m_AsyncEnabled == True
    assert properties.m_ProfilingEnabled == False
    assert properties.m_OutputNetworkDetailsMethod == ann.ProfilingDetailsMethod_Undefined
    assert properties.m_InputSource == ann.MemorySource_Undefined
    assert properties.m_OutputSource == ann.MemorySource_Undefined

    net_id, messages = runtime.LoadNetwork(opt_network, properties)
    assert "" == messages
    assert net_id == 0
Example #24
def test_onnx_parser_end_to_end(shared_data_folder):
    parser = ann.IOnnxParser()

    network = parser.CreateNetworkFromBinaryFile(
        os.path.join(shared_data_folder, 'mock_model.onnx'))

    # load test image data stored in input_onnx.npy
    input_binding_info = parser.GetNetworkInputBindingInfo("input")
    input_tensor_data = np.load(
        os.path.join(shared_data_folder,
                     'onnx_parser/input_onnx.npy')).astype(np.float32)

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(),
                                         ann.OptimizerOptions())

    assert 0 == len(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)

    assert "" == messages

    input_tensors = ann.make_input_tensors([input_binding_info],
                                           [input_tensor_data])
    output_tensors = ann.make_output_tensors(
        [parser.GetNetworkOutputBindingInfo("output")])

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output = ann.workload_tensors_to_ndarray(output_tensors)

    # Load golden output file for result comparison.
    golden_output = np.load(
        os.path.join(shared_data_folder, 'onnx_parser/golden_output_onnx.npy'))

    # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this)
    np.testing.assert_almost_equal(output[0], golden_output, decimal=4)
Example #25
def test_enqueue_workload_with_profiler(random_runtime):
    """
    Tests ArmNN's profiling extension
    """
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]
    input_tensors = random_runtime[3]
    output_tensors = random_runtime[4]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(),
                                  ann.OptimizerOptions())
    net_id, _ = runtime.LoadNetwork(opt_network)

    profiler = runtime.GetProfiler(net_id)
    # By default profiling should be turned off:
    assert profiler.IsProfilingEnabled() is False

    # Enable profiling:
    profiler.EnableProfiling(True)
    assert profiler.IsProfilingEnabled() is True

    # Run the inference:
    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    # Get profile output as a string:
    str_profile = profiler.as_json()

    # Verify that certain markers are present:
    assert len(str_profile) != 0
    assert str_profile.find('\"ArmNN\": {') > 0

    # Get events analysis output as a string:
    str_events_analysis = profiler.event_log()

    assert "Event Sequence - Name | Duration (ms) | Start (ms) | Stop (ms) | Device" in str_events_analysis

    assert profiler.thisown == 0
Example #26
input_binding_info = parser.GetNetworkInputBindingInfo(graph_id,
                                                       input_names[0])
input_tensor_id = input_binding_info[0]
input_tensor_info = input_binding_info[1]
print(f"""
tensor id: {input_tensor_id}, 
tensor info: {input_tensor_info}
""")

# Create a runtime object that will perform inference.
options = ann.CreationOptions()
runtime = ann.IRuntime(options)

# Backend choices earlier in the list have higher preference.
preferredBackends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
opt_network, messages = ann.Optimize(network, preferredBackends,
                                     runtime.GetDeviceSpec(),
                                     ann.OptimizerOptions())

# Load the optimized network into the runtime.
net_id, _ = runtime.LoadNetwork(opt_network)
print(f"Loaded network, id={net_id}")
# Create an inputTensor for inference.
input_tensors = ann.make_input_tensors([input_binding_info], [image])

# Get output binding information for an output layer by using the layer name.
output_names = parser.GetSubgraphOutputTensorNames(graph_id)
output_binding_info = parser.GetNetworkOutputBindingInfo(0, output_names[0])
output_tensors = ann.make_output_tensors([output_binding_info])

runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
results = ann.workload_tensors_to_ndarray(output_tensors)
model_dir ="../tvm-bench/mobilenet-v2.1.0-224quant"
tflite_model_file = os.path.join(model_dir, "mobilenet_v2_1.0_224_quant.tflite")
tflite_model_buf = open(tflite_model_file, "rb").read()

dtype="uint8"
image_data = load_test_image(dtype)

parser = ann.ITfLiteParser()
network = parser.CreateNetworkFromBinaryFile(tflite_model_file)

options = ann.CreationOptions()
rt = ann.IRuntime(options)
preferredBackends = [ ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]

opt_network, _ = ann.Optimize(network, preferredBackends, rt.GetDeviceSpec(), ann.OptimizerOptions())
net_id, _ = rt.LoadNetwork(opt_network)

input_names = parser.GetSubgraphInputTensorNames(0)
input_binding_info = parser.GetNetworkInputBindingInfo(0, input_names[0])
input_tensors = ann.make_input_tensors([input_binding_info], [image_data])

output_names = parser.GetSubgraphOutputTensorNames(0)
output_binding_info = parser.GetNetworkOutputBindingInfo(0, output_names[0])
output_tensors = ann.make_output_tensors([output_binding_info])


repeat = 10
numpy_time = np.zeros(repeat)
for i in range(0, repeat):
    start_time = time.time()
    # Assumed benchmark body: run one inference and record its latency in ms.
    rt.EnqueueWorkload(net_id, input_tensors, output_tensors)
    numpy_time[i] = (time.time() - start_time) * 1000
Example #28
def test_add_constant_layer_to_fully_connected():

    inputWidth = 1
    inputHeight = 1
    inputChannels = 5
    inputNum = 2

    outputChannels = 3
    outputNum = 2

    inputShape = (inputNum, inputChannels, inputHeight, inputWidth)
    outputShape = (outputNum, outputChannels)
    weightsShape = (inputChannels, outputChannels)
    biasShape = (outputChannels, )

    input = np.array([[1.0, 2.0, 3.0, 4.0, 5.0], [5.0, 4.0, 3.0, 2.0, 1.0]],
                     dtype=np.float32)

    weights = np.array(
        [[.5, 2., .5], [.5, 2., 1.], [.5, 2., 2.], [.5, 2., 3.], [.5, 2., 4.]],
        dtype=np.float32)

    biasValues = np.array([10, 20, 30], dtype=np.float32)

    expectedOutput = np.array([[
        0.5 + 1.0 + 1.5 + 2.0 + 2.5 + biasValues[0], 2.0 + 4.0 + 6.0 + 8.0 +
        10. + biasValues[1], 0.5 + 2.0 + 6.0 + 12. + 20. + biasValues[2]
    ],
                               [
                                   2.5 + 2.0 + 1.5 + 1.0 + 0.5 + biasValues[0],
                                   10.0 + 8.0 + 6.0 + 4.0 + 2. + biasValues[1],
                                   2.5 + 4.0 + 6.0 + 6. + 4. + biasValues[2]
                               ]],
                              dtype=np.float32)

    network = ann.INetwork()

    input_info = ann.TensorInfo(ann.TensorShape(inputShape),
                                ann.DataType_Float32, 0, 0, True)
    input_tensor = ann.ConstTensor(input_info, input)
    input_layer = network.AddInputLayer(0, "input")

    w_info = ann.TensorInfo(ann.TensorShape(weightsShape),
                            ann.DataType_Float32, 0, 0, True)
    w_tensor = ann.ConstTensor(w_info, weights)
    w_layer = network.AddConstantLayer(w_tensor, "weights")

    b_info = ann.TensorInfo(ann.TensorShape(biasShape), ann.DataType_Float32,
                            0, 0, True)
    b_tensor = ann.ConstTensor(b_info, biasValues)
    b_layer = network.AddConstantLayer(b_tensor, "bias")

    fc_descriptor = ann.FullyConnectedDescriptor()
    fc_descriptor.m_BiasEnabled = True
    fc_descriptor.m_ConstantWeights = True
    fully_connected = network.AddFullyConnectedLayer(fc_descriptor, "fc")

    output_info = ann.TensorInfo(ann.TensorShape(outputShape),
                                 ann.DataType_Float32)
    output_tensor = ann.Tensor(output_info, np.zeros([1, 1], dtype=np.float32))
    output = network.AddOutputLayer(0, "output")

    input_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(0))
    w_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(1))
    b_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(2))
    fully_connected.GetOutputSlot(0).Connect(output.GetInputSlot(0))

    input_layer.GetOutputSlot(0).SetTensorInfo(input_info)
    w_layer.GetOutputSlot(0).SetTensorInfo(w_info)
    b_layer.GetOutputSlot(0).SetTensorInfo(b_info)
    fully_connected.GetOutputSlot(0).SetTensorInfo(output_info)

    preferred_backends = [ann.BackendId('CpuRef')]
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)
    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(),
                                         ann.OptimizerOptions())
    net_id, messages = runtime.LoadNetwork(opt_network)

    input_tensors = [(0, input_tensor)]
    output_tensors = [(0, output_tensor)]
    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output_vectors = ann.workload_tensors_to_ndarray(output_tensors)

    assert (output_vectors == expectedOutput).all()