Example #1
0
def test_add_caffe_style():
    """Test Caffe2-style broadcasting with axis option

    Currently there is still discussion whether this explicit style of broadcasting should be
    enabled, or if it should be substituted by numpy-style implicit broadcasting.
    https://github.com/onnx/onnx/issues/83
    """
    def _check_broadcast_add(right_shape, axis):
        # Build an Add node that broadcasts ones of `right_shape` onto zeros
        # of shape (2, 3, 4, 5), anchored at `axis`, then run it and verify
        # the result is all ones with the left operand's shape.
        left_operand = np.zeros((2, 3, 4, 5), dtype=np.float32)
        right_operand = np.ones(right_shape, dtype=np.float32)
        node = make_node("Add", ["X", "Y"], ["Z"],
                         name="test_node",
                         broadcast=1,
                         axis=axis)
        graph = make_graph([node], "test_graph", [
            make_tensor_value_info("X", onnx.TensorProto.FLOAT,
                                   left_operand.shape),
            make_tensor_value_info("Y", onnx.TensorProto.FLOAT,
                                   right_operand.shape)
        ], [
            make_tensor_value_info("Z", onnx.TensorProto.FLOAT, left_operand.shape)
        ])
        onnx_model = make_model(graph, producer_name='ngraph ONNXImporter')
        transformer = get_transformer()
        ng_model = import_onnx_model(onnx_model)[0]
        computation = transformer.computation(ng_model['output'],
                                              *ng_model['inputs'])
        assert np.array_equal(computation(left_operand, right_operand),
                              np.ones((2, 3, 4, 5)))

    # shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
    _check_broadcast_add((3, 4), axis=1)

    # shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0
    _check_broadcast_add((2,), axis=0)
Example #2
0
def convert_and_calculate(onnx_node, data_inputs, data_outputs):
    # type: (NodeProto, List[np.ndarray], List[np.ndarray]) -> List[np.ndarray]
    """
    Convert ONNX node to ngraph node and perform computation on input data.

    :param onnx_node: ONNX NodeProto describing a computation node
    :param data_inputs: list of numpy ndarrays with input data
    :param data_outputs: list of numpy ndarrays with expected output data
    :return: list of numpy ndarrays with computed output
    """
    transformer = get_transformer()

    def _value_infos(names, arrays):
        # Describe each named tensor as FLOAT with the shape of its sample array.
        return [make_tensor_value_info(name, onnx.TensorProto.FLOAT, array.shape)
                for name, array in zip(names, arrays)]

    graph = make_graph([onnx_node], 'test_graph',
                       _value_infos(onnx_node.input, data_inputs),
                       _value_infos(onnx_node.output, data_outputs))
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    results = []
    for ng_model in import_onnx_model(model):
        run = transformer.computation(ng_model['output'], *ng_model['inputs'])
        results.append(run(*data_inputs))
    return results
Example #3
0
def import_and_compute_dot(input_left, input_right):
    """Build an ONNX dot-op model for the two inputs and return its computed result."""
    lhs = np.array(input_left)
    rhs = np.array(input_right)
    onnx_model = make_onnx_model_for_dot_op(lhs, rhs)
    transformer = get_transformer()
    ng_model = import_onnx_model(onnx_model)[0]
    run = transformer.computation(ng_model['output'], *ng_model['inputs'])
    return run(lhs, rhs)
Example #4
0
def import_and_compute_conv(x, weights, transpose=False, **attributes):
    """Build an ONNX (transposed) convolution model and run it on x / weights.

    `attributes` are forwarded verbatim to the ONNX node (pads, strides, ...).
    """
    x = np.array(x)
    weights = np.array(weights)
    model = make_onnx_model_for_conv_op(x.shape, weights.shape,
                                        transpose=transpose, **attributes)
    ng_model = import_onnx_model(model)[0]
    run = get_transformer().computation(ng_model['output'], *ng_model['inputs'])
    return run(x, weights)
Example #5
0
def test_simple_graph():
    """Chain two Add nodes (Y = (A + B) + C) and check the computed sum."""
    def _scalar_info(name):
        # Every tensor in this graph is a one-element FLOAT vector.
        return make_tensor_value_info(name, onnx.TensorProto.FLOAT, [1])

    nodes = [make_node('Add', ['A', 'B'], ['X'], name='add_node1'),
             make_node('Add', ['X', 'C'], ['Y'], name='add_node2')]
    graph = make_graph(nodes, 'test_graph',
                       [_scalar_info('A'), _scalar_info('B'), _scalar_info('C')],
                       [_scalar_info('Y')])
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    ng_model = import_onnx_model(model)[0]
    transformer = ng.transformers.make_transformer()
    computation = transformer.computation(ng_model['output'], *ng_model['inputs'])
    assert np.array_equal(computation(4, 5, 6), np.array([15.0], dtype=np.float32))
Example #6
0
def test_simple_graph():
    """Two cascaded Add nodes: X = A + B, Y = X + C; verify Y for (4, 5, 6)."""
    add_1 = make_node("Add", ["A", "B"], ["X"], name="add_node1")
    add_2 = make_node("Add", ["X", "C"], ["Y"], name="add_node2")
    input_infos = [make_tensor_value_info(tensor_name, onnx.TensorProto.FLOAT, [1])
                   for tensor_name in ("A", "B", "C")]
    output_infos = [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [1])]
    graph = make_graph([add_1, add_2], "test_graph", input_infos, output_infos)
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    ng_model = import_onnx_model(model)[0]
    transformer = ng.transformers.make_transformer()
    computation = transformer.computation(ng_model['output'], *ng_model['inputs'])
    assert np.array_equal(computation(4, 5, 6), np.array([15.0], dtype=np.float32))
Example #7
0
def import_and_compute_gemm(input_a, input_b, input_c, **kwargs):
    """Build an ONNX Gemm model for the three inputs and return its computed result.

    Accepts snake_case aliases `trans_a` / `trans_b` for the ONNX attributes
    `transA` / `transB` and forwards all remaining kwargs to the model builder.
    """
    input_a, input_b, input_c = np.array(input_a), np.array(input_b), np.array(input_c)

    # Rename the aliases with membership tests rather than truthiness:
    # an explicit trans_a=0 / False must still be translated, not leaked
    # through to make_onnx_model_for_gemm_op as an unknown 'trans_a' kwarg.
    if 'trans_a' in kwargs:
        kwargs['transA'] = kwargs.pop('trans_a')

    if 'trans_b' in kwargs:
        kwargs['transB'] = kwargs.pop('trans_b')

    onnx_model = make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs)
    transformer = get_transformer()
    ng_model = import_onnx_model(onnx_model)[0]
    computation = transformer.computation(ng_model['output'], *ng_model['inputs'])
    return computation(input_a, input_b, input_c)
Example #8
0
def import_model_make_computation(onnx_model):
    """Import an ONNX model and return a callable computation for its first function."""
    transformer = get_transformer()
    imported = import_onnx_model(onnx_model)[0]
    return transformer.computation(imported['output'], *imported['inputs'])
Example #9
0
 def prepare(cls, onnx_model, device='CPU', **kwargs):
     # type: (onnx.ModelProto, str, Dict) -> NgraphBackendRep
     """Import `onnx_model` and wrap its first function in a backend rep for `device`."""
     super(NgraphBackend, cls).prepare(onnx_model, device, **kwargs)
     imported_model = import_onnx_model(onnx_model)[0]
     return NgraphBackendRep(imported_model, device)