Example #1
def Flatten(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Flatten the input tensor into a 2D matrix.

    Flattening happens at axis specified by 'axis' attribute.
    First dimension of output tensor is the product of [d_0, ... d_{axis-1}] dimensions of input tensor.
    The last dimension is the product of the rest of input tensor dimensions: [d_{axis}, ..., d_n]
    """
    input_node = ng_inputs[0]
    axis = onnx_node.get_attribute_value('axis', 1)
    input_shape = list(input_node.shape)

    if axis < 0 or axis > len(input_shape):
        raise ValueError('Flatten node (%s): %d is not a valid value for `axis`.'
                         % (onnx_node.name, axis))

    first_dim = 1
    last_dim = 1

    for index in range(len(input_shape)):
        last_dim = last_dim * input_shape[index]
        if index < axis:
            first_dim = last_dim

    last_dim = last_dim // first_dim
    # the order in which we iterate over input tensor dimensions while reshaping it.
    input_order = list(range(len(input_shape)))
    output_shape = [first_dim, last_dim]

    return ng.reshape(input_node, input_order, output_shape)
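
The shape arithmetic above matches NumPy semantics. A minimal plain-NumPy sketch (no nGraph required) of the same first_dim/last_dim computation, assuming an input of shape (2, 3, 4) and axis=2:

import numpy as np
from functools import reduce
from operator import mul

x = np.arange(24).reshape(2, 3, 4)
axis = 2
first_dim = reduce(mul, x.shape[:axis], 1)  # 2 * 3 = 6
last_dim = reduce(mul, x.shape[axis:], 1)   # 4
flat = x.reshape(first_dim, last_dim)
assert flat.shape == (6, 4)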
Example #2
def flatten(node, axis):  # type: (NgraphNode, int) -> NgraphNode
    """Flatten the input tensor into a 2D matrix.

    Flattening happens at axis specified by 'axis' attribute.
    First dimension of output tensor is the product of [d_0, ... d_{axis-1}] dimensions of input tensor.
    The last dimension is the product of the rest of input tensor dimensions: [d_{axis}, ..., d_n]

    :param node: The tensor to be flattened.
    :param axis: The axis dividing shape.
    :return: The new node being a 2D matrix representing flattened input node.
    """
    shape = list(node.shape)
    first_dim = 1
    last_dim = 1
    if axis < 0:
        axis = len(shape) + axis
    for index in range(len(shape)):
        last_dim = last_dim * shape[index]
        if index < axis:
            first_dim = last_dim

    last_dim = last_dim // first_dim
    output_shape = [first_dim, last_dim]

    return ng.reshape(node, output_shape)
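
A negative axis counts from the end of the shape, as in NumPy. A quick plain-Python check of the normalization above, assuming shape (2, 3, 4) and axis=-1:

shape = [2, 3, 4]
axis = -1
if axis < 0:
    axis = len(shape) + axis
assert axis == 2  # first_dim = 2 * 3 = 6, last_dim = 4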
Example #3
def Squeeze(onnx_node,
            ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Remove single-dimensional entries from the shape of a tensor."""
    data = ng_inputs[0]
    axes_to_squeeze = onnx_node.get_attribute_value('axes')
    if axes_to_squeeze is None:
        raise ValueError('Squeeze node (%s): the "axes" attribute is mandatory.'
                         % onnx_node.name)

    for axis in axes_to_squeeze:
        if axis < 0 or axis >= len(data.shape):
            raise ValueError('Squeeze node (%s): `axes` attribute value %d is out of range.'
                             % (onnx_node.name, axis))
        if data.shape[axis] > 1:
            raise ValueError('Squeeze node (%s): can only remove single-dimensional axes: '
                             'shape[%d] = %d' % (onnx_node.name, axis, data.shape[axis]))

    out_shape = [
        data.shape[i] for i in range(len(data.shape))
        if i not in axes_to_squeeze
    ]
    return ng.reshape(data, out_shape)
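
For reference, the same out_shape computation in plain Python, assuming an input shape of (1, 3, 1, 2) and axes [0, 2]:

shape = [1, 3, 1, 2]
axes_to_squeeze = [0, 2]
out_shape = [d for i, d in enumerate(shape) if i not in axes_to_squeeze]
assert out_shape == [3, 2]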
Example #4
def test_reshape():

    element_type = Type.f32
    shape = Shape([2, 3])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function([ng.reshape(A, Shape([3, 2]), special_zero=False)],
                        parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([3, 2]))

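    # each write/read below transfers 2 * 3 float32 elements = 24 bytes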
    a.write(
        util.numpy_to_c(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)),
        24)

    result_arr = np.array([[0, 0], [0, 0], [0, 0]], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 24)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 24)

    a_arr = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    result_arr_ref = np.reshape(a_arr, (3, 2))

    assert np.allclose(result_arr, result_arr_ref)
Example #5
def make_reduction_op(ng_op_type, onnx_node, ng_input):
    # type: (Callable, NodeWrapper, NgraphNode) -> NgraphNode
    """
    Create an ngraph Op node for a reduction operation (min, max, sum, etc.).

    :param ng_op_type: an ngraph reduction factory function such as ng.max, etc.
    :param onnx_node: wrapped ONNX node
    :param ng_input: ngraph Op to be used as input to the reduction node
    """
    reduction_axes = get_reduction_axes(onnx_node, ng_input)
    if len(reduction_axes) > len(ng_input.shape):
        raise ValueError('Reduction node (%s): provided reduction axes count (%d) is larger '
                         'than input tensor rank (%d).'
                         % (onnx_node.name, len(reduction_axes), len(ng_input.shape)))
    op_node = ng_op_type(ng_input, reduction_axes)

    if onnx_node.get_attribute_value('keepdims', default=1):
        output_shape = list(ng_input.shape)
        # flatten reduced axes
        for idx in reduction_axes:
            output_shape[idx] = 1
        op_node = ng.reshape(op_node, output_shape)

    return op_node
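
The keepdims branch restores the reduced axes as size-1 dimensions, mirroring NumPy's keepdims. A small NumPy-only sketch, assuming an input of shape (2, 3, 4) reduced over axis 1, with np.max standing in for the ngraph reduction:

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
reduction_axes = {1}
reduced = x.max(axis=tuple(reduction_axes))  # shape (2, 4)
output_shape = [1 if i in reduction_axes else d for i, d in enumerate(x.shape)]
kept = reduced.reshape(output_shape)         # shape (2, 1, 4), like keepdims=1
assert kept.shape == (2, 1, 4)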
Example #6
def Unsqueeze(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Insert single-dimensional entries to the shape of a tensor.

    :param onnx_node: The ONNX node we create the operation for.
    :param ng_inputs: The nGraph nodes which provide input data.
    :return: The nGraph node with the unsqueeze operation applied to its data.
    """
    data = ng_inputs[0]
    axes = onnx_node.get_attribute_value('axes')
    if axes is None:
        raise ValueError('Unsqueeze node (%s): the "axes" attribute is mandatory.' % onnx_node.name)

    input_order = list(range(len(data.shape)))
    out_shape = list(data.shape)
    axes.sort()
    for axis in axes:
        # This condition forbids inserting new dimensions at positions greater than
        # len(out_shape), e.g. for an input tensor of shape (3, 4, 5) an 'axes'
        # attribute of [10] is invalid.
        if axis < 0 or axis > len(out_shape):
            raise ValueError('Unsqueeze node (%s): `axes` attribute value %d is out of range.'
                             % (onnx_node.name, axis))
        out_shape.insert(axis, 1)

    return ng.reshape(data, input_order, out_shape)
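
Sorting the axes first matters because each insert shifts the positions that follow it. A quick plain-Python check, assuming an input shape of (3, 4, 5) and axes [4, 0]:

out_shape = [3, 4, 5]
axes = [4, 0]
axes.sort()
for axis in axes:
    out_shape.insert(axis, 1)
assert out_shape == [1, 3, 4, 5, 1]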
Example #7
def test_get_constant_from_source_failed():
    dtype = np.int32  # np.int was removed in NumPy 1.24
    input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1")
    input2 = ng.parameter(Shape([1]), dtype=dtype, name="input_2")
    reshape = ng.reshape(input1, input2, special_zero=True)
    folded_const = ng.impl.util.get_constant_from_source(reshape.input(1).get_source_output())

    assert folded_const is None
Example #8
def test_get_constant_from_source_success():
    dtype = np.int32  # np.int was removed in NumPy 1.24
    input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1")
    input2 = ng.parameter(Shape([25]), dtype=dtype, name="input_2")
    shape_of = ng.shape_of(input2, name="shape_of")
    reshape = ng.reshape(input1, shape_of, special_zero=True)
    folded_const = ng.impl.util.get_constant_from_source(reshape.input(1).get_source_output())

    assert folded_const is not None
    assert folded_const.get_vector() == [25]
Example #9
def test_reshape():

    element_type = Type.f32
    shape = Shape([2, 3])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function([ng.reshape(A, Shape([3, 2]), special_zero=False)], parameter_list, "test")

    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))[0]

    expected = np.reshape(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), (3, 2))
    assert np.allclose(result, expected)
Example #10
def DepthToSpace(
        onnx_node,
        ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Rearranges (permutes) input tensor data from depth into blocks of spatial data.

    Values from the depth dimension (assuming NCHW layout) are moved in spatial blocks to the
    height and width dimensions.

    :param onnx_node: The ONNX node representing this operation.
    :param ng_inputs: The input tensors.
    :return: Tensor with shape [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].
    """
    data = ng_inputs[0]
    block_size = onnx_node.get_attribute_value('blocksize')
    if block_size is None:
        raise ValueError('DepthToSpace node (%s): missing required attribute "blocksize".'
                         % onnx_node.name)
    # Set default values to each dimension to be able to work with 3D or 4D data.
    n, c, h, w = 1, 1, 1, 1
    if len(data.shape) == 4:
        n, c, h, w = data.shape
    elif len(data.shape) == 3:
        c, h, w = data.shape
    else:
        raise ValueError('DepthToSpace node (%s): the provided tensor shape (%s) is not supported.'
                         % (onnx_node.name, str(data.shape)))
    # First disperse the data from the depth channel, then rearrange it so that the
    # appropriate chunks of data are close to their destination. Finally merge the
    # block dimensions into height and width.
    flat_node = ng.reshape(
        data, [n, block_size, block_size, c // (block_size**2), h, w])
    flat_node = reorder_axes(flat_node, [0, 3, 4, 1, 5, 2])
    return ng.reshape(
        flat_node, [n, c // (block_size**2), h * block_size, w * block_size])
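
The three steps above have a direct NumPy equivalent (this matches ONNX's default DCR mode). A toy check, assuming an NCHW tensor with blocksize 2:

import numpy as np

n, c, h, w, block_size = 1, 8, 2, 3, 2
x = np.arange(n * c * h * w, dtype=np.float32).reshape(n, c, h, w)
y = x.reshape(n, block_size, block_size, c // block_size**2, h, w)
y = y.transpose(0, 3, 4, 1, 5, 2)   # same permutation as reorder_axes above
y = y.reshape(n, c // block_size**2, h * block_size, w * block_size)
assert y.shape == (1, 2, 4, 6)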
Example #11
def numpy_style_broadcast_for_binary_operation(onnx_node, ng_inputs):
    # type: (NodeWrapper, List[NgraphNode]) -> Tuple[NgraphNode, NgraphNode]
    """
    Cast shape of two nodes to make them compatible for an element-wise binary operation.

    :param onnx_node: a wrapped ONNX node
    :param ng_inputs: left and right node (inputs of the binary op)
    :return: left and right node after broadcasting
    """
    left = ng_inputs[0]
    right = ng_inputs[1]

    dimensions_identical = list(left.shape) == list(right.shape)
    if dimensions_identical:
        return left, right

    try:
        output_shape, left_full_shape, right_full_shape = numpy_style_broadcast_output_shape(
            left.shape, right.shape)
    except UserInputError:
        raise UserInputError('%s node (%s): unable to broadcast shapes %s and %s.'
                             % (onnx_node.op_type, onnx_node.name, left.shape, right.shape))

    if list(right.shape) != output_shape:
        one_pos = [i for i, dim in enumerate(right_full_shape) if dim == 1]
        right = ng.reshape(right,
                           [dim for dim in right.shape if dim != 1])  # Squeeze
        right = ng.broadcast(right, output_shape, broadcast_axes=one_pos)

    if list(left.shape) != output_shape:
        one_pos = [i for i, dim in enumerate(left_full_shape) if dim == 1]
        left = ng.reshape(left, [dim for dim in left.shape if dim != 1])
        left = ng.broadcast(left, output_shape, broadcast_axes=one_pos)

    return left, right
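
The output shape follows standard NumPy broadcasting rules: shapes are right-aligned and each dimension pair must be equal or contain a 1. A quick check with np.broadcast_shapes (NumPy 1.20+):

import numpy as np

assert np.broadcast_shapes((3, 1, 5), (4, 5)) == (3, 4, 5)
assert np.broadcast_shapes((2, 1), (1, 3)) == (2, 3)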
Example #12
def Reshape(onnx_node,
            ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Reshape the input tensor similar to numpy.reshape.

    At most one dimension of the new shape can be -1. In this case, the value is inferred from
    the size of the tensor and the remaining dimensions. A dimension could also be 0, in which
    case the actual dimension value is going to be copied from the shape argument.
    """
    data = ng_inputs[0]
    output_shape = onnx_node.get_attribute_value('shape', data.shape)

    if output_shape == data.shape:
        return data

    output_shape = infer_dimensions(onnx_node.name, data.shape, output_shape)
    return ng.reshape(data, output_shape)
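
infer_dimensions resolves the -1 and 0 entries described in the docstring. A minimal sketch of the rule (a hypothetical stand-in, not the actual helper):

def infer_shape(input_shape, output_shape):
    # 0 copies the corresponding input dimension (hypothetical sketch)
    out = [input_shape[i] if d == 0 else d for i, d in enumerate(output_shape)]
    if -1 in out:
        known = 1
        for d in out:
            if d != -1:
                known *= d
        total = 1
        for d in input_shape:
            total *= d
        out[out.index(-1)] = total // known  # infer the single -1 entry
    return out

assert infer_shape([2, 3, 4], [0, -1]) == [2, 12]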
Example #13
def PRelu(onnx_node,
          ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Apply the Parametric Relu function to the input tensor elementwise.

    f(x) = slope * x for x < 0, f(x) = x for x >= 0
    The slope parameter is passed to the node as its second input.
    """
    x, slope = ng_inputs
    if len(slope.shape) == 0:
        return ng.maximum(slope * x, x)
    elif slope.shape[0] == 1:
        slope = ng.broadcast_to(slope, [x.shape[0], 1])
        slope = ng.reshape(slope, [x.shape[0]])
        return ng.maximum(ng.broadcast_to(slope, x.shape, 0) * x, x)
    else:
        return ng.maximum(ng.broadcast_to(slope, x.shape, 1) * x, x)
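
The maximum(slope * x, x) form is equivalent to the piecewise definition whenever slope <= 1, which is the usual case for PRelu. A quick NumPy check:

import numpy as np

x = np.array([-2.0, -1.0, 0.0, 3.0], dtype=np.float32)
slope = 0.25
assert np.allclose(np.maximum(slope * x, x), np.where(x < 0, slope * x, x))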
Example #14
def flatten_innermost_empty_dims(node):  # type: (NgraphNode) -> NgraphNode
    """Flatten input shape if there is at least one innermost dimension equal to one.

    node(shape: 1,2,3,1,1,1) -> node(shape: 1,2,3)
    node(shape: 1,2,3) -> node(shape: 1,2,3)
    node(shape: 1) -> node(shape: 1)

    :param node: The input node whose data we want to flatten.
    """
    shape = list(node.shape)
    if len(shape) < 2:
        return node

    if shape[-1] == 1:
        output_shape = list(shape)
        while len(output_shape) > 1 and output_shape[-1] == 1:
            output_shape.pop()
        return ng.reshape(node, output_shape)
    else:
        return node
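
The loop pops trailing ones until none remain (or a single dimension is left), matching the docstring examples:

shape = [1, 2, 3, 1, 1, 1]
out = list(shape)
while len(out) > 1 and out[-1] == 1:
    out.pop()
assert out == [1, 2, 3]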
Example #15
def make_reduction_op(ng_op_type, onnx_node, ng_input):
    # type: (Callable, NodeWrapper, NgraphNode) -> NgraphNode
    """
    Create an ngraph Op node for a reduction operation (min, max, sum, etc.).

    :param ng_op_type: an ngraph reduction factory function such as ng.max, etc.
    :param onnx_node: wrapped ONNX node
    :param ng_input: ngraph Op to be used as input to the reduction node
    """
    reduction_axes = get_reduction_axes(onnx_node, ng_input)
    op_node = ng_op_type(ng_input, reduction_axes)

    if onnx_node.get_attribute_value('keepdims', default=1):
        output_shape = list(ng_input.shape)
        # flatten reduced axes
        for idx in reduction_axes:
            output_shape[idx] = 1
        op_node = ng.reshape(op_node, list(range(len(op_node.shape))),
                             output_shape)

    return op_node
Example #16
def Reshape(onnx_node,
            ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Reshape the input tensor similar to numpy.reshape.

    At most one dimension of the new shape can be -1. In this case, the value is inferred from
    the size of the tensor and the remaining dimensions. A dimension could also be 0, in which
    case the actual dimension value is going to be copied from the shape argument.
    """
    data = ng_inputs[0]
    output_shape = ng_inputs[1]
    # Be input data type agnostic as long as it has correct interface.
    if hasattr(output_shape, 'get_data'):
        output_shape = output_shape.get_data().tolist()
    else:
        raise NotImplementedError('Reshape node (%s) does not support a shape input of any '
                                  'type other than Constant.' % onnx_node.name)

    if output_shape == data.shape:
        return data

    output_shape = infer_dimensions(onnx_node.name, data.shape, output_shape)
    return ng.reshape(data, output_shape)
Example #17
def reorder_axes(node,
                 axes_order):  # type: (NgraphNode, List[int]) -> NgraphNode
    """Permute axes according to specified axes_order parameter.

    :param node: The node which axes we want to permute.
    :param axes_order: The permutation of node tensor axes.
    :return: New node with permuted axes.
    """
    out_shape = list(node.shape)
    if axes_order is None:
        axes_order = list(range(len(node.shape)))
    elif len(axes_order) != len(node.shape):
        raise ng.exceptions.UserInputError('Node (%s): provided axes count is different from '
                                           'the input tensor rank.' % node.name)
    else:
        for idx, axis in enumerate(axes_order):
            try:
                out_shape[idx] = node.shape[axis]
            except IndexError:
                raise ng.exceptions.UserInputError('Node (%s): provided axes indices are out '
                                                   'of range.' % node.name)
    return ng.reshape(node, axes_order, out_shape)
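
In NumPy terms, reorder_axes is a transpose: the output shape is the input shape permuted by axes_order. A quick check:

import numpy as np

x = np.zeros((2, 3, 4))
axes_order = [2, 0, 1]
out_shape = [x.shape[axis] for axis in axes_order]
assert out_shape == [4, 2, 3]
assert np.transpose(x, axes_order).shape == tuple(out_shape)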

import argparse
import typing
from functools import reduce

import ngraph
import numpy as np


def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
    """Create a network on the fly from the source code using nGraph."""
    def shape_and_length(shape: list) -> typing.Tuple[list, int]:
        length = reduce(lambda x, y: x * y, shape)
        return shape, length

    weights = np.fromfile(args.model, dtype=np.float32)
    weights_offset = 0
    padding_begin = padding_end = [0, 0]

    # input
    input_shape = [64, 1, 28, 28]
    param_node = ngraph.parameter(input_shape, np.float32, 'Parameter')

    # convolution 1
    conv_1_kernel_shape, conv_1_kernel_length = shape_and_length([20, 1, 5, 5])
    conv_1_kernel = ngraph.constant(
        weights[0:conv_1_kernel_length].reshape(conv_1_kernel_shape))
    weights_offset += conv_1_kernel_length
    conv_1_node = ngraph.convolution(param_node, conv_1_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 1
    add_1_kernel_shape, add_1_kernel_length = shape_and_length([1, 20, 1, 1])
    add_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_1_kernel_length].reshape(add_1_kernel_shape), )
    weights_offset += add_1_kernel_length
    add_1_node = ngraph.add(conv_1_node, add_1_kernel)

    # maxpool 1
    maxpool_1_node = ngraph.max_pool(add_1_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # convolution 2
    conv_2_kernel_shape, conv_2_kernel_length = shape_and_length(
        [50, 20, 5, 5])
    conv_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                conv_2_kernel_length].reshape(conv_2_kernel_shape), )
    weights_offset += conv_2_kernel_length
    conv_2_node = ngraph.convolution(maxpool_1_node, conv_2_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 2
    add_2_kernel_shape, add_2_kernel_length = shape_and_length([1, 50, 1, 1])
    add_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_2_kernel_length].reshape(add_2_kernel_shape), )
    weights_offset += add_2_kernel_length
    add_2_node = ngraph.add(conv_2_node, add_2_kernel)

    # maxpool 2
    maxpool_2_node = ngraph.max_pool(add_2_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # reshape 1
    reshape_1_dims, reshape_1_length = shape_and_length([2])
    # workaround to get int64 weights from float32 ndarray w/o unnecessary copying
    dtype_weights = np.frombuffer(
        weights[weights_offset:weights_offset + 2 * reshape_1_length],
        dtype=np.int64,
    )
    reshape_1_kernel = ngraph.constant(dtype_weights)
    weights_offset += 2 * reshape_1_length
    reshape_1_node = ngraph.reshape(maxpool_2_node, reshape_1_kernel, True)

    # matmul 1
    matmul_1_kernel_shape, matmul_1_kernel_length = shape_and_length(
        [500, 800])
    matmul_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_1_kernel_length].reshape(matmul_1_kernel_shape), )
    weights_offset += matmul_1_kernel_length
    matmul_1_node = ngraph.matmul(reshape_1_node, matmul_1_kernel, False, True)

    # add 3
    add_3_kernel_shape, add_3_kernel_length = shape_and_length([1, 500])
    add_3_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_3_kernel_length].reshape(add_3_kernel_shape), )
    weights_offset += add_3_kernel_length
    add_3_node = ngraph.add(matmul_1_node, add_3_kernel)

    # ReLU
    relu_node = ngraph.relu(add_3_node)

    # reshape 2
    reshape_2_kernel = ngraph.constant(dtype_weights)
    reshape_2_node = ngraph.reshape(relu_node, reshape_2_kernel, True)

    # matmul 2
    matmul_2_kernel_shape, matmul_2_kernel_length = shape_and_length([10, 500])
    matmul_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_2_kernel_length].reshape(matmul_2_kernel_shape), )
    weights_offset += matmul_2_kernel_length
    matmul_2_node = ngraph.matmul(reshape_2_node, matmul_2_kernel, False, True)

    # add 4
    add_4_kernel_shape, add_4_kernel_length = shape_and_length([1, 10])
    add_4_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_4_kernel_length].reshape(add_4_kernel_shape), )
    weights_offset += add_4_kernel_length
    add_4_node = ngraph.add(matmul_2_node, add_4_kernel)

    # softmax
    softmax_axis = 1
    softmax_node = ngraph.softmax(add_4_node, softmax_axis)

    # result
    result_node = ngraph.result(softmax_node)
    return ngraph.impl.Function(result_node, [param_node], 'lenet')
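
A sketch of how this builder might be driven (the --model flag and the file name are illustrative assumptions; the weights file must contain the raw float32 LeNet parameters in the order consumed above):

parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', required=True,
                    help='Path to a raw float32 weights file (e.g. lenet.bin)')
args = parser.parse_args(['--model', 'lenet.bin'])  # illustrative path
function = create_ngraph_function(args)  # builds the LeNet graph defined above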