Esempio n. 1
0
def test_convolution_with_padding():
    """Check ng.convolution with dilation (2, 2) and zero padding.

    The 3x3 filter holds a single 1 at its centre, so the dilated
    convolution picks a 6x6 window out of the 10x10 ramp image.
    NOTE(review): despite the test's name, both pads are [0, 0].
    """
    dtype = Type.f32
    img_shape = Shape([1, 1, 10, 10])
    flt_shape = Shape([1, 1, 3, 3])
    data = Parameter(dtype, img_shape)
    filters = Parameter(dtype, flt_shape)
    parameter_list = [data, filters]

    img_vals = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    flt_vals = np.zeros((1, 1, 3, 3), dtype=np.float32)
    flt_vals[0, 0, 1, 1] = 1
    strides = [1, 1]
    dilations = [2, 2]
    pads_begin = [0, 0]
    pads_end = [0, 0]

    conv_node = ng.convolution(data, filters, strides, pads_begin, pads_end,
                               dilations)
    function = Function([conv_node], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)

    img_tensor = backend.create_tensor(dtype, img_shape)
    flt_tensor = backend.create_tensor(dtype, flt_shape)

    img_tensor.write(util.numpy_to_c(img_vals), 10 * 10 * 4)
    flt_tensor.write(util.numpy_to_c(flt_vals), 3 * 3 * 4)

    out_vals = np.zeros((1, 1, 6, 6), dtype=np.float32)
    out_tensor = backend.create_tensor(dtype, Shape([1, 1, 6, 6]))
    out_tensor.write(util.numpy_to_c(out_vals), 6 * 6 * 4)
    handle = backend.compile(function)
    handle.call([out_tensor], [img_tensor, flt_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 6 * 6 * 4)

    # Reference result from the pure-numpy helper.
    expected = convolution2d(img_vals[0][0], flt_vals[0][0], strides,
                             dilations, pads_begin, pads_end).reshape(1, 1, 6, 6)
    assert np.allclose(out_vals, expected)
Esempio n. 2
0
def test_constant():
    """A 3x3 f32 Constant node holding 0..8 evaluates to arange(9).reshape(3, 3)."""
    dtype = Type.f32
    parameter_list = []
    const_node = Constant(dtype, Shape([3, 3]), list(range(9)))
    function = Function([const_node], parameter_list, "test")

    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation()[0]

    assert np.allclose(result, np.arange(9).reshape(3, 3))
Esempio n. 3
0
def one_hot(node,
            shape,
            one_hot_axis,
            name=None):  # type: (Node, TensorShape, int, str) -> Node
    """Create a node performing one-hot encoding on the input data.

    :param node: The input node providing data for operation.
    :param shape: The output node shape including the new one-hot axis.
    :param one_hot_axis: The index within the output shape of the new one-hot axis.
    :param name: The optional name for new output node.
    :return: New node performing one-hot operation.
    """
    # NOTE: `name` is accepted for API symmetry but not forwarded here.
    output_shape = Shape(shape)
    return OneHot(node, output_shape, one_hot_axis)
Esempio n. 4
0
def test_convolution_with_data_dilation():
    """Convolution with data dilation (2, 2): a 10x10 image yields a 17x17 output.

    Uses an all-ones 3x3 filter and compares against the convolution2d
    numpy reference.
    """
    dtype = Type.f32
    img_shape = Shape([1, 1, 10, 10])
    flt_shape = Shape([1, 1, 3, 3])
    img_param = Parameter(dtype, img_shape)
    flt_param = Parameter(dtype, flt_shape)
    parameter_list = [img_param, flt_param]

    img_vals = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    flt_vals = np.ones((1, 1, 3, 3), dtype=np.float32)
    strides = [1, 1]
    dilation = [1, 1]
    padding_below = [0, 0]
    padding_above = [0, 0]
    data_dilation = [2, 2]

    conv_node = Convolution(img_param, flt_param, Strides(strides),
                            Strides(dilation), CoordinateDiff(padding_below),
                            CoordinateDiff(padding_above),
                            Strides(data_dilation))
    function = Function(NodeVector([conv_node]), parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    img_tensor = backend.create_tensor(dtype, img_shape)
    flt_tensor = backend.create_tensor(dtype, flt_shape)

    img_tensor.write(util.numpy_to_c(img_vals), 0, 10 * 10 * 4)
    flt_tensor.write(util.numpy_to_c(flt_vals), 0, 3 * 3 * 4)

    out_vals = np.zeros((1, 1, 17, 17), dtype=np.float32)
    out_tensor = backend.create_tensor(dtype, Shape([1, 1, 17, 17]))
    out_tensor.write(util.numpy_to_c(out_vals), 0, 17 * 17 * 4)
    handle = backend.compile(function)
    handle.call([out_tensor], [img_tensor, flt_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 0, 17 * 17 * 4)

    expected = convolution2d(img_vals[0][0], flt_vals[0][0], strides,
                             dilation, padding_below, padding_above,
                             data_dilation).reshape(1, 1, 17, 17)
    assert np.allclose(out_vals, expected)
Esempio n. 5
0
def test_onehot():
    """OneHot of [1, 0, 2] along axis 0 matches np.eye(3) row selection."""
    dtype = Type.f32
    input_param = Parameter(dtype, Shape([3]))
    parameter_list = [input_param]
    onehot_node = OneHot(input_param, Shape([3, 3]), 0)
    function = Function(NodeVector([onehot_node]), parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    in_view = backend.make_primary_tensor_view(dtype, Shape([3]))
    out_view = backend.make_primary_tensor_view(dtype, Shape([3, 3]))

    in_view.write(util.numpy_to_c(np.array([1, 0, 2], dtype=np.float32)), 0, 12)

    out_vals = np.zeros((3, 3), dtype=np.float32)
    out_view.write(util.numpy_to_c(out_vals), 0, 36)
    cf.call([in_view], [out_view])
    out_view.read(util.numpy_to_c(out_vals), 0, 36)

    # One-hot encoding of the indices via identity-matrix row lookup.
    expected = np.eye(3)[np.array([1, 0, 2])]
    assert np.allclose(out_vals, expected)
Esempio n. 6
0
def avg_pool(
        x,  # type: Node
        window_shape,  # type: TensorShape
        strides=None,  # type: List[int]
        padding_above=None,  # type: List[int]
        padding_below=None,  # type: List[int]
        zero_pad=True,  # type: bool
        name=None,  # type: str
):
    # type: (...) -> Node
    """Return average pooling node.

    :param x: The input node.
    :param window_shape: The pooling window shape (one entry per spatial dim).
    :param strides: Window movement strides; defaults to all 1s.
    :param padding_above: Padding added above/after each spatial dim; defaults to 0s.
    :param padding_below: Padding added below/before each spatial dim; defaults to 0s.
    :param zero_pad: Whether padded elements count in the average denominator.
    :param name: Optional name for the new node (currently not forwarded).
    :return: New AvgPool node.
    """
    if strides is None:
        strides = [1] * len(
            window_shape
        )  # Default to as many 1s as spatial dimensions of input.
    if padding_above is None:
        padding_above = [0] * len(window_shape)
    if padding_below is None:
        padding_below = [0] * len(window_shape)

    # NOTE(review): padding_above is passed before padding_below here; the
    # nGraph AvgPool constructor conventionally takes padding_below first —
    # confirm the intended argument order against the AvgPool binding.
    return AvgPool(x, Shape(window_shape), Strides(strides),
                   Shape(padding_above), Shape(padding_below), zero_pad)
Esempio n. 7
0
def test_broadcast():
    """Broadcasting [1, 2, 3] over axis 0 tiles the vector into three rows."""
    dtype = Type.f32
    input_param = Parameter(dtype, Shape([3]))
    parameter_list = [input_param]
    bcast_node = Broadcast(input_param, Shape([3, 3]), AxisSet({0}))
    function = Function(NodeVector([bcast_node]), parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    in_view = backend.make_primary_tensor_view(dtype, Shape([3]))
    out_view = backend.make_primary_tensor_view(dtype, Shape([3, 3]))

    in_view.write(util.numpy_to_c(np.array([1, 2, 3], dtype=np.float32)), 0, 12)

    out_vals = np.zeros((3, 3), dtype=np.float32)
    out_view.write(util.numpy_to_c(out_vals), 0, 36)
    cf.call([in_view], [out_view])
    out_view.read(util.numpy_to_c(out_vals), 0, 36)

    # numpy broadcasting of a row vector against a zero column reproduces
    # the expected tiled result.
    zeros_col = np.array([[0], [0], [0]], dtype=np.float32)
    row = np.array([[1, 2, 3]], dtype=np.float32)
    assert np.allclose(out_vals, np.add(zeros_col, row))
Esempio n. 8
0
def test_sum():
    """Sum over axis 1 of a [1, 4] tensor reduces [1, 2, 3, 4] to 10."""
    dtype = Type.f32
    in_shape = Shape([1, 4])
    input_param = Parameter(dtype, in_shape)
    parameter_list = [input_param]
    sum_node = Sum(input_param, AxisSet({1}))
    function = Function(NodeVector([sum_node]), parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    in_tensor = backend.create_tensor(dtype, in_shape)
    out_tensor = backend.create_tensor(dtype, Shape([1]))

    in_tensor.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 0, 16)

    out_vals = np.array([0], dtype=np.float32)
    out_tensor.write(util.numpy_to_c(out_vals), 0, 4)
    backend.call(backend.compile(function), [out_tensor], [in_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 0, 4)

    expected = np.sum(np.array([1, 2, 3, 4], dtype=np.float32))
    assert np.allclose(out_vals[0], expected)
Esempio n. 9
0
def test_sum():
    """Sum over axis 1 of a [1, 4] tensor reduces [1, 2, 3, 4] to 10 (call-frame API)."""
    dtype = Type.f32
    in_shape = Shape([1, 4])
    input_param = Parameter(dtype, in_shape)
    parameter_list = [input_param]
    sum_node = Sum(input_param, AxisSet({1}))
    function = Function(NodeVector([sum_node]), parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    in_view = backend.make_primary_tensor_view(dtype, in_shape)
    out_view = backend.make_primary_tensor_view(dtype, Shape([1]))

    in_view.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 0, 16)

    out_vals = np.array([0], dtype=np.float32)
    out_view.write(util.numpy_to_c(out_vals), 0, 4)
    cf.call([in_view], [out_view])
    out_view.read(util.numpy_to_c(out_vals), 0, 4)

    expected = np.sum(np.array([1, 2, 3, 4], dtype=np.float32))
    assert np.allclose(out_vals[0], expected)
Esempio n. 10
0
def test_reshape():
    """Reshape a [2, 3] tensor to [3, 2] with identity axis order."""
    dtype = Type.f32
    in_shape = Shape([2, 3])
    input_param = Parameter(dtype, in_shape)
    parameter_list = [input_param]
    reshape_node = Reshape(input_param, AxisVector([0, 1]), Shape([3, 2]))
    function = Function(NodeVector([reshape_node]), parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    in_tensor = backend.create_tensor(dtype, in_shape)
    out_tensor = backend.create_tensor(dtype, Shape([3, 2]))

    in_tensor.write(util.numpy_to_c(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)), 0, 24)

    out_vals = np.zeros((3, 2), dtype=np.float32)
    out_tensor.write(util.numpy_to_c(out_vals), 0, 24)
    backend.call(backend.compile(function), [out_tensor], [in_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 0, 24)

    source = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    assert np.allclose(out_vals, np.reshape(source, (3, 2)))
Esempio n. 11
0
def test_onehot():
    """OneHot of [1, 0, 2] along axis 0 matches identity-matrix row selection."""
    dtype = Type.f32
    input_param = Parameter(dtype, Shape([3]))
    parameter_list = [input_param]
    onehot_node = OneHot(input_param, Shape([3, 3]), 0)
    function = Function(NodeVector([onehot_node]),
                        parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    in_tensor = backend.create_tensor(dtype, Shape([3]))
    out_tensor = backend.create_tensor(dtype, Shape([3, 3]))

    in_tensor.write(util.numpy_to_c(np.array([1, 0, 2], dtype=np.float32)), 0, 12)

    out_vals = np.zeros((3, 3), dtype=np.float32)
    out_tensor.write(util.numpy_to_c(out_vals), 0, 36)
    backend.call(backend.compile(function), [out_tensor], [in_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 0, 36)

    expected = np.eye(3)[np.array([1, 0, 2])]
    assert np.allclose(out_vals, expected)
Esempio n. 12
0
def test_reshape():
    """Reshape a [2, 3] tensor to [3, 2] (call-frame API)."""
    dtype = Type.f32
    in_shape = Shape([2, 3])
    input_param = Parameter(dtype, in_shape)
    parameter_list = [input_param]
    reshape_node = Reshape(input_param, AxisVector([0, 1]), Shape([3, 2]))
    function = Function(NodeVector([reshape_node]), parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    in_view = backend.make_primary_tensor_view(dtype, in_shape)
    out_view = backend.make_primary_tensor_view(dtype, Shape([3, 2]))

    in_view.write(util.numpy_to_c(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)), 0, 24)

    out_vals = np.zeros((3, 2), dtype=np.float32)
    out_view.write(util.numpy_to_c(out_vals), 0, 24)
    cf.call([in_view], [out_view])
    out_view.read(util.numpy_to_c(out_vals), 0, 24)

    source = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    assert np.allclose(out_vals, np.reshape(source, (3, 2)))
Esempio n. 13
0
def test_onehot():
    """OneHot over i32 input [1, 0, 2] along axis 0 matches np.eye(3) lookup."""
    dtype = Type.i32
    input_param = Parameter(dtype, Shape([3]))
    parameter_list = [input_param]
    function = Function([OneHot(input_param, Shape([3, 3]), 0)],
                        parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    in_tensor = backend.create_tensor(dtype, Shape([3]))
    out_tensor = backend.create_tensor(dtype, Shape([3, 3]))

    in_tensor.write(util.numpy_to_c(np.array([1, 0, 2], dtype=np.int32)), 12)

    out_vals = np.zeros((3, 3), dtype=np.int32)
    out_tensor.write(util.numpy_to_c(out_vals), 36)
    handle = backend.compile(function)
    handle.call([out_tensor], [in_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 36)

    expected = np.eye(3)[np.array([1, 0, 2])]
    assert np.allclose(out_vals, expected)
Esempio n. 14
0
def test_broadcast():
    """Broadcast [1, 2, 3] over axis 0 into a [3, 3] tensor of repeated rows."""
    dtype = Type.f32
    input_param = Parameter(dtype, Shape([3]))
    parameter_list = [input_param]
    bcast_node = Broadcast(input_param, Shape([3, 3]), AxisSet({0}))
    function = Function(NodeVector([bcast_node]), parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    in_tensor = backend.create_tensor(dtype, Shape([3]))
    out_tensor = backend.create_tensor(dtype, Shape([3, 3]))

    in_tensor.write(util.numpy_to_c(np.array([1, 2, 3], dtype=np.float32)), 0, 12)

    out_vals = np.zeros((3, 3), dtype=np.float32)
    out_tensor.write(util.numpy_to_c(out_vals), 0, 36)
    backend.call(backend.compile(function), [out_tensor], [in_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 0, 36)

    zeros_col = np.array([[0], [0], [0]], dtype=np.float32)
    row = np.array([[1, 2, 3]], dtype=np.float32)
    assert np.allclose(out_vals, np.add(zeros_col, row))
Esempio n. 15
0
File: ops.py Project: jrmwng/ngraph
def broadcast(node,
              new_shape,
              axis=None,
              name=None):  # type: (Node, TensorShape, int, str) -> Node
    """Return a node broadcasting the input node's values to the given shape.

    :param node: The node with input tensor data.
    :param new_shape: The new shape we want to broadcast tensor to.
    :param axis: The axis along which we perform broadcasting.
    :param name: Optional new name for output node.
    :return: New node with broadcasted shape.
    """
    broadcast_axes = get_broadcast_axes(new_shape, node.shape, axis)
    return Broadcast(node, Shape(new_shape), broadcast_axes)
Esempio n. 16
0
def test_convolution():
    """Default-stride Convolution of a 16x16 ramp with a cross-shaped filter.

    The 3x3 filter is all ones with -1 on both diagonals plus the centre;
    the 14x14 result is checked against the convolution2d reference.
    """
    dtype = Type.f32
    img_shape = Shape([1, 1, 16, 16])
    flt_shape = Shape([1, 1, 3, 3])
    img_param = Parameter(dtype, img_shape)
    flt_param = Parameter(dtype, flt_shape)
    parameter_list = [img_param, flt_param]

    img_vals = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16)
    flt_vals = np.ones((1, 1, 3, 3), dtype=np.float32)
    for row, col in ((0, 0), (1, 1), (2, 2), (0, 2), (2, 0)):
        flt_vals[0, 0, row, col] = -1
    out_vals = np.zeros((1, 1, 14, 14), dtype=np.float32)

    function = Function(NodeVector([Convolution(img_param, flt_param)]),
                        parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    img_view = backend.make_primary_tensor_view(dtype, img_shape)
    flt_view = backend.make_primary_tensor_view(dtype, flt_shape)

    img_view.write(util.numpy_to_c(img_vals), 0, 16 * 16 * 4)
    flt_view.write(util.numpy_to_c(flt_vals), 0, 3 * 3 * 4)

    out_view = backend.make_primary_tensor_view(dtype, Shape([1, 1, 14, 14]))
    out_view.write(util.numpy_to_c(out_vals), 0, 14 * 14 * 4)
    cf.call([out_view], [img_view, flt_view])
    out_view.read(util.numpy_to_c(out_vals), 0, 14 * 14 * 4)

    expected = convolution2d(img_vals[0][0],
                             flt_vals[0][0]).reshape(1, 1, 14, 14)
    assert np.allclose(out_vals, expected)
Esempio n. 17
0
def test_offline_api():
    """Round-trip an ngraph Function through IENetwork and MOC transformations.

    Builds a single-relu function, converts it to an IENetwork via the
    capsule API, applies MOC transformations, converts back, and checks
    the resulting function still has its 3 ops (param, relu, result).
    """
    element_type = Type.f32
    param = Parameter(element_type, Shape([1, 3, 22, 22]))
    relu = ng.relu(param)
    func = Function([relu], [param], 'test')
    caps = Function.to_capsule(func)

    cnnNetwork = IENetwork(caps)
    # PEP 8: identity comparison with None, not equality (was `!= None`).
    assert cnnNetwork is not None

    ApplyMOCTransformations(cnnNetwork, False)

    func2 = ng.function_from_cnn(cnnNetwork)
    assert func2 is not None
    assert len(func2.get_ops()) == 3
Esempio n. 18
0
def test_round_away():
    """ng.round with HALF_AWAY_FROM_ZERO rounds .5 values away from zero."""
    data = ng.parameter(Shape([3, 10]), dtype=np.float32, name="data")

    node = ng.round(data, "HALF_AWAY_FROM_ZERO")
    assert node.get_type_name() == "Round"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [3, 10]
    assert node.get_output_element_type(0) == Type.f32

    input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
    # Halfway cases round away from zero: -2.5 -> -3, 2.5 -> 3, etc.
    expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0]

    result = run_op_node([input_tensor], ng.round, "HALF_AWAY_FROM_ZERO")
    assert np.allclose(result, expected)
Esempio n. 19
0
def test_broadcast():
    """ng.broadcast of [1, 2, 3] to shape [3, 3] tiles the vector as rows."""
    dtype = Type.f32
    input_param = Parameter(dtype, Shape([3]))
    parameter_list = [input_param]
    function = Function([ng.broadcast(input_param, [3, 3])], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)

    in_tensor = backend.create_tensor(dtype, Shape([3]))
    out_tensor = backend.create_tensor(dtype, Shape([3, 3]))

    in_tensor.write(util.numpy_to_c(np.array([1, 2, 3], dtype=np.float32)), 12)

    out_vals = np.zeros((3, 3), dtype=np.float32)
    out_tensor.write(util.numpy_to_c(out_vals), 36)
    handle = backend.compile(function)
    handle.call([out_tensor], [in_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 36)

    zeros_col = np.array([[0], [0], [0]], dtype=np.float32)
    row = np.array([[1, 2, 3]], dtype=np.float32)
    assert np.allclose(out_vals, np.add(zeros_col, row))
Esempio n. 20
0
def test_sum():
    """Sum over axis 1 of a [1, 4] tensor reduces [1, 2, 3, 4] to 10."""
    dtype = Type.f32
    in_shape = Shape([1, 4])
    input_param = Parameter(dtype, in_shape)
    parameter_list = [input_param]
    function = Function([Sum(input_param, AxisSet({1}))], parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    in_tensor = backend.create_tensor(dtype, in_shape)
    out_tensor = backend.create_tensor(dtype, Shape([1]))

    in_tensor.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 16)

    out_vals = np.array([0], dtype=np.float32)
    out_tensor.write(util.numpy_to_c(out_vals), 4)
    handle = backend.compile(function)
    handle.call([out_tensor], [in_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 4)

    expected = np.sum(np.array([1, 2, 3, 4], dtype=np.float32))
    assert np.allclose(out_vals[0], expected)
Esempio n. 21
0
def test_broadcast():
    """ng.broadcast of [1, 2, 3] to [3, 3] via the runtime computation API."""
    dtype = Type.f32
    input_param = Parameter(dtype, Shape([3]))
    parameter_list = [input_param]
    function = Function([ng.broadcast(input_param, [3, 3])], parameter_list, "test")

    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(np.array([1, 2, 3], dtype=np.float32))[0]

    zeros_col = np.array([[0], [0], [0]], dtype=np.float32)
    row = np.array([[1, 2, 3]], dtype=np.float32)
    assert np.allclose(result, np.add(zeros_col, row))
Esempio n. 22
0
def test_convolution():
    """Default Convolution of a 16x16 ramp with a filter of ones and -1 diagonals."""
    dtype = Type.f32
    img_shape = Shape([1, 1, 16, 16])
    flt_shape = Shape([1, 1, 3, 3])
    img_param = Parameter(dtype, img_shape)
    flt_param = Parameter(dtype, flt_shape)
    parameter_list = [img_param, flt_param]

    img_vals = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16)
    flt_vals = np.ones((1, 1, 3, 3), dtype=np.float32)
    # Put -1 on both diagonals (including the shared centre).
    for row, col in ((0, 0), (1, 1), (2, 2), (0, 2), (2, 0)):
        flt_vals[0, 0, row, col] = -1
    out_vals = np.zeros((1, 1, 14, 14), dtype=np.float32)

    function = Function([Convolution(img_param, flt_param)], parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    img_tensor = backend.create_tensor(dtype, img_shape)
    flt_tensor = backend.create_tensor(dtype, flt_shape)

    img_tensor.write(util.numpy_to_c(img_vals), 16 * 16 * 4)
    flt_tensor.write(util.numpy_to_c(flt_vals), 3 * 3 * 4)

    out_tensor = backend.create_tensor(dtype, Shape([1, 1, 14, 14]))
    out_tensor.write(util.numpy_to_c(out_vals), 14 * 14 * 4)
    handle = backend.compile(function)
    handle.call([out_tensor], [img_tensor, flt_tensor])
    out_tensor.read(util.numpy_to_c(out_vals), 14 * 14 * 4)

    expected = convolution2d(img_vals[0][0],
                             flt_vals[0][0]).reshape(1, 1, 14, 14)
    assert np.allclose(out_vals, expected)
Esempio n. 23
0
def unary_op_exec(op_str, input_list):
    """Run the named unary op on input_list and compare with its numpy reference.

    input_list needs to have deep length of 4.
    """
    dtype = Type.f32
    input_array = np.array(input_list, dtype=np.float32)
    in_shape = Shape(input_array.shape)
    input_param = Parameter(dtype, in_shape)
    parameter_list = [input_param]
    function = Function([unary_op(op_str, input_param)], parameter_list, "test")

    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(input_array)[0]

    expected = unary_op_ref(op_str, input_array)
    assert np.allclose(result, expected)
Esempio n. 24
0
    def visit(self, op, x):
        """Register an nGraph Broadcast node for *op*.

        Output axes that do not appear among the input's axes become
        broadcast axes (identified by their position in the output).
        """
        self.computation.set_op_rank(op)
        input_node = self.computation.lookup_cpp_op(op.args[0])

        output_axis_names = op.axes.names
        input_axis_names = op.args[0].axes.names
        # Positions of output axes missing from the input form the axis set.
        axis_set = {pos for pos, axis in enumerate(output_axis_names)
                    if axis not in input_axis_names}

        self.computation.register_cpp_op(
            op,
            PyngBroadcast(input_node, Shape(list(op.axes.lengths)),
                          AxisSet(axis_set)))
Esempio n. 25
0
def binary_op_comparison(op_str):
    """Run the named binary comparison op on two fixed 2x2 arrays and check it
    against the binary_op_ref numpy reference."""
    dtype = Type.f32
    in_shape = Shape([2, 2])
    lhs_param = Parameter(dtype, in_shape)
    rhs_param = Parameter(dtype, in_shape)
    parameter_list = [lhs_param, rhs_param]
    function = Function([binary_op(op_str, lhs_param, rhs_param)],
                        parameter_list, "test")
    lhs_vals = np.array([[1, 5], [3, 2]], dtype=np.float32)
    rhs_vals = np.array([[2, 4], [3, 1]], dtype=np.float32)

    runtime = get_runtime()
    computation = runtime.computation(function, lhs_param, rhs_param)
    result = computation(lhs_vals, rhs_vals)[0]

    assert np.allclose(result, binary_op_ref(op_str, lhs_vals, rhs_vals))
Esempio n. 26
0
    def visit(self, op, input):
        """Register an nGraph Reshape that reorders the input's axes to op.axes."""
        self.computation.set_op_rank(op)
        output_lengths = list(op.axes.lengths)
        output_axis_names = op.axes.names
        input_axis_names = op.args[0].axes.names

        # For each output axis, find its position in the input's axis order.
        axis_order = [input_axis_names.index(name)
                      for name in output_axis_names]

        ngraph_input = self.computation.lookup_cpp_op(op.args[0])
        reorder_node = PyngReshape(ngraph_input,
                                   AxisVector(axis_order),
                                   Shape(output_lengths))
        self.computation.register_cpp_op(op, reorder_node)
Esempio n. 27
0
def test_convert():
    """Convert f32 input to boolean and to i32 and compare with numpy astype.

    Fixes: `np.bool` (removed in NumPy 1.24) replaced with the builtin
    `bool`; the i32 reference array now uses 5.5 to match the value
    actually written to the input tensor (was 5.4).
    """
    element_type = Type.f32
    shape = Shape([1, 3])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    # f32 to boolean
    function = Function(NodeVector([Convert(A, Type.boolean)]), parameter_list,
                        'test')
    backend = Backend.create(test.BACKEND_NAME)

    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(Type.boolean, shape)

    a.write(util.numpy_to_c(np.array([1, 5, 3], dtype=np.float32)), 0, 12)

    result_arr = np.array([False, False, False], dtype=bool)
    result.write(util.numpy_to_c(result_arr), 0, 3)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 3)

    a_arr = np.array([1, 5, 3], dtype=np.float32)
    result_arr_ref = a_arr.astype(bool)
    assert np.allclose(result_arr, result_arr_ref)

    # f32 to i32 (truncation toward zero, matching astype(int))
    function = Function(NodeVector([Convert(A, Type.i32)]), parameter_list,
                        'test')
    backend = Backend.create(test.BACKEND_NAME)

    result = backend.create_tensor(Type.i32, shape)

    a.write(util.numpy_to_c(np.array([1.4, 5.5, 3.9], dtype=np.float32)), 0,
            12)

    result_arr = np.array([0, 0, 0], dtype=np.int32)
    result.write(util.numpy_to_c(result_arr), 0, 12)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 12)

    a_arr = np.array([1.4, 5.5, 3.9], dtype=np.float32)
    result_arr_ref = a_arr.astype(int)

    assert np.allclose(result_arr, result_arr_ref)
Esempio n. 28
0
def test_convert():
    """Convert f32 input to boolean and to i32 (call-frame API).

    Fixes: `np.bool` (removed in NumPy 1.24) replaced with the builtin
    `bool`; the i32 reference array now uses 5.5 to match the value
    actually written to the input tensor (was 5.4).
    """
    element_type = Type.f32
    shape = Shape([1, 3])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    # f32 to boolean
    function = Function(NodeVector([Convert(A, Type.boolean)]), parameter_list,
                        'test')
    backend, cf = make_backend_call_frame(function)

    a = backend.make_primary_tensor_view(element_type, shape)
    result = backend.make_primary_tensor_view(Type.boolean, shape)

    a.write(util.numpy_to_c(np.array([1, 5, 3], dtype=np.float32)), 0, 12)

    result_arr = np.array([False, False, False], dtype=bool)
    result.write(util.numpy_to_c(result_arr), 0, 3)
    cf.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 3)

    a_arr = np.array([1, 5, 3], dtype=np.float32)
    result_arr_ref = a_arr.astype(bool)
    assert np.allclose(result_arr, result_arr_ref)

    # f32 to i32 (truncation toward zero, matching astype(int))
    function = Function(NodeVector([Convert(A, Type.i32)]), parameter_list,
                        'test')
    backend, cf = make_backend_call_frame(function)

    result = backend.make_primary_tensor_view(Type.i32, shape)

    a.write(util.numpy_to_c(np.array([1.4, 5.5, 3.9], dtype=np.float32)), 0,
            12)

    result_arr = np.array([0, 0, 0], dtype=np.int32)
    result.write(util.numpy_to_c(result_arr), 0, 12)
    cf.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 12)

    a_arr = np.array([1.4, 5.5, 3.9], dtype=np.float32)
    result_arr_ref = a_arr.astype(int)

    assert np.allclose(result_arr, result_arr_ref)
Esempio n. 29
0
 def visit(self, op, tensor):
     """Register a fill of *tensor* with op.scalar as an nGraph Constant.

     Builds a flat constant list sized to the tensor's element count and
     records it as the single assignment to the underlying variable;
     raises if the variable has already been assigned.
     """
     self.computation.set_op_rank(op)
     variable = tensor.tensor
     # Total element count = product of the tensor's axis lengths.
     list_size = 1
     for x in list(tensor.axes.lengths):
         list_size *= x
     constant_list = [op.scalar] * list_size
     ngraph_constant_op = Constant(Type.f32,
                                   Shape(list(tensor.axes.lengths)),
                                   constant_list)
     if variable not in self.computation.variables_cpp_op:
         # treat 'op' as the rhs of assignment for forwarding and lookup purposes
         self.computation.variables_cpp_op[variable] = \
             (self.computation.scopemark[op.tensor], op)
         self.computation.register_cpp_op(op,
                                          ngraph_constant_op,
                                          set_name=False)
     else:
         raise RuntimeError("Variable updated more than once!")
Esempio n. 30
0
def broadcast_to(node, new_shape, axis=None, name=None):
    # type: (Node, TensorShape, int, str) -> Node
    """Create a node which broadcasts the input node's values to a desired shape.

    `broadcast_to` will attempt to automatically determine which axes need broadcasting.

    The optional `axis` parameter specifies the starting axis position (0-based) in the output
    shape from which the current shape of the tensor matches the desired new shape.

    e.g. current_shape: [4, 5], new_shape: [2, 3, 4, 5, 6], axis: 2

    By using the `axis` parameter you can control which output axis to broadcast along.

    Example:

    >>> input_node = ng.constant([1, 2, 3])
    >>> current_shape = [3]
    >>> new_shape = [3, 3]
    >>> ng.broadcast_to(input_node, new_shape, axis=1)
    array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]])

    >>> ng.broadcast_to(input_node, new_shape, axis=0)
    array([[1, 1, 1],
           [2, 2, 2],
           [3, 3, 3]])

    If the `axis` parameter is not specified, `broadcast_to` will attempt to match shapes,
    assuming the current shape matches the rightmost positions of the desired new shape.
    This behaviour is similar to NumPy's broadcasting.

    i.e. default `axis = len(new_shape) - len(current_shape)`

    :param node: The node with input tensor data.
    :param new_shape: The new shape we want to broadcast tensor to.
    :param axis: The axis along which we perform broadcasting.
    :param name: Optional new name for output node.
    :return: New node with broadcast shape.
    """
    broadcast_axes = get_broadcast_axes(new_shape, node.shape, axis)
    return Broadcast(node, Shape(new_shape), broadcast_axes)