Example #1
def test_axisset():

    set_axisset = AxisSet({1, 2, 3})
    list_axisset = AxisSet([1, 2, 3])
    tuple_axisset = AxisSet((1, 2, 3))

    assert len(set_axisset) == 3
    assert set(set_axisset) == {1, 2, 3}

    assert len(list_axisset) == 3
    assert set(list_axisset) == set(set_axisset)

    assert len(tuple_axisset) == 3
    assert set(tuple_axisset) == set(set_axisset)
Example #2
def test_broadcast():

    element_type = Type.f32
    A = Parameter(element_type, Shape([3]))
    parameter_list = [A]
    function = Function(
        NodeVector([Broadcast(A, Shape([3, 3]), AxisSet({0}))]),
        parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    a = backend.create_tensor(element_type, Shape([3]))
    result = backend.create_tensor(element_type, Shape([3, 3]))

    a.write(util.numpy_to_c(np.array([1, 2, 3], dtype=np.float32)), 0, 12)

    result_arr = np.zeros((3, 3), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 36)
    backend.call(backend.compile(function), [result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 36)

    a_arr = np.array([[0], [0], [0]], dtype=np.float32)
    b_arr = np.array([[1, 2, 3]], dtype=np.float32)
    result_arr_ref = np.add(a_arr, b_arr)

    assert np.allclose(result_arr, result_arr_ref)
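
For reference, the Broadcast node above replicates the length-3 vector along output axis 0; a minimal NumPy sketch of the same result (the names here are illustrative, not part of the test):

import numpy as np

a = np.array([1, 2, 3], dtype=np.float32)
# AxisSet({0}) marks output axis 0 as the newly added, replicated axis.
expected = np.broadcast_to(a, (3, 3))
# expected == [[1, 2, 3], [1, 2, 3], [1, 2, 3]]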
Example #3
def make_float32_constant_like(scalar, op):  # type: (float, Node) -> Node
    """Create a Constant node for a float value, broadcast to the shape of `op`."""
    v = set()
    shape = op.get_shape()
    for i in range(len(shape)):
        v.add(i)
    return make_float32_constant(scalar, Shape(shape), AxisSet(v))
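
Because every axis index of `op`'s shape goes into the axis set, the scalar is broadcast across the entire shape. Roughly the NumPy analogue, assuming `op` has shape (2, 3):

import numpy as np

np.full((2, 3), 0.5, dtype=np.float32)  # the scalar replicated over the whole shape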
Example #4
def reduce(
        node,  # type: Node
        initial_value,  # type: ScalarData
        reduction_function,  # type: Union[Callable, Function]
        reduction_axes=None,  # type: List[int]
        name=None,  # type: str
):
    # type: (...) -> Node
    """Perform general tensor reduction operation.

    :param node: The node providing data for reduction operation.
    :param initial_value: The initial value for reduction operation.
    :param reduction_function: The function performing binary reduction operation or an nGraph
                               Function object. The operation must accept two nodes providing
                               scalar operands and return a node which produces a scalar result.
    :param reduction_axes: The list of axes indices to be reduced. Defaults to reducing all axes.
    :param name: The new name for output node.
    :return: The node performing reduction operation with provided reduction node.
    """
    if reduction_axes is None:
        reduction_axes = list(range(len(node.shape)))
    init_val_node = constant(initial_value)
    if not isinstance(reduction_function, Function):
        # wrap reduction function into Function object
        param1 = Parameter(node.get_element_type(), Shape([]))
        param2 = Parameter(node.get_element_type(), Shape([]))
        reduction_operation = Function(
            NodeVector([reduction_function(param1, param2)]), [param1, param2],
            'reduction_operation')
    else:
        reduction_operation = reduction_function
    return Reduce(node, init_val_node, reduction_operation,
                  AxisSet(set(reduction_axes)))
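
A minimal usage sketch, assuming `input_node` is an existing nGraph Node and `Add` is the binary op used elsewhere on this page; a plain callable is wrapped into a single-output Function by the branch above:

# input_node: assumed to be a pre-built nGraph Node (hypothetical here).
sum_over_axis0 = reduce(input_node, 0., Add, reduction_axes=[0])  # sum-reduce axis 0

# Omitting reduction_axes reduces every axis, yielding a scalar node.
total = reduce(input_node, 0., Add)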
Example #5
def test_reduce():

    float_element_type = Type.f32

    AddParam1 = Parameter(float_element_type, Shape([]))
    AddParam2 = Parameter(float_element_type, Shape([]))
    constant_op = Constant(float_element_type, Shape([]), [0.])
    reduce_function = Function(NodeVector([Add(AddParam1, AddParam2)]),
                               [AddParam1, AddParam2], 'add')

    A = Parameter(float_element_type, Shape([2, 2, 2]))
    parameter_list = [A]

    function = Function(
        NodeVector([Reduce(A, constant_op, reduce_function, AxisSet({0}))]),
        parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    a = backend.make_primary_tensor_view(float_element_type, Shape([2, 2, 2]))
    result = backend.make_primary_tensor_view(float_element_type, Shape([2, 2]))

    a.write(util.numpy_to_c(np.arange(8, dtype=np.float32).reshape(2, 2, 2)),
            0, 32)

    result_arr = np.zeros((2, 2), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 16)
    cf.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 16)

    a_arr = np.arange(8).reshape(2, 2, 2)
    result_arr_ref = np.add.reduce(a_arr)

    assert np.allclose(result_arr, result_arr_ref)
Example #6
def test_broadcast():

    element_type = Type.f32
    A = Parameter(element_type, Shape([3]))
    parameter_list = [A]
    function = Function([Broadcast(A, Shape([3, 3]), AxisSet({0}))],
                        parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    a = backend.create_tensor(element_type, Shape([3]))
    result = backend.create_tensor(element_type, Shape([3, 3]))

    a.write(util.numpy_to_c(np.array([1, 2, 3], dtype=np.float32)), 12)

    result_arr = np.zeros((3, 3), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 36)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 36)

    a_arr = np.array([[0], [0], [0]], dtype=np.float32)
    b_arr = np.array([[1, 2, 3]], dtype=np.float32)
    result_arr_ref = np.add(a_arr, b_arr)

    assert np.allclose(result_arr, result_arr_ref)
Example #7
def test_sum():

    element_type = Type.f32
    shape = Shape([1, 4])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function([Sum(A, AxisSet({1}))], parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([1]))

    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 16)

    result_arr = np.array([0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 4)
    handle = backend.compile(function)
    handle.get_performance_data()
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 4)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    result_arr_ref = np.sum(a_arr)

    assert np.allclose(result_arr[0], result_arr_ref)
Example #8
def test_broadcast():

    element_type = Type.f32
    A = Parameter(element_type, Shape([3]))
    parameter_list = [A]
    function = Function(
        NodeVector([Broadcast(A, Shape([3, 3]), AxisSet({0}))]),
        parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    a = backend.make_primary_tensor_view(element_type, Shape([3]))
    result = backend.make_primary_tensor_view(element_type, Shape([3, 3]))

    a.write(util.numpy_to_c(np.array([1, 2, 3], dtype=np.float32)), 0, 12)

    result_arr = np.zeros((3, 3), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 36)
    cf.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 36)

    a_arr = np.array([[0], [0], [0]], dtype=np.float32)
    b_arr = np.array([[1, 2, 3]], dtype=np.float32)
    result_arr_ref = np.add(a_arr, b_arr)

    assert np.allclose(result_arr, result_arr_ref)
Example #9
File: ops.py Project: jrmwng/ngraph
def sum(node, reduction_axes=None, name=None):
    # type: (Node, Iterable[int], str) -> Node
    """Element-wise sums the input tensor, eliminating the specified reduction axes.

    :param reduction_axes: The axes to eliminate through summation.
    """
    reduction_axes = get_reduction_axes(node, reduction_axes)
    return Sum(node, AxisSet(reduction_axes))
Example #10
def make_float32_constant(scalar, shape=None, axis_set=None):
    # type: (float, List[int], Set[int]) -> Node
    """Create a Constant node for a float value."""
    if shape is None:
        shape = []
    if axis_set is None:
        axis_set = set()
    return make_scalar_constant(Type.f32, scalar, Shape(shape),
                                AxisSet(axis_set))
Example #11
def min(node, reduction_axes=None, name=None):
    # type: (Node, Iterable[int], str) -> Node
    """Min-reduction operation on input tensor, eliminating the specified reduction axes.

    :param node: The tensor we want to min-reduce.
    :param reduction_axes: The axes to eliminate through min operation.
    :param name: Optional name for output node.
    """
    return Min(node, AxisSet(get_reduction_axes(node, reduction_axes)))
Example #12
def reverse(node, reversed_axes, name=None):  # type: (Node, List[int], str) -> Node
    """Perform axis-reverse operation.

    :param node: The input node on which operation will be carried out.
    :param reversed_axes: The list of indices of axes to be reversed.
    :param name: The optional name of the output node.
    :return: The new node with reversed axes.
    """
    return Reverse(node, AxisSet(reversed_axes))
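
The NumPy counterpart of this operation is `np.flip`; a short illustrative sketch:

import numpy as np

x = np.arange(6).reshape(2, 3)
np.flip(x, axis=0)  # reverse along axis 0, like Reverse(node, AxisSet({0}))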
Example #13
    def visit(self, op, x):
        self.computation.set_op_rank(op)
        op_element_type = self.computation.lookup_cpp_op(x)
        axis_set = set()
        axis_set.add(op.dim)
        self.computation.register_cpp_op(
            op,
            PyngBroadcast(op_element_type, Shape(list(op.axes.lengths)),
                          AxisSet(axis_set)))
Example #14
def sum(node, reduction_axes=None, name=None):
    # type: (Node, Iterable[int], str) -> Node
    """Perform element-wise sums of the input tensor, eliminating the specified reduction axes.

    :param node: The node providing data for operation.
    :param reduction_axes: The axes to eliminate through summation.
    :param name: The optional new name for output node.
    :return: The new node performing summation along `reduction_axes` element-wise.
    """
    return Sum(node, AxisSet(get_reduction_axes(node, reduction_axes)))
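
A short usage sketch, assuming `input_node` is a 2-D nGraph Node; the semantics mirror `np.sum` over the same axes:

# input_node: assumed to be a pre-built 2-D nGraph Node (hypothetical here).
row_sums = sum(input_node, reduction_axes=[1])  # like np.sum(x, axis=1)
total = sum(input_node)                         # all axes reduced to a scalar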
Example #15
def prod(node, reduction_axes=None, name=None):
    # type: (Node, Iterable[int], str) -> Node
    """Product-reduction operation on input tensor, eliminating the specified reduction axes.

    :param node: The tensor we want to product-reduce.
    :param reduction_axes: The axes to eliminate through product operation.
    :param name: Optional name for output node.
    :return: The new node performing product-reduction operation.
    """
    return Product(node, AxisSet(get_reduction_axes(node, reduction_axes)))
Example #16
    def visit(self, op, input):
        self.computation.set_op_rank(op)

        if isinstance(self.np_reduction_axis(op), tuple):
            axis_set = self.np_reduction_axis(op)
        else:
            axis_set = tuple()
            axis_set += (self.np_reduction_axis(op), )
        ngraph_input = self.computation.lookup_cpp_op(input)
        self.computation.register_cpp_op(
            op, PyngMax(ngraph_input, AxisSet(set(axis_set))))
Example #17
def broadcast(node, new_shape, broadcast_axes, name=None):
    # type: (Node, TensorShape, Iterable[int], str) -> Node
    """Create a node which broadcasts the input node's values along specified axes to a desired shape.

    :param node: The node with input tensor data.
    :param new_shape: The new shape we want to broadcast tensor to.
    :param broadcast_axes: The axis positions (0-based) in the result that are being broadcast.
    :param name: Optional new name for output node.
    :return: New node with broadcast shape.
    """
    return Broadcast(node, Shape(new_shape), AxisSet(broadcast_axes))
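
To make the axis bookkeeping concrete: `broadcast_axes` are positions in the output shape that do not come from the input. A NumPy sketch of the same semantics (illustrative shapes):

import numpy as np

x = np.arange(6).reshape(2, 3)     # input shape (2, 3)
# Output axis 1 is the newly added broadcast axis, i.e. broadcast_axes = {1}.
y = np.expand_dims(x, axis=1)      # shape (2, 1, 3)
y = np.broadcast_to(y, (2, 4, 3))  # output shape (2, 4, 3)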
Example #18
def make_scalar_constant(elem_type, scalar, shape=None, axis_set=None):
    # type: (Type, float, Shape, AxisSet) -> Node
    """Create a Constant node for a scalar value."""
    if shape is None:
        shape = Shape([])
    if axis_set is None:
        axis_set = AxisSet(set())
    scalar_shape = Shape([])  # type: List[int]
    constant_op = Constant(elem_type, scalar_shape, [scalar])
    constant_broadcast = Broadcast(constant_op, shape, axis_set)
    return constant_broadcast
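
A usage sketch, assuming the same imports as the surrounding examples; the scalar Constant is created once and then broadcast to the requested shape:

# A 2x2 f32 tensor filled with 3.14; both output axes are broadcast axes.
# (pi_node is an illustrative name.)
pi_node = make_scalar_constant(Type.f32, 3.14, Shape([2, 2]), AxisSet({0, 1}))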
Example #19
def softmax(node, axes, name=None):  # type: (Node, Iterable[int], str) -> Node
    """Apply softmax operation on each element of input tensor.

    :param node: The tensor providing input data.
    :param axes: The list of axes indices which are used to calculate divider of
                 the softmax function.
    :param name: The optional new name for output node.
    :return: The new node with softmax operation applied on each element.
    """
    if type(axes) is not set:
        axes = set(axes)
    return Softmax(node, AxisSet(axes))
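
For reference, a NumPy version of the reduction that the `axes` argument controls (a numerically stabilized sketch, not the nGraph kernel itself):

import numpy as np

def softmax_ref(x, axes):
    # Subtracting the max avoids overflow in exp(); it cancels out in the ratio.
    e = np.exp(x - np.max(x, axis=axes, keepdims=True))
    return e / np.sum(e, axis=axes, keepdims=True)

softmax_ref(np.arange(6, dtype=np.float32).reshape(2, 3), axes=(1,))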
Example #20
    def visit(self, op, x):
        self.computation.set_op_rank(op)
        axis_set = set()
        op_element_type = self.computation.lookup_cpp_op(op.args[0])

        # build axis_set
        broadcast_axes = op.axes.names
        broadcast_args_axes = op.args[0].axes.names

        for pos, axis in enumerate(broadcast_axes):
            if axis not in broadcast_args_axes:
                axis_set.add(pos)

        self.computation.register_cpp_op(
            op,
            PyngBroadcast(op_element_type, Shape(list(op.axes.lengths)),
                          AxisSet(axis_set)))
Example #21
def mvn(data, axes, normalize_variance, eps, name=None):
    # type: (Node, Set[int], bool, float, str) -> Node
    r"""Perform Mean Variance Normalization operation on data from input node.

    Computes MVN on the input tensor :code:`data` (called `X`) using formula:

    .. math:: Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}}

    :param data: The node with data tensor.
    :param axes: A list of axes along which to reduce. Array of integers.
    :param normalize_variance: Flag that denotes whether to perform variance normalization.
                               Boolean value.
    :param eps: The number added to the variance to avoid division by zero
                when normalizing the value. Scalar value.
    :param name: Optional output node name.
    :return: The new node performing a MVN operation on input tensor.
    """
    return MVN(data, AxisSet(axes), normalize_variance, eps)
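
A NumPy reference for the formula above, with `eps` added to the variance before taking the square root (a sketch of the documented semantics, not the backend kernel):

import numpy as np

def mvn_ref(x, axes, normalize_variance, eps):
    # axes: a tuple of ints, e.g. (1, 2)
    centered = x - x.mean(axis=axes, keepdims=True)
    if normalize_variance:
        variance = (centered ** 2).mean(axis=axes, keepdims=True)
        centered = centered / np.sqrt(variance + eps)
    return centered

mvn_ref(np.random.rand(2, 3, 4).astype(np.float32), axes=(1, 2),
        normalize_variance=True, eps=1e-9)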
Example #22
def get_broadcast_axes(left_shape, right_shape, axis):
    # type: (TensorShape, TensorShape, Optional[int]) -> AxisSet
    """Generate a list of broadcast axes for ngraph++ broadcast.

    Informally, a broadcast "adds" axes to the input tensor,
    replicating elements from the input tensor as needed to fill the new dimensions.
    This function calculates which of the output axes are added in this way.
    For example, an output shape of `{2,5,6,2,8}` and input shape of `{2,6}` means
    that the broadcast axes must be `{1,3,4}`.
    """
    axes_indexes = list(range(0, len(left_shape)))
    if axis is None:
        right_begin = len(left_shape) - len(right_shape)
    else:
        right_begin = axis
    right_axes_indexes = list(
        range(right_begin, right_begin + len(right_shape)))
    for index in reversed(right_axes_indexes):
        del axes_indexes[index]
    return AxisSet(set(axes_indexes))
Example #23
def get_broadcast_axes(output_shape, input_shape, axis=None):
    # type: (TensorShape, TensorShape, int) -> AxisSet
    """Generate a list of broadcast axes for ngraph++ broadcast.

    Informally, a broadcast "adds" axes to the input tensor,
    replicating elements from the input tensor as needed to fill the new dimensions.
    This function calculates which of the output axes are added in this way.

    :param output_shape: The new shape for the output tensor.
    :param input_shape: The shape of input tensor.
    :param axis: The axis along which we want to replicate elements.
    :return: The indices of added axes.
    """
    axes_indexes = list(range(0, len(output_shape)))
    if axis is None:
        output_begin = len(output_shape) - len(input_shape)
    else:
        output_begin = axis
    right_axes_indexes = list(range(output_begin, output_begin + len(input_shape)))
    for index in reversed(right_axes_indexes):
        del axes_indexes[index]
    return AxisSet(set(axes_indexes))
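
A quick pure-Python rerun of the index arithmetic above: with output shape (2, 5, 6), input shape (6,), and no explicit axis, the input occupies the trailing position, so axes 0 and 1 are the broadcast axes:

output_shape, input_shape, axis = [2, 5, 6], [6], None
axes_indexes = list(range(len(output_shape)))        # [0, 1, 2]
begin = len(output_shape) - len(input_shape) if axis is None else axis
for index in reversed(range(begin, begin + len(input_shape))):
    del axes_indexes[index]                          # the input occupies axis 2
print(set(axes_indexes))                             # {0, 1}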
Example #24
def test_sum():

    element_type = Type.f32
    shape = Shape([1, 4])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function(NodeVector([Sum(A, AxisSet({1}))]), parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    a = backend.make_primary_tensor_view(element_type, shape)
    result = backend.make_primary_tensor_view(element_type, Shape([1]))

    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 0, 16)

    result_arr = np.array([0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 4)
    cf.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 4)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    result_arr_ref = np.sum(a_arr)

    assert np.allclose(result_arr[0], result_arr_ref)
Example #25
def test_sum():

    element_type = Type.f32
    shape = Shape([1, 4])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function(NodeVector([Sum(A, AxisSet({1}))]), parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([1]))

    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 0, 16)

    result_arr = np.array([0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 4)
    backend.call(backend.compile(function), [result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 4)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    result_arr_ref = np.sum(a_arr)

    assert np.allclose(result_arr[0], result_arr_ref)
Example #26
def unary_op(op_str, a):
    if op_str == 'Abs':
        return Abs(a)
    elif op_str == 'Acos':
        return Acos(a)
    elif op_str == 'Asin':
        return Asin(a)
    elif op_str == 'Atan':
        return Atan(a)
    elif op_str == 'Ceiling':
        return Ceiling(a)
    elif op_str == 'Cos':
        return Cos(a)
    elif op_str == 'Cosh':
        return Cosh(a)
    elif op_str == 'Floor':
        return Floor(a)
    elif op_str == 'log':
        return Log(a)
    elif op_str == 'exp':
        return Exp(a)
    elif op_str == 'negative':
        return Negative(a)
    elif op_str == 'Reverse':
        return Reverse(a, AxisSet({1}))
    elif op_str == 'Sign':
        return Sign(a)
    elif op_str == 'Sin':
        return Sin(a)
    elif op_str == 'Sinh':
        return Sinh(a)
    elif op_str == 'Sqrt':
        return Sqrt(a)
    elif op_str == 'Tan':
        return Tan(a)
    elif op_str == 'Tanh':
        return Tanh(a)
Example #27
def relu(op):  # type: (Node) -> Node
    """Relu operator."""
    return Maximum(op, make_float32_constant_like(0., op))


# Flatten
X1 = Reshape(Input, AxisVector([0, 1, 2]), Shape([bz, 784]))

# Normalize
X2 = X1 / make_float32_constant_like(255., X1)

# Affine 1
W1 = Parameter(float_element_type, Shape([784, 100]))
b1 = Parameter(float_element_type, Shape([100]))
X3 = Dot(X2, W1) + Broadcast(b1, Shape([bz, 100]), AxisSet({0}))
X4 = relu(X3)

# Affine 2
W2 = Parameter(float_element_type, Shape([100, 10]))
b2 = Parameter(float_element_type, Shape([10]))
X5 = Dot(X4, W2) + Broadcast(b2, Shape([bz, 10]), AxisSet({0}))

# Softmax
Logits = X5
Exp = Exp(Logits)
Max = Reduce(Exp, make_float32_constant(0., [], set()), MaxFn, AxisSet({1}))
MaxBroadcast = Broadcast(Max, Shape([bz, 10]), AxisSet({1}))
Softmax = Exp / MaxBroadcast

# Loss
Example #28
File: ops.py Project: jrmwng/ngraph
def softmax(node, axes):  # type: (Node, Iterable[int]) -> Node
    """Softmax operation on input tensor."""
    if type(axes) is not set:
        axes = set(axes)
    return Softmax(node, AxisSet(axes))