Example #1
def softmax(node, axes, name=None):  # type: (Node, Iterable[int], str) -> Node
    """Apply softmax operation on each element of input tensor.

    :param node: The tensor providing input data.
    :param axes: The list of axis indices used to calculate the divider of
                 the softmax function.
    :param name: The optional new name for output node.
    :return: The new node with the softmax operation applied to the input tensor.
    """
    if not isinstance(axes, set):
        axes = set(axes)
    return Softmax(node, AxisSet(axes))
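For reference, here is a minimal NumPy sketch (an illustration only, not the ngraph implementation) of what the resulting Softmax node computes: the divider is the sum of exponentials taken over the given axes.

import numpy as np

def softmax_reference(x, axes):
    # Stabilize by subtracting the max, then normalize by the sum of
    # exponentials over the chosen axes.
    e = np.exp(x - np.max(x, axis=tuple(axes), keepdims=True))
    return e / np.sum(e, axis=tuple(axes), keepdims=True)

x = np.array([[1.0, 2.0, 3.0], [1.0, 1.0, 1.0]], dtype=np.float32)
print(softmax_reference(x, {1}))  # each row sums to 1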
Example #2
    def visit(self, op, x):
        self.computation.set_op_rank(op)
        axis_set = set()
        op_element_type = self.computation.lookup_cpp_op(op.args[0])

        # build axis_set
        broadcast_axes = op.axes.names
        broadcast_args_axes = op.args[0].axes.names

        for pos, axis in enumerate(broadcast_axes):
            if axis not in broadcast_args_axes:
                axis_set.add(pos)

        self.computation.register_cpp_op(
            op,
            PyngBroadcast(op_element_type, Shape(list(op.axes.lengths)),
                          AxisSet(axis_set)))
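A small plain-Python illustration (hypothetical axis names, not part of the transformer above) of how the axis_set is built: every output axis position whose name does not appear among the argument's axes is an axis added by the broadcast.

broadcast_axes = ('N', 'C', 'H')   # axes of the output op
broadcast_args_axes = ('C',)       # axes of the op's argument
axis_set = {pos for pos, axis in enumerate(broadcast_axes)
            if axis not in broadcast_args_axes}
print(axis_set)  # {0, 2} -- the positions the broadcast adds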
Example #3
def mvn(data, axes, normalize_variance, eps, name=None):
    # type: (Node, Set[int], bool, float, str) -> Node
    r"""Perform Mean Variance Normalization operation on data from input node.

    Computes MVN on the input tensor :code:`data` (called `X`) using formula:

    .. math:: Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}}

    :param data: The node with data tensor.
    :param axes: A list of axes along which to reduce. Array of integers.
    :param normalize_variance: Flag that denotes whether to normalize the variance.
                               Boolean value.
    :param eps: The number added to the variance to avoid division by zero
                when normalizing the value. Scalar value.
    :param name: Optional output node name.
    :return: The new node performing the MVN operation on the input tensor.
    """
    return MVN(data, AxisSet(axes), normalize_variance, eps)
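A minimal NumPy sketch of the formula above (an illustration, not the ngraph kernel), with the eps term added under the square root as described for the eps parameter:

import numpy as np

def mvn_reference(x, axes, normalize_variance=True, eps=1e-9):
    axes = tuple(axes)
    mean = np.mean(x, axis=axes, keepdims=True)
    y = x - mean                                   # X - EX
    if normalize_variance:
        variance = np.mean(np.square(x - mean), axis=axes, keepdims=True)
        y = y / np.sqrt(variance + eps)            # divide by sqrt(E(X - EX)^2)
    return y

x = np.arange(12, dtype=np.float32).reshape(3, 4)
print(mvn_reference(x, {0, 1}))  # zero mean, unit variance over all elements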
Example #4
def get_broadcast_axes(left_shape, right_shape, axis):
    # type: (TensorShape, TensorShape, Optional[int]) -> AxisSet
    """Generate a list of broadcast axes for ngraph++ broadcast.

    Informally, a broadcast "adds" axes to the input tensor,
    replicating elements from the input tensor as needed to fill the new dimensions.
    This function calculates which of the output axes are added in this way.
    For example, an output shape of `{2,5,6,2,8}` and input shape of `{2,6}` means
    that the broadcast axes must be `{1,3,4}`.
    """
    axes_indexes = list(range(0, len(left_shape)))
    if axis is None:
        right_begin = len(left_shape) - len(right_shape)
    else:
        right_begin = axis
    right_axes_indexes = list(
        range(right_begin, right_begin + len(right_shape)))
    for index in reversed(right_axes_indexes):
        del axes_indexes[index]
    return AxisSet(set(axes_indexes))
Example #5
def get_broadcast_axes(output_shape, input_shape, axis=None):
    # type: (TensorShape, TensorShape, Optional[int]) -> AxisSet
    """Generate a list of broadcast axes for ngraph++ broadcast.

    Informally, a broadcast "adds" axes to the input tensor,
    replicating elements from the input tensor as needed to fill the new dimensions.
    This function calculates which of the output axes are added in this way.

    :param output_shape: The new shape for the output tensor.
    :param input_shape: The shape of input tensor.
    :param axis: The axis along which we want to replicate elements.
    :return: The indices of added axes.
    """
    axes_indexes = list(range(0, len(output_shape)))
    if axis is None:
        output_begin = len(output_shape) - len(input_shape)
    else:
        output_begin = axis
    right_axes_indexes = list(range(output_begin, output_begin + len(input_shape)))
    for index in reversed(right_axes_indexes):
        del axes_indexes[index]
    return AxisSet(set(axes_indexes))
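A worked example of the index arithmetic above in plain Python (shapes chosen for illustration): with axis=None the input is aligned to the trailing output dimensions, so the leading dimensions are the ones being added.

output_shape = (2, 3, 4, 5)
input_shape = (4, 5)
axes_indexes = list(range(len(output_shape)))        # [0, 1, 2, 3]
output_begin = len(output_shape) - len(input_shape)  # align input to the tail
for index in reversed(range(output_begin, output_begin + len(input_shape))):
    del axes_indexes[index]
print(set(axes_indexes))  # {0, 1} -- what get_broadcast_axes wraps in an AxisSet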
Example #6
def test_broadcast():

    element_type = Type.f32
    A = Parameter(element_type, Shape([3]))
    parameter_list = [A]
    function = Function(NodeVector([Broadcast(A, Shape([3, 3]), AxisSet({0}))]), parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    a = backend.create_tensor(element_type, Shape([3]))
    result = backend.create_tensor(element_type, Shape([3, 3]))

    a.write(util.numpy_to_c(np.array([1, 2, 3], dtype=np.float32)), 0, 12)

    result_arr = np.zeros((3, 3), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 36)
    backend.call(backend.compile(function), [result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 36)

    a_arr = np.array([[0], [0], [0]], dtype=np.float32)
    b_arr = np.array([[1, 2, 3]], dtype=np.float32)
    result_arr_ref = np.add(a_arr, b_arr)

    assert np.allclose(result_arr, result_arr_ref)
Example #7
def test_sum():

    element_type = Type.f32
    shape = Shape([1, 4])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function(NodeVector([Sum(A, AxisSet({1}))]), parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([1]))

    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 0, 16)

    result_arr = np.array([0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 4)
    backend.call(backend.compile(function), [result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 4)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    result_arr_ref = np.sum(a_arr)

    assert np.allclose(result_arr[0], result_arr_ref)
Example #8
def test_broadcast():

    element_type = Type.f32
    A = Parameter(element_type, Shape([3]))
    parameter_list = [A]
    function = Function(NodeVector([Broadcast(A, Shape([3, 3]), AxisSet({0}))]), parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    a = backend.make_primary_tensor_view(element_type, Shape([3]))
    result = backend.make_primary_tensor_view(element_type, Shape([3, 3]))

    a.write(util.numpy_to_c(np.array([1, 2, 3], dtype=np.float32)), 0, 12)

    result_arr = np.zeros((3, 3), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 36)
    cf.call([a], [result])
    result.read(util.numpy_to_c(result_arr), 0, 36)

    a_arr = np.array([[0], [0], [0]], dtype=np.float32)
    b_arr = np.array([[1, 2, 3]], dtype=np.float32)
    result_arr_ref = np.add(a_arr, b_arr)

    assert np.allclose(result_arr, result_arr_ref)
Example #9
def test_sum():

    element_type = Type.f32
    shape = Shape([1, 4])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function(NodeVector([Sum(A, AxisSet({1}))]), parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    a = backend.make_primary_tensor_view(element_type, shape)
    result = backend.make_primary_tensor_view(element_type, Shape([1]))

    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 0, 16)

    result_arr = np.array([0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 4)
    cf.call([a], [result])
    result.read(util.numpy_to_c(result_arr), 0, 4)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    result_arr_ref = np.sum(a_arr)

    assert np.allclose(result_arr[0], result_arr_ref)
Example #10
def unary_op(op_str, a):
    if op_str == 'Abs':
        return Abs(a)
    elif op_str == 'Acos':
        return Acos(a)
    elif op_str == 'Asin':
        return Asin(a)
    elif op_str == 'Atan':
        return Atan(a)
    elif op_str == 'Ceiling':
        return Ceiling(a)
    elif op_str == 'Cos':
        return Cos(a)
    elif op_str == 'Cosh':
        return Cosh(a)
    elif op_str == 'Floor':
        return Floor(a)
    elif op_str == 'log':
        return Log(a)
    elif op_str == 'exp':
        return Exp(a)
    elif op_str == 'negative':
        return Negative(a)
    elif op_str == 'Reverse':
        return Reverse(a, AxisSet({1}))
    elif op_str == 'Sign':
        return Sign(a)
    elif op_str == 'Sin':
        return Sin(a)
    elif op_str == 'Sinh':
        return Sinh(a)
    elif op_str == 'Sqrt':
        return Sqrt(a)
    elif op_str == 'Tan':
        return Tan(a)
    elif op_str == 'Tanh':
        return Tanh(a)
Example #11
def test_sum():

    element_type = Type.f32
    shape = Shape([1, 4])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function([Sum(A, AxisSet({1}))], parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([1]))

    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 16)

    result_arr = np.array([0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 4)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 4)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    result_arr_ref = np.sum(a_arr)

    assert np.allclose(result_arr[0], result_arr_ref)
Example #12
def softmax(node, axes):  # type: (Node, Iterable[int]) -> Node
    """Softmax operation on input tensor."""
    if type(axes) is not set:
        axes = set(axes)
    return Softmax(node, AxisSet(axes))
Example #13
def relu(op):  # type: (Node) -> Node
    """Relu operator."""
    return Maximum(op, make_float32_constant_like(0., op))


# Flatten
X1 = Reshape(Input, AxisVector([0, 1, 2]), Shape([bz, 784]))

# Normalize
X2 = X1 / make_float32_constant_like(255., X1)

# Affine 1
W1 = Parameter(float_element_type, Shape([784, 100]))
b1 = Parameter(float_element_type, Shape([100]))
X3 = Dot(X2, W1) + Broadcast(b1, Shape([bz, 100]), AxisSet({0}))
X4 = relu(X3)

# Affine 2
W2 = Parameter(float_element_type, Shape([100, 10]))
b2 = Parameter(float_element_type, Shape([10]))
X5 = Dot(X4, W2) + Broadcast(b2, Shape([bz, 10]), AxisSet({0}))

# Softmax
Logits = X5
Exp = Exp(Logits)
Max = Reduce(Exp, make_float32_constant(0., [], set()), MaxFn, AxisSet({1}))
MaxBroadcast = Broadcast(Max, Shape([bz, 10]), AxisSet({1}))
Softmax = Exp / MaxBroadcast

# Loss