Example #1
# Assumed imports for the legacy nGraph Python bindings; the exact module
# paths varied between releases, so adjust them to the installed version.
# The scalar `constant` helper used below comes from the same API.
from ngraph.impl import AxisSet, Function, Node, NodeVector, Shape
from ngraph.impl.op import Parameter, Reduce


def reduce(
        node,  # type: Node
        initial_value,  # type: ScalarData
        reduction_function,  # type: Union[Callable, Function]
        reduction_axes=None,  # type: List[int]
        name=None,  # type: str
):
    # type: (...) -> Node
    """Perform general tensor reduction operation.

    :param node: The node providing data for reduction operation.
    :param initial_value: The initial value for reduction operation.
    :param reduction_function: The function performing binary reduction operation or a nGraph
                           Function object. The operation must accept two nodes providing scalar
                           operands and return a node which produces a scalar result.
    :param reduction_axes: The list of axes indices to be reduced. Default to reduce all axes.
    :param name: The new name for output node.
    :return: The node performing reduction operation with provided reduction node.
    """
    if reduction_axes is None:
        reduction_axes = list(range(len(node.shape)))
    init_val_node = constant(initial_value)  # scalar Constant node holding the initial value
    if not isinstance(reduction_function, Function):
        # Wrap the plain callable into a Function object over two scalar parameters
        # of the input node's element type.
        param1 = Parameter(node.get_element_type(), Shape([]))
        param2 = Parameter(node.get_element_type(), Shape([]))
        reduction_operation = Function(
            NodeVector([reduction_function(param1, param2)]), [param1, param2],
            'reduction_operation')
    else:
        reduction_operation = reduction_function
    return Reduce(node, init_val_node, reduction_operation,
                  AxisSet(set(reduction_axes)))
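
A minimal usage sketch, assuming the same bindings (Add and Type.f32 appear in Example #2 below) and a hypothetical input parameter `data`; the reduction callable receives two scalar nodes and must return a scalar node:

# Hypothetical usage sketch of the reduce helper above.
data = Parameter(Type.f32, Shape([2, 3]))
total = reduce(data, 0., lambda a, b: Add(a, b))          # reduce every axis
row_sums = reduce(data, 0., lambda a, b: Add(a, b), [0])  # reduce axis 0 only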
Example #2
def test_reduce():
    # Assumes numpy as np, the nGraph `util` helpers, make_backend_call_frame,
    # and the ops/types (Type, Shape, Parameter, Constant, Add, Function,
    # NodeVector, Reduce, AxisSet) imported as in Example #1.

    float_element_type = Type.f32

    AddParam1 = Parameter(float_element_type, Shape([]))
    AddParam2 = Parameter(float_element_type, Shape([]))
    constant_op = Constant(float_element_type, Shape([]), [0.])
    reduce_function = Function(NodeVector([Add(AddParam1, AddParam2)]),
                               [AddParam1, AddParam2], 'add')

    A = Parameter(float_element_type, Shape([2, 2, 2]))
    parameter_list = [A]

    function = Function(
        NodeVector([Reduce(A, constant_op, reduce_function, AxisSet({0}))]),
        parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    a = backend.make_primary_tensor_view(float_element_type, Shape([2, 2, 2]))
    result = backend.make_primary_tensor_view(float_element_type, Shape([2, 2]))

    a.write(util.numpy_to_c(np.arange(8, dtype=np.float32).reshape(2, 2, 2)),
            0, 32)  # offset 0, 8 float32 values = 32 bytes

    result_arr = np.zeros((2, 2), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 16)
    cf.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 16)

    a_arr = np.arange(8).reshape(2, 2, 2)
    result_arr_ref = np.add.reduce(a_arr)

    assert np.allclose(result_arr, result_arr_ref)
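
For reference, the expected result can be checked with plain NumPy: reducing axis 0 of the 2x2x2 arange tensor sums its two 2x2 slices element-wise.

import numpy as np

a_arr = np.arange(8, dtype=np.float32).reshape(2, 2, 2)
print(np.add.reduce(a_arr))  # [[ 4.  6.]
                             #  [ 8. 10.]]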
Example #3
# Fragment of a larger MNIST MLP example: X2 (the flattened input batch),
# bz (the batch size), LabelOneHot, float_element_type, relu, transpose,
# make_float32_constant and the MaxFn reduction function are defined
# earlier in the full script.

# Affine 1
W1 = Parameter(float_element_type, Shape([784, 100]))
b1 = Parameter(float_element_type, Shape([100]))
X3 = Dot(X2, W1) + Broadcast(b1, Shape([bz, 100]), AxisSet({0}))
X4 = relu(X3)

# Affine 2
W2 = Parameter(float_element_type, Shape([100, 10]))
b2 = Parameter(float_element_type, Shape([10]))
X5 = Dot(X4, W2) + Broadcast(b2, Shape([bz, 10]), AxisSet({0}))

# Softmax
# (Note: this snippet normalizes by the maximum of the exponentials rather
# than by their sum, as a textbook softmax would, and `Exp = Exp(Logits)`
# shadows the Exp op class.)
Logits = X5
Exp = Exp(Logits)
Max = Reduce(Exp, make_float32_constant(0., [], set()), MaxFn, AxisSet({1}))
MaxBroadcast = Broadcast(Max, Shape([bz, 10]), AxisSet({1}))
Softmax = Exp / MaxBroadcast

# Loss: mean log-likelihood over the batch (a conventional cross-entropy
# loss would negate this sum)
LogSoftmax = Log(Softmax)
Loss = Sum(LogSoftmax * LabelOneHot, AxisSet({0, 1})) / make_float32_constant(
    float(bz), [], set())

# Derivatives (hand-written backward pass)
dLogits = Softmax - LabelOneHot  # standard softmax + cross-entropy gradient w.r.t. logits
dX5 = dLogits

dX4 = Dot(dX5, transpose(W2, Shape([1, 0])))
dW2 = Dot(transpose(X4, Shape([1, 0])), dX5)
db2 = Sum(dX5, AxisSet({0}))
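
The affine-layer gradients follow the usual matrix-calculus rules for X5 = Dot(X4, W2) + b2: the upstream gradient dX5 flows back as dX4 = dX5 W2^T, dW2 = X4^T dX5, and db2 sums dX5 over the batch axis. A minimal NumPy shape check of those rules (array contents are hypothetical placeholders):

import numpy as np

bz = 32
X4 = np.random.rand(bz, 100).astype(np.float32)
W2 = np.random.rand(100, 10).astype(np.float32)
dX5 = np.random.rand(bz, 10).astype(np.float32)  # upstream gradient

dX4 = dX5 @ W2.T       # (bz, 100), mirrors Dot(dX5, transpose(W2, Shape([1, 0])))
dW2 = X4.T @ dX5       # (100, 10), mirrors Dot(transpose(X4, Shape([1, 0])), dX5)
db2 = dX5.sum(axis=0)  # (10,),     mirrors Sum(dX5, AxisSet({0}))
assert dX4.shape == (bz, 100) and dW2.shape == (100, 10) and db2.shape == (10,)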