Example #1
def sum(node, reduction_axes=None, name=None):
    # type: (Node, Iterable[int], str) -> Node
    """Perform element-wise sums of the input tensor, eliminating the specified reduction axes.

    :param node: The node providing data for operation.
    :param reduction_axes: The axes to eliminate through summation.
    :param name: The optional new name for ouptut node.
    :return: The new node performing summation along `reduction_axes` element-wise.
    """
    return Sum(node, AxisSet(get_reduction_axes(node, reduction_axes)))
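For intuition, the reduction built by this wrapper behaves like NumPy's np.sum over the same axes. A minimal plain-NumPy sketch (not the nGraph API; the data matches the tests below):

import numpy as np

# sum(node, reduction_axes={1}) on a (1, 4) tensor eliminates axis 1 by summing.
data = np.array([[1, 2, 3, 4]], dtype=np.float32)  # shape (1, 4)
reduced = np.sum(data, axis=1)                     # shape (1,)
print(reduced)  # [10.]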
Example #2
def test_sum():
    # Build a one-op graph that sums a 1x4 float32 tensor along axis 1.
    element_type = Type.f32
    shape = Shape([1, 4])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function(NodeVector([Sum(A, AxisSet({1}))]), parameter_list, 'test')
    backend = Backend.create(pytest.config.getoption('backend'))

    # Allocate input and output tensors on the backend.
    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([1]))

    # Write the four float32 inputs (16 bytes) at offset 0.
    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 0, 16)

    result_arr = np.array([0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 4)
    # Compile, run, and read the single float32 result back (4 bytes).
    backend.call(backend.compile(function), [result], [a])
    result.read(util.numpy_to_c(result_arr), 0, 4)

    # Compare against the NumPy reference.
    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    result_arr_ref = np.sum(a_arr)

    assert np.allclose(result_arr[0], result_arr_ref)
Example #3
def test_sum():
    # Same check as Example #2, written against the older call-frame API.
    element_type = Type.f32
    shape = Shape([1, 4])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function(NodeVector([Sum(A, AxisSet({1}))]), parameter_list, 'test')
    backend, cf = make_backend_call_frame(function)

    a = backend.make_primary_tensor_view(element_type, shape)
    result = backend.make_primary_tensor_view(element_type, Shape([1]))

    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 0, 16)

    result_arr = np.array([0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 0, 4)
    # Note: the call frame takes inputs first, then outputs.
    cf.call([a], [result])
    result.read(util.numpy_to_c(result_arr), 0, 4)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    result_arr_ref = np.sum(a_arr)

    assert np.allclose(result_arr[0], result_arr_ref)
Example #4
def test_sum():
    # Same check using the newer API: a named backend via Backend.create,
    # a compiled handle with call(), and write()/read() without an offset argument.
    element_type = Type.f32
    shape = Shape([1, 4])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function([Sum(A, AxisSet({1}))], parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([1]))

    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 16)

    result_arr = np.array([0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 4)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 4)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    result_arr_ref = np.sum(a_arr)

    assert np.allclose(result_arr[0], result_arr_ref)
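The byte counts passed to write()/read() in these tests (16 and 4) are just the element count times the four-byte float32 width; NumPy's nbytes makes the arithmetic explicit:

import numpy as np

# 4 float32 elements -> 16 bytes for the input buffer
print(np.array([1, 2, 3, 4], dtype=np.float32).nbytes)  # 16
# 1 float32 element -> 4 bytes for the result buffer
print(np.array([0], dtype=np.float32).nbytes)           # 4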
Example #5
# Affine 2: X5 = X4 · W2 + b2, with the bias broadcast across the batch
W2 = Parameter(float_element_type, Shape([100, 10]))
b2 = Parameter(float_element_type, Shape([10]))
X5 = Dot(X4, W2) + Broadcast(b2, Shape([bz, 10]), AxisSet({0}))

# Softmax: exponentiate the logits and normalize by the per-row sum
Logits = X5
Exp = Exp(Logits)
SumExp = Sum(Exp, AxisSet({1}))
SumExpBroadcast = Broadcast(SumExp, Shape([bz, 10]), AxisSet({1}))
Softmax = Exp / SumExpBroadcast

# Loss: mean cross-entropy (dividing by -bz negates the summed log-likelihood)
LogSoftmax = Log(Softmax)
Loss = Sum(LogSoftmax * LabelOneHot, AxisSet({0, 1})) / make_float32_constant(
    -float(bz), [], set())

# Derivatives
# Gradient of the summed cross-entropy with respect to the logits
dLogits = Softmax - LabelOneHot
dX5 = dLogits

# Affine 2 gradients: dX4 = dX5 · W2^T, dW2 = X4^T · dX5, db2 sums over the batch
dX4 = Dot(dX5, transpose(W2, Shape([1, 0])))
dW2 = Dot(transpose(X4, Shape([1, 0])), dX5)
db2 = Sum(dX5, AxisSet({0}))

# ReLU gradient: pass dX4 through only where the pre-activation X3 is positive
dX3 = Convert((Greater(X3, make_float32_constant(0., [bz, 100], {0, 1}))),
              float_element_type) * dX4
# Affine 1 gradients, same pattern as the layer above
dX2 = Dot(dX3, transpose(W1, Shape([1, 0])))
dW1 = Dot(transpose(X2, Shape([1, 0])), dX3)
db1 = Sum(dX3, AxisSet({0}))
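The key step above is dLogits = Softmax - LabelOneHot, the gradient of the summed cross-entropy with respect to the logits. A plain-NumPy finite-difference check of that identity (the bz = 2 batch and the random data here are illustrative, not taken from the excerpt):

import numpy as np

rng = np.random.default_rng(0)
bz = 2
logits = rng.normal(size=(bz, 10))
onehot = np.eye(10)[rng.integers(0, 10, size=bz)]

def cross_entropy(z):
    # Summed (not mean) cross-entropy of softmax(z) against the one-hot labels.
    s = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)
    return -np.sum(onehot * np.log(s))

softmax = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
grad_analytic = softmax - onehot

# Forward-difference approximation of the same gradient.
eps = 1e-6
base = cross_entropy(logits)
grad_num = np.zeros_like(logits)
for idx in np.ndindex(*logits.shape):
    bumped = logits.copy()
    bumped[idx] += eps
    grad_num[idx] = (cross_entropy(bumped) - base) / eps

print(np.allclose(grad_num, grad_analytic, atol=1e-4))  # True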