示例#1
0
def mean(a, axis=None, keepdims=False):
    """
    Compute the arithmetic mean along the specified axis.

    Returns the average of the array elements. The average is taken
    over the flattened array by default, otherwise over the specified
    axis.

    Note:
        Numpy arguments dtype and out are not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Tensor): input tensor containing numbers whose mean is desired.
                    If a is not an array, a conversion is attempted.
        axis (None or int or tuple of ints): optional. Axis or axes along
                    which the means are computed. The default is to compute
                    the mean of the flattened array. If this is a tuple of
                    ints, a mean is performed over multiple axes.
        keepdims(bool): optional. If this is set to True, the axes which
                    are reduced are left in the result as dimensions with
                    size one. With this option, the result will broadcast
                    correctly against the input tensor.

    Returns:
        Tensor or scalar, an array containing the mean values.

    Raises:
        ValueError: if axes are out of the range of [-a.ndim, a.ndim), or
        if the axes contain duplicates.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> a = np.arange(6, dtype='float32')
        >>> output = np.mean(a, 0)
        >>> print(output)
        2.5
    """
    axis = _check_axis_valid(axis, P.Rank()(a))
    # A scalar-shaped input has nothing to reduce: return it as-is when
    # keepdims is requested, otherwise drop the size-1 dimensions.
    if _is_scalar(a.shape):
        if keepdims:
            return a
        return squeeze(a)
    # ReduceMean takes keep_dims at construction time; pass the flag
    # straight through instead of duplicating the call in two branches.
    return P.ReduceMean(keepdims)(a, axis)
示例#2
0
 def __init__(self):
     """Initialize the network and create the Rank op used in construct."""
     super(StridedSlicessdNet, self).__init__()
     # Rank op instance; computes the number of dimensions of a tensor.
     self.rank = P.Rank()
示例#3
0
     'block': P.Sub(),
     'desc_inputs': [[3], [3]],
     'desc_bprop': [[3]]}),
 # TruncatedNormal: random op; output shape given as a const, backward skipped.
 ('TruncatedNormal', {
     'block': P.TruncatedNormal(),
     'desc_const': [(1, 2, 3)],
     'desc_inputs': [],
     'skip': ['backward'],
     'add_fake_input': True}),
 # Select: element-wise choice between two tensors driven by a bool mask.
 ('Select', {
     'block': P.Select(),
     'desc_inputs': [Tensor(np.array([[True, False, False], [False, True, True]])),
                     [2, 3], [2, 3]],
     'desc_bprop': [[2, 3]]}),
 # Rank: returns a scalar (no gradient), so backward is skipped.
 ('Rank', {
     'block': P.Rank(),
     'desc_inputs': [[2, 3]],
     'skip': ['backward']}),
 # InvertPermutation: permutation passed as a const; integer output, no backward.
 ('InvertPermutation', {
     'block': P.InvertPermutation(),
     'desc_const': [(0, 3, 1, 2)],
     'desc_inputs': [],
     'skip': ['backward']}),
 # Square: element-wise x * x with gradient check.
 ('Square', {
     'block': P.Square(),
     'desc_inputs': [[4]],
     'desc_bprop': [[4]]}),
 # Rsqrt: element-wise 1 / sqrt(x) with gradient check.
 ('Rsqrt', {
     'block': P.Rsqrt(),
     'desc_inputs': [[4]],
     'desc_bprop': [[4]]}),
示例#4
0
def inner(a, b):
    """
    Inner product of two tensors.

    Ordinary inner product of vectors for 1-D tensors (without complex
    conjugation), in higher dimensions a sum product over the last
    axes.

    Note:
        Numpy argument out is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtype is np.float32.

    Args:
        a (Tensor): input tensor. If a and b are nonscalar, their last
                    dimensions must match.
        b (Tensor): input tensor. If a and b are nonscalar, their last
                    dimensions must match.

    Returns:
        Tensor or scalar, out.shape = a.shape[:-1] + b.shape[:-1].

    Raises:
        ValueError: if x1.shape[-1] != x2.shape[-1].

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> a = np.ones((5, 3))
        >>> b = np.ones((2, 7, 3))
        >>> output = np.inner(a, b)
        >>> print(output)
        [[[3. 3. 3. 3. 3. 3. 3.]
        [3. 3. 3. 3. 3. 3. 3.]]

        [[3. 3. 3. 3. 3. 3. 3.]
        [3. 3. 3. 3. 3. 3. 3.]]

        [[3. 3. 3. 3. 3. 3. 3.]
        [3. 3. 3. 3. 3. 3. 3.]]

        [[3. 3. 3. 3. 3. 3. 3.]
        [3. 3. 3. 3. 3. 3. 3.]]

        [[3. 3. 3. 3. 3. 3. 3.]
        [3. 3. 3. 3. 3. 3. 3.]]]
    """
    # If either operand is a scalar, the inner product degenerates to a
    # broadcast element-wise multiplication; put the scalar second so the
    # binary-op helper sees a consistent argument order.
    if P.Rank()(a) == 0 or P.Rank()(b) == 0:
        if _is_scalar(a.shape):
            a, b = b, a
        return _apply_bin_op(P.Mul(), a, b)

    _ = _check_shape_aligned(a.shape, b.shape)
    # Collapse every leading dimension so the sum product over the last
    # axes becomes a single 2-D matrix multiplication.
    aligned_shape_a = (F.shape_mul(a.shape[:-1]), a.shape[-1])
    # Use b's own last dimension here (it equals a's after the alignment
    # check, but reading it from a was misleading and fragile).
    aligned_shape_b = (F.shape_mul(b.shape[:-1]), b.shape[-1])
    a_aligned = P.Reshape()(a, aligned_shape_a)
    b_aligned = P.Reshape()(b, aligned_shape_b)

    # transpose_b=True makes MatMul contract over the shared last axis.
    res = P.MatMul(False, True)(a_aligned, b_aligned)
    res = P.Reshape()(res, a.shape[:-1] + b.shape[:-1])
    return res
示例#5
0
def _expand(x, ndim):
    """Prepend size-1 dimensions to x until its rank reaches ndim."""
    rank_op = P.Rank()
    expand_dims = P.ExpandDims()
    while rank_op(x) < ndim:
        x = expand_dims(x, 0)
    return x