def min(x, axis=None, keepdims=False, requires_grad=False):
    return Tensor._op(
        MaxMin,
        x,
        op_kwargs=dict(axis=axis, keepdims=keepdims, maxmin="min"),
        requires_grad=requires_grad,
    )


def std(x, axis=None, ddof=0, keepdims=False, requires_grad=False):
    return Tensor._op(
        StdDev,
        x,
        op_kwargs=dict(axis=axis, keepdims=keepdims, ddof=ddof),
        requires_grad=requires_grad,
    )
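# A hedged usage sketch for the reductions above (not part of the original
# source; assumes Tensor wraps a NumPy array in `.data`). `ddof` follows
# NumPy's convention, so results should match np.std/np.var:
#
#   >>> x = Tensor(np.array([1.0, 2.0, 3.0, 4.0]))
#   >>> std(x, ddof=1).data    # sample standard deviation, sqrt(5/3)
#   1.2909944487358056
#   >>> min(x).data
#   1.0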
def empty(shape, dtype=np.float32, requires_grad=False):
    """
    Fill the Tensor's data with np.empty(shape, dtype).

    :return: a Tensor with the given shape and dtype
    """
    return Tensor(np.empty(shape, dtype), requires_grad=requires_grad)


def rands(shape, requires_grad=False):
    """
    Fill the Tensor's data with np.random.random(shape).

    :return: a random Tensor with the given shape
    """
    return Tensor(np.random.random(shape), requires_grad=requires_grad)
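# A minimal sketch of the creation helpers (assumes Tensor exposes its NumPy
# array via `.data`, as zeros_like below relies on):
#
#   >>> t = rands((2, 3), requires_grad=True)     # uniform samples in [0, 1)
#   >>> t.data.shape
#   (2, 3)
#   >>> empty((4,), dtype=np.float64).data.dtype  # contents are uninitialized
#   dtype('float64')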
def reshape(a, *newshape, requires_grad=False):
    if not newshape:
        raise TypeError("reshape() takes at least 1 argument (0 given)")
    return Tensor._op(Reshape, a, op_args=(newshape,), requires_grad=requires_grad)
def var(x, axis=None, ddof=0, keepdims=False, requires_grad=False):
    return Tensor._op(
        Variance,
        x,
        op_kwargs=dict(axis=axis, keepdims=keepdims, ddof=ddof),
        requires_grad=requires_grad,
    )
def swapaxes(a, axis1, axis2, requires_grad=False):
    """Swap two axes of a Tensor."""
    return Tensor._op(SwapAxes, a, op_args=(axis1, axis2), requires_grad=requires_grad)


def zeros_like(other, dtype=None, requires_grad=False):
    """
    Fill the Tensor's data with np.zeros_like(other, dtype).

    :return: a Tensor matching the target's shape and dtype
    """
    if isinstance(other, Tensor):
        other = other.data
    return Tensor(np.zeros_like(other, dtype), requires_grad=requires_grad)


def permute(a, *axes, requires_grad=False):
    """Reorder the axes of a Tensor; equivalent to np.transpose in NumPy."""
    if not axes:
        axes = None
    return Tensor._op(Permute, a, op_args=(axes,), requires_grad=requires_grad)
def where(condition, x=None, y=None, requires_grad=False):
    if x is None and y is None:
        # Index mode: behave like np.where(condition) and return plain indices.
        if isinstance(condition, Tensor):
            condition = condition.data
        return np.where(condition)
    if x is None or y is None:
        raise ValueError("either both or neither of x and y should be given")
    return Tensor._op(
        Where, x, y, op_kwargs=dict(condition=condition), requires_grad=requires_grad
    )
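# `where` mirrors np.where's dual behavior, sketched below (unverified):
#
#   >>> cond = np.array([True, False, True])
#   >>> where(cond)              # index mode: plain NumPy indices, no Tensor
#   (array([0, 2]),)
#   >>> x, y = Tensor(np.ones(3)), Tensor(np.zeros(3))
#   >>> where(cond, x, y).data   # selection mode: returns a Tensor
#   array([1., 0., 1.])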
def transpose(a, *axes, requires_grad=False):
    """
    Transpose a matrix; currently requires a Tensor with at least two dimensions.

    :param a: the Tensor to transpose
    :param axes: the pair of axes to swap; defaults to the last two
    :param requires_grad:
    :return:
    """
    if a.ndim < 2:
        raise NotImplementedError(
            "should auto-expand the dimensions here, but this is not implemented yet"
        )
    alist = list(range(a.ndim))
    if not axes:
        # If no axes are given, swap the last two dimensions by default
        axes = [a.ndim - 1, a.ndim - 2]
    alist[axes[0]], alist[axes[1]] = alist[axes[1]], alist[axes[0]]
    return Tensor._op(Permute, a, op_args=(alist,), requires_grad=requires_grad)
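# With no axes given, transpose swaps the last two dimensions, i.e. a batched
# matrix transpose. A sketch (unverified):
#
#   >>> a = Tensor(np.zeros((4, 2, 3)))
#   >>> transpose(a).data.shape        # last two axes swapped
#   (4, 3, 2)
#   >>> transpose(a, 0, 1).data.shape  # explicit pair of axes
#   (2, 4, 3)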
def einsum(*operands, optimize=False, requires_grad=False):
    # Not verified yet; copied over directly.
    operands = list(operands)
    if isinstance(operands[0], str):
        # operands form: "ijk, ijk", x, y
        variables = operands[1:]
        if any(isinstance(i, Tensor) for i in operands):
            operands[1:] = (
                var.data if isinstance(var, Tensor) else var for var in operands[1:]
            )
    else:
        # operands form: op0, sublist0, op1, sublist1, ..., [sublistout]
        end = -1 if len(operands) % 2 else None  # -1 if sublistout is included
        variables = operands[:end:2]
        if any(isinstance(i, Tensor) for i in operands):
            operands[:end:2] = (
                var.data if isinstance(var, Tensor) else var
                for var in operands[:end:2]
            )

    in_lbls, out_lbls, _ = _parse_einsum_input(operands)
    return Tensor._op(
        EinSum,
        *variables,
        op_kwargs=dict(in_lbls=in_lbls, out_lbls=out_lbls, optimize=optimize),
        requires_grad=requires_grad,
    )
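# The two calling conventions accepted above, sketched (unverified, per the
# note at the top of einsum):
#
#   >>> x, y = Tensor(np.ones((2, 3))), Tensor(np.ones((3, 4)))
#   >>> einsum("ij,jk->ik", x, y).data.shape             # subscript-string form
#   (2, 4)
#   >>> einsum(x, [0, 1], y, [1, 2], [0, 2]).data.shape  # sublist form
#   (2, 4)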
def multiply(a, b, requires_grad=False):
    return Tensor._op(Multiply, a, b, requires_grad=requires_grad)


def log10(a, requires_grad=False):
    return Tensor._op(Log10, a, requires_grad=requires_grad)


def exp(a, requires_grad=False):
    return Tensor._op(Exp, a, requires_grad=requires_grad)


def matmul(a, b, requires_grad=False):
    return Tensor._op(MatMul, a, b, requires_grad=requires_grad)


def add(a, b, requires_grad=False):
    return Tensor._op(Add, a, b, requires_grad=requires_grad)
def mean(x, axis=None, keepdims=False, requires_grad=False):
    return Tensor._op(Mean, x, op_args=(axis, keepdims), requires_grad=requires_grad)
def subtract(a, b, requires_grad=False):
    return Tensor._op(Subtract, a, b, requires_grad=requires_grad)


def divide(a, b, requires_grad=False):
    return Tensor._op(Divide, a, b, requires_grad=requires_grad)


def minimum(a, b, requires_grad=False):
    return Tensor._op(Minimum, a, b, requires_grad=requires_grad)


def flatten(a, requires_grad=False):
    return Tensor._op(Flatten, a, requires_grad=requires_grad)


def squeeze(a, axis=None, requires_grad=False):
    return Tensor._op(Squeeze, a, op_args=(axis,), requires_grad=requires_grad)


def power(a, b, requires_grad=False):
    return Tensor._op(Power, a, b, requires_grad=requires_grad)


def expand_dims(a, axis, requires_grad=False):
    return Tensor._op(ExpandDims, a, op_args=(axis,), requires_grad=requires_grad)


def positive(a, requires_grad=False):
    return Tensor._op(Positive, a, requires_grad=requires_grad)


def cbrt(a, requires_grad=False):
    return Tensor._op(Cbrt, a, requires_grad=requires_grad)


def negative(a, requires_grad=False):
    return Tensor._op(Negative, a, requires_grad=requires_grad)


def abs(a, requires_grad=False):
    return Tensor._op(Abs, a, requires_grad=requires_grad)


def sum(x, axis=None, keepdims=False, requires_grad=False):
    return Tensor._op(Sum, x, op_args=(axis, keepdims), requires_grad=requires_grad)
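# An end-to-end sketch tying these wrappers together. Assumptions not shown in
# this module: Tensor._op propagates requires_grad from its inputs, and Tensor
# exposes MyGrad-style `.backward()` and `.grad`:
#
#   >>> w = Tensor(np.array([1.0, 2.0, 3.0]), requires_grad=True)
#   >>> loss = sum(multiply(w, w))  # loss = sum(w ** 2)
#   >>> loss.backward()
#   >>> w.grad                      # d(loss)/dw = 2 * w
#   array([2., 4., 6.])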