def cat(tensors, dim=0, out=None):
    """torch.cat-style wrapper: concatenate `tensors` along `dim`.

    When `out` is supplied, the result is copied into it via paddle.assign
    and `out` is returned; otherwise a fresh tensor is returned.
    """
    joined = paddle.concat(tensors, axis=dim)
    if out is None:
        return varbase_to_tensor(joined)
    paddle.assign(joined, out)
    return out
def cat(tensors, dim=0, out=None):
    """torch.cat-style wrapper (fluid backend): concatenate along `dim`.

    With `out`, copies the result into it and returns `out`.
    """
    joined = fluid.layers.concat(tensors, axis=dim)
    if out is None:
        return varbase_to_tensor(joined)
    fluid.layers.assign(joined, out)
    return out
def randn(*shape, requires_grad=True):
    """torch.randn-style wrapper: accepts randn(2, 3) or randn((2, 3)).

    Fix: guard against an empty `shape` before peeking at shape[0] —
    `randn()` previously raised IndexError instead of reaching paddle.
    """
    if shape and isinstance(shape[0], Iterable):
        # Caller passed a single sequence of dims; unwrap it.
        shape = shape[0]
    X = varbase_to_tensor(paddle.randn(shape))
    if not requires_grad:
        X.stop_gradient = True
    return X
def stack(inputs, dim=0, out=None):
    """torch.stack-style wrapper: stack `inputs` along a new axis `dim`.

    With `out`, copies the result into it and returns `out`.
    """
    stacked = paddle.stack(inputs, axis=dim)
    if out is None:
        return varbase_to_tensor(stacked)
    paddle.assign(stacked, out)
    return out
def stack(inputs, dim=0, out=None):
    """torch.stack-style wrapper (fluid backend): stack along new axis `dim`.

    With `out`, copies the result into it and returns `out`.
    """
    stacked = fluid.layers.stack(inputs, axis=dim)
    if out is None:
        return varbase_to_tensor(stacked)
    fluid.layers.assign(stacked, out)
    return out
def zeros(*size, out=None, dtype="float32", device=None, requires_grad=True):
    """torch.zeros-style wrapper: accepts zeros(2, 3) or zeros((2, 3)).

    Fix: the sequence unwrap was duplicated verbatim, so a second unwrap
    could strip one level too many (e.g. for a nested sequence); also guard
    against an empty `size` before indexing size[0].
    """
    if size and isinstance(size[0], Iterable):
        # Caller passed a single sequence of dims; unwrap exactly once.
        size = size[0]
    X = varbase_to_tensor(paddle.zeros(size, dtype))
    if not requires_grad:
        X.stop_gradient = True
    return X
def mean(input, dim=None, keepdim=False, out=None):
    """torch.mean-style reduction over `dim` (fluid backend).

    A tuple of axes is converted to a list for reduce_mean. With `out`,
    copies the result into it and returns `out`.
    """
    axes = list(dim) if isinstance(dim, tuple) else dim
    reduced = fluid.layers.reduce_mean(input, axes, keepdim)
    if out is None:
        return varbase_to_tensor(reduced)
    fluid.layers.assign(reduced, out)
    return out
def chunk(self, chunks, dim):
    """torch.chunk-style split of `self` into up to `chunks` pieces along `dim`.

    Implementation: unstack along `dim` into 1-thick slices, then regroup
    runs of `step` slices by re-adding the axis (unsqueeze) and concatenating.
    The last chunk may be smaller when the axis length is not divisible.
    """
    slices = fluid.layers.unstack(self, axis=dim, num=None)
    step = int(np.ceil(len(slices) / chunks))
    pieces = []
    for start in range(0, len(slices), step):
        group = [
            paddle.fluid.layers.unsqueeze(s, dim, name=None)
            for s in slices[start:start + step]
        ]
        merged = fluid.layers.concat(group, axis=dim, name=None)
        pieces.append(varbase_to_tensor(merged))
    return pieces
def zeros_like(x, out=None, device=None):
    """torch.zeros_like-style wrapper.

    Fix: `out` was forwarded as paddle.zeros_like's second positional
    argument, which is `dtype`, not an output tensor — any non-None `out`
    would fail. Route it through paddle.assign, matching the other
    wrappers in this file (cat/stack).
    """
    z = varbase_to_tensor(paddle.zeros_like(x))
    if out is None:
        return z
    paddle.assign(z, out)
    return out
def pow(x, y):
    """Elementwise x ** y via fluid.layers.pow (shadows builtins.pow by design)."""
    result = fluid.layers.pow(x, y)
    return varbase_to_tensor(result)
def sqrt(x):
    """Elementwise square root via fluid.layers.sqrt."""
    result = fluid.layers.sqrt(x)
    return varbase_to_tensor(result)
def sum(x, dim=None, keepdim=False):
    """torch.sum-style reduction over `dim` (shadows builtins.sum by design)."""
    reduced = fluid.layers.reduce_sum(x, dim, keepdim)
    return varbase_to_tensor(reduced)
def rsqrt(x):
    """Elementwise reciprocal square root, 1/sqrt(x)."""
    result = paddle.fluid.layers.rsqrt(x, name=None)
    return varbase_to_tensor(result)
def flatten(x, dim=1):
    """Flatten `x` around axis `dim` via fluid.layers.flatten (2-D result)."""
    return varbase_to_tensor(fluid.layers.flatten(x, axis=dim))
def ones(*size, out=None, dtype="float32", device=None):
    """torch.ones-style wrapper: accepts ones(2, 3) or ones((2, 3)).

    Fix: guard against an empty `size` before peeking at size[0] —
    `ones()` previously raised IndexError instead of reaching paddle.
    """
    if size and isinstance(size[0], Iterable):
        # Caller passed a single sequence of dims; unwrap it.
        size = size[0]
    return varbase_to_tensor(paddle.ones(size, dtype))
def zeros(*size, out=None, dtype="float32", device=None, requires_grad=True):
    """torch.zeros-style wrapper (fluid backend).

    Fix: unlike its paddle sibling, this variant never unwrapped a single
    sequence argument, so zeros([2, 3]) forwarded ([2, 3],) as the shape.
    Accept both zeros(2, 3) and zeros((2, 3)), guarding empty `size`.
    """
    if size and isinstance(size[0], Iterable):
        size = size[0]
    X = varbase_to_tensor(fluid.layers.zeros(size, dtype))
    if not requires_grad:
        X.stop_gradient = True
    return X
def randn(*shape, requires_grad=True):
    """torch.randn-style wrapper (fluid backend).

    Fix: unlike its paddle sibling, this variant never unwrapped a single
    sequence argument, so randn([2, 3]) forwarded ([2, 3],) as the shape.
    Accept both randn(2, 3) and randn((2, 3)), guarding empty `shape`.
    NOTE(review): `fluid.layers.randn` is assumed to exist in the targeted
    fluid version — confirm against the pinned paddle release.
    """
    if shape and isinstance(shape[0], Iterable):
        shape = shape[0]
    X = varbase_to_tensor(fluid.layers.randn(shape))
    if not requires_grad:
        X.stop_gradient = True
    return X
def flatten(x, start_dim=0, end_dim=-1):
    """torch.flatten-style wrapper: collapse axes start_dim..end_dim into one."""
    flat = paddle.flatten(x, start_axis=start_dim, stop_axis=end_dim)
    return varbase_to_tensor(flat)
def ones(*size, out=None, dtype="float32", device=None):
    """torch.ones-style wrapper.

    Fix: unlike its sibling variant, this one never unwrapped a single
    sequence argument, so ones([2, 3]) forwarded ([2, 3],) as the shape.
    Accept both ones(2, 3) and ones((2, 3)), guarding empty `size`.
    """
    if size and isinstance(size[0], Iterable):
        size = size[0]
    return varbase_to_tensor(paddle.ones(size, dtype))
def rsqrt(x):
    """Elementwise reciprocal square root, 1/sqrt(x), via paddle.rsqrt."""
    result = paddle.rsqrt(x, name=None)
    return varbase_to_tensor(result)
def sqrt(x):
    """Elementwise square root via paddle.sqrt."""
    result = paddle.sqrt(x)
    return varbase_to_tensor(result)
def pow(x, y):
    """Elementwise x ** y via paddle.pow (shadows builtins.pow by design)."""
    result = paddle.pow(x, y)
    return varbase_to_tensor(result)
def ones(*size, out=None, dtype="float32", device=None):
    """torch.ones-style wrapper (fluid backend).

    Fix: unlike the unwrapping sibling variant, this one forwarded
    ones([2, 3]) as ([2, 3],). Accept both ones(2, 3) and ones((2, 3)),
    guarding empty `size`.
    """
    if size and isinstance(size[0], Iterable):
        size = size[0]
    return varbase_to_tensor(fluid.layers.ones(size, dtype))
def randn(*shape, requires_grad=True):
    """torch.randn-style wrapper: accepts randn(2, 3) or randn((2, 3)).

    Fix: the original called paddle.randn(*shape), unpacking the dims —
    paddle.randn takes the shape as a single sequence, so randn(2, 3)
    passed 3 where paddle expects `dtype`. Mirror the sibling variant:
    unwrap a single sequence argument and pass the shape as one object.
    """
    if shape and isinstance(shape[0], Iterable):
        shape = shape[0]
    X = varbase_to_tensor(paddle.randn(shape))
    if not requires_grad:
        X.stop_gradient = True
    return X
def zeros_like(x, out=None, device=None):
    """torch.zeros_like-style wrapper (fluid backend); `out` is forwarded
    to fluid.layers.zeros_like."""
    result = fluid.layers.zeros_like(x, out)
    return varbase_to_tensor(result)
def flatten(x, dim=1):
    """Flatten `x` around axis `dim`.

    NOTE(review): uses the axis-style paddle.flatten signature — presumably
    this variant targets an older paddle release; confirm against the
    pinned version.
    """
    return varbase_to_tensor(paddle.flatten(x, axis=dim))
def clamp(input, min, max, out=None):
    """torch.clamp-style wrapper: limit values of `input` to [min, max].

    Fix: `out` was accepted but silently ignored; honor it via
    paddle.assign, matching the cat/stack wrappers in this file.
    (Parameter names min/max shadow builtins, kept for torch-compatible
    keyword calls.)
    """
    clipped = paddle.clip(input, min, max)
    if out is None:
        return varbase_to_tensor(clipped)
    paddle.assign(clipped, out)
    return out
def min(x, dim=None, keepdim=False):
    """torch.min-style reduction over `dim` (fluid backend); returns values only,
    not the (values, indices) pair torch returns. Shadows builtins.min by design."""
    reduced = fluid.layers.reduce_min(x, dim, keep_dim=keepdim)
    return varbase_to_tensor(reduced)
def clamp(input, min, max, out=None):
    """torch.clamp-style wrapper (fluid backend): limit values to [min, max].

    Fix: `out` was accepted but silently ignored; honor it via
    fluid.layers.assign, matching the cat/stack wrappers in this file.
    """
    clipped = fluid.layers.clip(input, min, max)
    if out is None:
        return varbase_to_tensor(clipped)
    fluid.layers.assign(clipped, out)
    return out
def min(x, dim=None, keepdim=False):
    """torch.min-style reduction over `dim`; returns values only, not the
    (values, indices) pair torch returns. Shadows builtins.min by design."""
    reduced = paddle.min(x, dim, keepdim=keepdim)
    return varbase_to_tensor(reduced)