def split(x, batch_size, dim=0):
    """torch.split shim: cut ``x`` along ``dim`` into chunks.

    Args:
        x: input tensor.
        batch_size: chunk size (int) or an explicit list of section sizes.
        dim: axis to split along.

    Returns:
        List of tensors wrapped with ``convertTensor`` (except the
        whole-tensor fast path, which returns ``[x]`` unwrapped, as before).
    """
    if isinstance(batch_size, int):
        total = x.shape[dim]
        if batch_size > total:
            return [x]  # chunk bigger than the axis: nothing to split
        # Build explicit section sizes so non-divisible lengths work:
        # torch.split leaves a smaller final chunk, whereas paddle.split
        # with an int chunk-count requires the axis to divide evenly.
        sections = [batch_size] * (total // batch_size)
        remainder = total % batch_size
        if remainder:
            sections.append(remainder)
        return [convertTensor(y) for y in paddle.split(x, sections, dim)]
    # Caller already supplied section sizes; forward them unchanged.
    return [convertTensor(y) for y in paddle.split(x, batch_size, dim)]
def repeat_interleave(x, repeats, dim=None):
    """torch.repeat_interleave shim built from tile + reshape.

    With ``dim=None`` the input is flattened to a column, tiled ``repeats``
    times along axis 1, and flattened back — matching torch's element-wise
    interleave of the flattened tensor.

    NOTE(review): for an explicit ``dim`` the trick tiles the LAST axis and
    then reshapes with ``orig_shape[dim] = -1``. That ordering is correct
    when ``dim`` is the last or second-to-last axis, but for
    ``dim < ndim - 2`` (e.g. dim=0 on a 3-D tensor) the element order looks
    wrong vs torch.repeat_interleave — TODO confirm against callers.
    """
    orig_shape = list(x.shape)
    if dim is None:
        dim = 1
        # Flatten to a column so a tile along axis 1 interleaves elements.
        x = x.view(-1, 1)
        size = [1] * len(x.shape)
        size[dim] = repeats
        x = paddle.tile(x, size)
        return convertTensor(x).view(-1)
    else:
        if len(orig_shape) == dim + 1:
            # dim is the trailing axis: add a size-1 axis so the tile below
            # (which targets the last axis) repeats individual elements.
            x = x.unsqueeze(-1)
            # x=x.view(-1,1)
        # ``size`` has len(orig_shape) entries; paddle.tile left-pads with 1s
        # when the tensor rank (after unsqueeze) is larger.
        size = [1] * len(orig_shape)
        size[-1] = repeats
        x = paddle.tile(x, size)
        # Collapse the repeated copies back into axis ``dim``.
        orig_shape[dim] = -1
        return convertTensor(x).view(orig_shape)
def index_copy(x: paddorch.Tensor, dim, index, tensor):
    """torch.index_copy-like shim implemented with paddle.scatter.

    NOTE(review): paddle.scatter only writes along axis 0, and for
    ``dim > 0`` the loop below appends ``None`` placeholders that
    ``paddle.concat`` cannot handle — so in practice only ``dim == 0``
    can work here. Confirm no caller passes dim > 0.
    """
    query_key = []
    for k in range(dim):
        query_key.append(None)
    if isinstance(index, Tensor):
        # paddle.scatter wants an integer index tensor.
        index = index.long()
    query_key.append(index)
    # x[tuple(query_key)]=tensor
    query_key = paddle.concat(query_key)
    # Rows of ``x`` selected by ``query_key`` are overwritten with ``tensor``.
    y = convertTensor(paddle.scatter(x, query_key, tensor))
    return y
def gather(x, dim, index):
    """torch.gather shim: pick values of ``x`` along ``dim`` using ``index``.

    Builds a full N-D coordinate list (arange on every axis except ``dim``,
    the caller's indices on ``dim``) and resolves it with paddle.gather_nd.
    """
    out_shape = index.shape
    flat_index = index.flatten()
    axis = dim if dim >= 0 else len(x.shape) + dim

    coords = []
    for k in range(len(x.shape)):
        if k == axis:
            coords.append(flat_index)
            continue
        # Broadcast arange(x.shape[k]) over the index shape, then flatten,
        # so every gathered element carries its own coordinate on axis k.
        bcast_shape = [1] * len(x.shape)
        bcast_shape[k] = x.shape[k]
        arange_k = paddle.reshape(
            paddle.arange(x.shape[k], dtype=index.dtype), bcast_shape)
        coords.append(paddle.expand(arange_k, out_shape).flatten())

    nd_coords = paddle.transpose(paddle.stack(coords), [1, 0])
    gathered = paddle.gather_nd(x, nd_coords).reshape(out_shape)
    return convertTensor(gathered)
def addmm(*args, **kwargs):
    """Forward everything to paddle.addmm and wrap the result."""
    result = paddle.addmm(*args, **kwargs)
    return convertTensor(result)
def log_softmax(x, dim=-1):
    """torch-style log-softmax; ``dim`` maps onto paddle's ``axis``."""
    log_probs = paddle.nn.functional.log_softmax(x, axis=dim)
    return convertTensor(log_probs)
def repeat(x, *size):
    """torch Tensor.repeat shim: sizes arrive as varargs or one iterable."""
    if isinstance(size[0], Iterable):
        # repeat(x, (2, 3)) — unwrap the single iterable argument.
        size = size[0]
    tiled = paddle.tile(x, size)
    return convertTensor(tiled)
def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
    """torch.allclose shim; broadcasts ``other`` up to ``input``'s shape."""
    if input.shape != other.shape:
        other = paddle.expand_as(other, input)
    close = paddle.allclose(input, other, rtol, atol, equal_nan)
    return convertTensor(close)
def index_select(x, dim, index):
    """torch.index_select shim; paddle expects an integer index tensor."""
    selected = paddle.index_select(x, index.astype("int32"), axis=dim)
    return convertTensor(selected)
def exp(x):
    """Element-wise e**x, wrapped as a paddorch tensor."""
    out = paddle.exp(x)
    return convertTensor(out)
def argsort(x, dim=-1, descending=False):
    """Indices that would sort ``x`` along ``dim`` (torch.argsort shim)."""
    order = paddle.argsort(x, axis=dim, descending=descending)
    return convertTensor(order)
def unique(x):
    """Distinct elements of ``x`` (torch.unique shim)."""
    distinct = paddle.unique(x)
    return convertTensor(distinct)
def tanh(x):
    """Element-wise hyperbolic tangent.

    Uses ``paddle.tanh`` instead of the deprecated ``fluid.layers.tanh`` so
    this module stays on the modern paddle API used elsewhere in the file.
    """
    return convertTensor(paddle.tanh(x))
def sigmoid(x):
    """Element-wise logistic sigmoid.

    Uses ``paddle.nn.functional.sigmoid`` instead of the deprecated
    ``fluid.layers.sigmoid``, matching the ``paddle.nn.functional`` calls
    used elsewhere in this file (log_softmax, softmax).
    """
    return convertTensor(paddle.nn.functional.sigmoid(x))
def LongTensor(x):
    """Build an int64 tensor from an int, a python list, or a tensor-like."""
    if isinstance(x, int):
        # A bare scalar becomes a 1-element tensor, like torch.LongTensor([x]).
        return Tensor(paddle.to_tensor([x]))
    if isinstance(x, list):
        x = paddle.to_tensor(x, dtype="int64")
    return convertTensor(x.astype("int64"))
def softmax(x, dim=-1, dtype=None):
    """torch-style softmax over ``dim``.

    NOTE(review): ``dtype`` is accepted for torch API compatibility but is
    currently ignored.
    """
    probs = paddle.nn.functional.softmax(x, axis=dim)
    return convertTensor(probs)
def diag(x):
    """torch.diag shim: diagonal matrix from a vector, or a matrix diagonal."""
    d = paddle.diag(x, offset=0, padding_value=0, name=None)
    return convertTensor(d)
def unqueeze(x, dim):
    """Insert a size-1 axis at ``dim`` (torch.unsqueeze shim).

    The public name carries a historical typo ("unqueeze"); it is kept so
    existing callers keep working, with a correctly spelled alias below.
    """
    return convertTensor(paddle.unsqueeze(x, axis=dim))


# Correctly spelled, backward-compatible alias for new call sites.
unsqueeze = unqueeze
def fmod(x, y):
    """Element-wise remainder (torch.fmod shim).

    A plain int divisor is promoted to a 1-element float32 tensor with
    ``paddle.to_tensor`` (the previous ``paddle.Tensor(np.array(...))``
    round-trip was unnecessary and bypassed paddle's public constructor).

    NOTE(review): ``paddle.floor_mod`` uses floor semantics while
    ``torch.fmod`` truncates toward zero, so results differ for negative
    operands — confirm callers only pass non-negative values.
    """
    if isinstance(y, int):
        y = paddle.to_tensor([y], dtype="float32")
    return convertTensor(paddle.floor_mod(x, y))
def reshape(x, shape):
    """Return ``x`` viewed with the given ``shape``."""
    reshaped = paddle.reshape(x, shape)
    return convertTensor(reshaped)
def clamp(x, min=None, max=None):
    """Limit values to [min, max] (torch.clamp maps onto paddle.clip)."""
    clipped = paddle.clip(x, min=min, max=max)
    return convertTensor(clipped)
def uniform_(shape, low, high):
    """Float32 tensor of ``shape`` sampled uniformly from [low, high).

    NOTE(review): despite the trailing underscore this is NOT in-place — it
    builds and returns a fresh tensor. ``seed=0`` lets paddle use the
    global/system seed rather than a fixed one — confirm that reading.
    """
    sample = paddle.uniform(shape, dtype='float32', min=low, max=high, seed=0)
    return convertTensor(sample)
def cos(x):
    """Element-wise cosine."""
    out = paddle.cos(x)
    return convertTensor(out)
def full(shape, fill_value, dtype="float32", device="cpu"):
    """Tensor of ``shape`` filled with ``fill_value`` (torch.full shim).

    ``device`` is accepted for torch API compatibility but ignored —
    placement is controlled globally by paddle. The previous code passed
    the device string as paddle.full's ``name`` argument, which only
    mislabelled the op; that misuse is removed.
    """
    return convertTensor(paddle.full(shape, fill_value, dtype=dtype))
def erf(x):
    """Element-wise Gauss error function."""
    out = paddle.erf(x)
    return convertTensor(out)
def sort(x, axis=-1, descending=False):
    """Sorted values of ``x`` along ``axis`` (torch.sort shim, values only).

    The default axis is now -1 to match ``torch.sort``; the old default of 1
    raised on 1-D input (axis out of range) and silently diverged from torch
    on tensors with more than two dimensions. Explicit-axis callers are
    unaffected.

    NOTE(review): torch.sort returns (values, indices); this shim returns
    only the sorted values — confirm callers do not unpack two results.
    """
    return convertTensor(
        paddle.sort(x, axis=axis, descending=descending, name=None))
def logical_or(a, b):
    """Element-wise logical OR of two tensors."""
    result = paddle.logical_or(a, b)
    return convertTensor(result)
def randperm(n):
    """Random permutation of the integers 0..n-1.

    NOTE(review): dtype is int32 here, whereas torch.randperm yields int64 —
    confirm downstream indexing accepts int32.
    """
    perm = paddle.randperm(n, dtype='int32', name=None)
    return convertTensor(perm)
def relu(x):
    """Element-wise ReLU (max(x, 0)).

    Uses ``paddle.nn.functional.relu`` instead of the deprecated
    ``paddle.fluid.layers.relu``, matching the ``paddle.nn.functional``
    calls used elsewhere in this file.
    """
    return convertTensor(paddle.nn.functional.relu(x))
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):
    """Matrix/vector norm (torch.norm shim).

    NOTE(review): ``out`` and ``dtype`` are accepted for torch API
    compatibility but currently ignored.
    """
    value = paddle.norm(input, p=p, axis=dim, keepdim=keepdim)
    return convertTensor(value)