Example #1
import torch


def imagenet_norm(x):
    """Normalize an NCHW batch of RGB images with ImageNet channel statistics."""
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    mean = torch.FloatTensor(mean).view(1, 3, 1, 1).to(x.device)
    std = torch.FloatTensor(std).view(1, 3, 1, 1).to(x.device)
    return (x - mean) / std


def get_preds_fromhm(hm):
    """Return approximate (x, y) peak locations for each heatmap in an
    (N, C, H, W) batch, refined by a 0.25-pixel shift toward the local gradient."""
    max_vals, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
    idx += 1
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)

    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
            # The hard-coded 63 bound assumes 64x64 heatmaps.
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    preds.add_(-0.5)
    return preds
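
# --- Hypothetical smoke test (added for illustration, not part of the original
# snippet): exercise both helpers on random data; the 64x64 heatmap size is
# assumed from the hard-coded 63 bound above. ---
x = torch.rand(2, 3, 256, 256)           # random RGB batch
print(imagenet_norm(x).shape)            # torch.Size([2, 3, 256, 256])

hm = torch.rand(2, 68, 64, 64)           # random heatmaps, 68 landmarks
print(get_preds_fromhm(hm).shape)        # torch.Size([2, 68, 2])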
Example #3
import paddle
import paddorch
import torch
import numpy as np

x = np.arange(2 * 768).reshape((1, 2, 768))
x_t = torch.repeat_interleave(torch.FloatTensor(x), repeats=4, dim=1)
x_p = paddorch.repeat_interleave(paddorch.FloatTensor(x), repeats=4, dim=1)

assert np.max(np.abs(x_t.numpy() - x_p.numpy())) < 0.001, "paddorch result does not match torch"
print(paddorch.repeat_interleave(x_p, 2))

y = paddorch.tensor([[1, 2], [3, 4]])

print(paddorch.repeat_interleave(y, 2))

print(paddorch.repeat_interleave(y, 3, dim=1))
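
# Sketch of a cross-check (added for illustration): repeat_interleave along
# dim=1 should agree with NumPy's repeat on axis 1.
x_np = np.repeat(x, repeats=4, axis=1)                   # shape (1, 8, 768)
assert np.allclose(x_np, x_t.numpy()), "torch result should match np.repeat"
assert np.allclose(x_np, x_p.numpy()), "paddorch result should match np.repeat"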
Example #4
import pickle

import numpy as np
import paddorch

# Pickle round-trip for a float tensor.
a = paddorch.FloatTensor(np.array([0.9, 0.1]))
with open("tensor.pkl", "wb") as f:
    pickle.dump(a, f)

with open("tensor.pkl", "rb") as f:
    b = pickle.load(f)
print(b)

# Pickle round-trip for an integer tensor.
a = paddorch.LongTensor(np.array([9, 1]))
with open("tensor.pkl", "wb") as f:
    pickle.dump(a, f)

with open("tensor.pkl", "rb") as f:
    b = pickle.load(f)
print(b)
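
# Sketch of a value check (added for illustration, relying on the .numpy()
# accessor used in the other examples): the unpickled tensor should carry the
# same values as the one written out above.
assert np.allclose(a.numpy(), b.numpy()), "pickle round-trip changed tensor values"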
Example #5
import time

import numpy as np
import paddle
import paddorch
import torch

N_dim = 200
N_dim2 = N_dim

# Reference result with torch's sparse matmul.
torch.manual_seed(0)
a = torch.randn(N_dim2, N_dim).to_sparse().requires_grad_(False)
a_dense = a.to_dense().numpy()
b = torch.randn(N_dim, N_dim2, requires_grad=True)

torch_y = torch.sparse.mm(a, b)
print(torch_y)

# Rebuild the same sparse matrix and dense operand with paddorch.
a = paddorch.sparse.FloatTensor(
    paddorch.LongTensor(a._indices().detach().numpy()),
    paddorch.FloatTensor(a._values().detach().numpy()), (N_dim2, N_dim))
b = paddorch.from_numpy(b.detach().numpy())
b_param = paddorch.nn.Parameter(b)
b.stop_gradient = False
a.values.stop_gradient = False
y = paddorch.sparse.mm(a, b)
print("max diff", np.max(np.abs(torch_y.detach().numpy() - y.numpy())))
c = a.to_dense()

# Rough timing of repeated sparse matmuls.
before = time.time()
for _ in range(6):
    y = paddorch.sparse.mm(a, b)

    # y = paddorch.mm(c, b)
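
# Sketch of a correctness check (added for illustration): the sparse product
# should agree with an ordinary dense matmul of the same operands.
y_dense = np.matmul(a_dense, b.numpy())
print("sparse vs dense max diff", np.max(np.abs(y.numpy() - y_dense)))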
Example #6
# Fragment continuing the sparse-matmul benchmark; the torch tensors a and b,
# plus device, place, I, J, V and b_np, are assumed to be defined earlier in
# the original script.
before = time.time()
for _ in range(6):
    y = torch.sparse.mm(a, b)

    b = torch.cat([b, y], dim=1)

y.sum().backward()

after = time.time()
print("time:", after - before)

# print("max grad", torch.max(a.grad))
if device == "cuda":
    import sys
    sys.exit()

# Paddorch version of the same benchmark, run inside a dygraph guard.
with fluid.dygraph.guard(place=place):
    a = paddorch.sparse.FloatTensor(
        paddorch.LongTensor(np.stack([I, J])),
        paddorch.FloatTensor(V), (N_dim2, N_dim))
    b = paddorch.from_numpy(b_np)
    b_param = paddorch.nn.Parameter(b)
    b.stop_gradient = False
    a.values.stop_gradient = False

    before = time.time()
    for _ in range(6):
        y = paddorch.sparse.mm(a, b)

        b = paddorch.cat([b, y], dim=1)
        break

    y.sum().backward()
Example #7
File: wing.py  Project: zzz2010/paddorch
import torch


def np2tensor(image):
    """Convert an HxWxC [0, 255] image array to a CxHxW float tensor in [-1, 1]."""
    return torch.FloatTensor(image).permute(2, 0, 1) / 255 * 2 - 1
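
# Hypothetical usage (added for illustration): a random HxWx3 uint8 image comes
# back as a CxHxW float tensor scaled to [-1, 1].
import numpy as np

image = np.random.randint(0, 256, size=(128, 128, 3), dtype=np.uint8)
t = np2tensor(image)
print(t.shape)                              # torch.Size([3, 128, 128])
print(float(t.min()), float(t.max()))       # values fall inside [-1, 1]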