Code Example #1
def video_latent(nets, args, x_src, y_list, z_list, psi, fname):
    x_src.stop_gradient = True

    latent_dim = z_list[0].size(1)
    s_list = []
    for i, y_trg in enumerate(y_list):
        # estimate the domain's average style code from 10k random latents
        z_many = porch.randn(10000, latent_dim)
        y_many = porch.LongTensor(10000).fill_(y_trg[0])
        s_many = nets.mapping_network(z_many, y_many)
        s_avg = porch.mean(s_many, dim=0, keepdim=True)
        s_avg = s_avg.repeat(x_src.size(0), 1)

        for z_trg in z_list:
            s_trg = nets.mapping_network(z_trg, y_trg)
            # truncation trick: interpolate between the average and sampled styles
            s_trg = porch.lerp(s_avg, s_trg, psi)
            s_list.append(s_trg)

    s_prev = None
    video = []
    # interpolate between consecutive styles in s_list
    for idx_ref, s_next in enumerate(tqdm(s_list, 'video_latent',
                                          len(s_list))):
        if s_prev is None:
            s_prev = s_next
            continue
        if idx_ref % len(z_list) == 0:
            s_prev = s_next
            continue
        frames = interpolate(nets, args, x_src, s_prev, s_next).cpu()
        video.append(frames)
        s_prev = s_next
    # hold the last frame briefly at the end of the video
    for _ in range(10):
        video.append(frames[-1:])
    video = tensor2ndarray255(porch.cat(video))
    save_video(fname, video)
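
For reference, porch.lerp(s_avg, s_trg, psi) above is the style-truncation trick. A minimal NumPy sketch of the arithmetic (toy values, not from the repo):

import numpy as np

# lerp(a, b, t) == a + t * (b - a)
# psi = 0 collapses to the average style; psi = 1 keeps the sampled style.
s_avg = np.zeros(4)
s_trg = np.ones(4)
psi = 0.7
print(s_avg + psi * (s_trg - s_avg))  # -> [0.7 0.7 0.7 0.7]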
Code Example #2
    def _preprocess(self, edge_list_path, node_label_path):
        with open(edge_list_path) as f:
            edge_list = []
            node2id = defaultdict(int)  # membership is checked explicitly below, so a plain dict would also do
            for line in f:
                x, y = list(map(int, line.split()))
                # Reindex
                if x not in node2id:
                    node2id[x] = len(node2id)
                if y not in node2id:
                    node2id[y] = len(node2id)
                # store both directions to make the graph undirected
                edge_list.append([node2id[x], node2id[y]])
                edge_list.append([node2id[y], node2id[x]])

        num_nodes = len(node2id)
        with open(node_label_path) as f:
            nodes = []
            labels = []
            label2id = defaultdict(int)
            for line in f:
                x, label = list(map(int, line.split()))
                if label not in label2id:
                    label2id[label] = len(label2id)
                nodes.append(node2id[x])
                if "hindex" in self.name:
                    labels.append(label)
                else:
                    labels.append(label2id[label])
            if "hindex" in self.name:
                median = np.median(labels)
                labels = [int(label > median) for label in labels]
        assert num_nodes == len(set(nodes))
        y = torch.zeros(num_nodes, len(label2id))
        y[nodes, labels] = 1  # one-hot label matrix: row i marks node i's class
        return torch.LongTensor(edge_list).t(), y, node2id
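
The last two lines build the label matrix with a single fancy-indexed assignment; a self-contained sketch of just that step (toy values):

import torch

num_nodes, num_classes = 4, 3
nodes = [0, 1, 2, 3]   # node ids after reindexing
labels = [2, 0, 1, 2]  # class id per node
y = torch.zeros(num_nodes, num_classes)
y[nodes, labels] = 1   # row i gets a 1 in column labels[i]
print(y)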
Code Example #3
    def _preprocess(self, root, name):
        dict_path = os.path.join(root, name + ".dict")
        graph_path = os.path.join(root, name + ".graph")

        with open(graph_path) as f:
            edge_list = []
            node2id = defaultdict(int)
            f.readline()  # skip the first line (header)
            for line in f:
                x, y, t = list(map(int, line.split()))
                # Reindex
                if x not in node2id:
                    node2id[x] = len(node2id)
                if y not in node2id:
                    node2id[y] = len(node2id)
                # repeat t times
                for _ in range(t):
                    # to undirected
                    edge_list.append([node2id[x], node2id[y]])
                    edge_list.append([node2id[y], node2id[x]])

        name_dict = dict()
        with open(dict_path) as f:
            for line in f:
                # use node_name here to avoid shadowing the name argument
                node_name, str_x = line.split("\t")
                x = int(str_x)
                if x not in node2id:
                    node2id[x] = len(node2id)
                name_dict[node_name] = node2id[x]

        num_nodes = len(node2id)

        return torch.LongTensor(edge_list).t(), name_dict, node2id
Code Example #4
    def forward(self, x, y):
        out = self.main(x)
        out = porch.Tensor(out)
        out = out.view(out.size(0), -1)  # (batch, num_domains)
        # advanced indexing out[idx, y] is unsupported, so gather the
        # (row, y[row]) entries explicitly with porch.take
        s = porch.take(
            out, list(zip(range(y.shape[0]),
                          y.numpy().astype(int).tolist())))
        return s  # (batch,)
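
The out[idx, y] gather that porch.take emulates is ordinary advanced indexing in PyTorch; a PyTorch-only sketch (toy shapes):

import torch

out = torch.randn(4, 3)               # (batch, num_domains)
y = torch.tensor([0, 2, 1, 2])        # (batch,)
s = out[torch.arange(y.shape[0]), y]  # picks out[i, y[i]] for each row i
print(s.shape)                        # torch.Size([4])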
Code Example #5
File: wing.py Project: zzz2010/starganv2_paddle
def preprocess(x):
    """Preprocess 98-dimensional heatmaps."""
    N, C, H, W = x.size()
    x = truncate(x)
    x = normalize(x)

    sw = H // 256  # scale shifts relative to a 256-pixel base resolution
    operations = Munch(chin=OPPAIR(0, 3),
                       eyebrows=OPPAIR(-7 * sw, 2),
                       nostrils=OPPAIR(8 * sw, 4),
                       lipupper=OPPAIR(-8 * sw, 4),
                       liplower=OPPAIR(8 * sw, 4),
                       lipinner=OPPAIR(-2 * sw, 3))

    for part, ops in operations.items():
        start, end = index_map[part]
        x[:, start:end] = resize(shift(x[:, start:end], ops.shift), ops.resize)

    zero_out = porch.cat([
        porch.arange(0, index_map.chin.start),
        porch.arange(index_map.chin.end, 33),
        porch.LongTensor([
            index_map.eyebrowsedges.start, index_map.eyebrowsedges.end,
            index_map.lipedges.start, index_map.lipedges.end
        ])
    ])
    x[:, zero_out] = 0

    start, end = index_map.nose
    x[:, start + 1:end] = shift(x[:, start + 1:end], 4 * sw)
    x[:, start:end] = resize(x[:, start:end], 1)

    start, end = index_map.eyes
    x[:, start:end] = resize(x[:, start:end], 1)
    x[:, start:end] = resize(shift(x[:, start:end], -8), 3) + \
        shift(x[:, start:end], -24)

    # Second-level mask
    x2 = deepcopy(x)
    x2[:, index_map.chin.start:index_map.chin.end] = 0          # was 0:33
    x2[:, index_map.lipedges.start:index_map.lipinner.end] = 0  # was 76:96
    x2[:, index_map.eyebrows.start:index_map.eyebrows.end] = 0  # was 33:51

    x = porch.sum(x, dim=1, keepdim=True)  # (N, 1, H, W)
    x2 = porch.sum(x2, dim=1, keepdim=True)  # mask without faceline and mouth

    x[x != x] = 0   # set NaN entries to zero
    x2[x != x] = 0  # zero x2 at the positions where x was NaN
    return x.clamp_(0, 1), x2.clamp_(0, 1)
Code Example #6
File: utils.py Project: zzz2010/starganv2_paddle
def translate_using_latent(nets, args, x_src, y_trg_list, z_trg_list, psi,
                           filename):
    x_src.stop_gradient = True
    N, C, H, W = x_src.size()
    latent_dim = z_trg_list[0].size(1)
    x_concat = [x_src]
    masks = nets.fan.get_heatmap(x_src) if args.w_hpf > 0 else None

    for i, y_trg in enumerate(y_trg_list):
        # estimate the domain's average style code from 10k random latents
        z_many = porch.randn(10000, latent_dim)
        y_many = porch.LongTensor(10000).fill_(y_trg[0])
        s_many = nets.mapping_network(z_many, y_many)
        s_avg = porch.mean(s_many, dim=0, keepdim=True)
        s_avg = s_avg.repeat(N, 1)

        for z_trg in z_trg_list:
            s_trg = nets.mapping_network(z_trg, y_trg)
            # truncation trick: pull the sampled style toward the average style
            s_trg = porch.lerp(s_avg, s_trg, psi)
            x_fake = nets.generator(x_src, s_trg, masks=masks)
            x_concat += [x_fake]

    x_concat = porch.cat(x_concat, dim=0)
    save_image(x_concat, N, filename)
Code Example #7
        out_model_fn = "../expr/checkpoints/afhq/100000_nets_ema.ckpt/mapping_network.pdparams"
        mapping_network_ema.load_state_dict(porch.load(out_model_fn))

    else:
        mapping_network_ema = core.model.MappingNetwork(
            16, 64, 2)  # copy.deepcopy(mapping_network)
        out_model_fn = "../expr/checkpoints/celeba_hq/100000_nets_ema.ckpt/mapping_network.pdparams"
        mapping_network_ema.load_state_dict(porch.load(out_model_fn))

    d_optimizer = fluid.optimizer.AdamOptimizer(
        learning_rate=lr, parameter_list=mapping_network_ema.parameters())
    from tqdm import tqdm

    mapping_network_ema.train()
    z_train_p = porch.Tensor(z_train)
    y_train_p = porch.LongTensor(y_train)
    m_out_train_p = porch.Tensor(m_out_train)
    best_loss = 100000000
    for ii in range(100000000000):  # effectively an endless training loop
        # sample a random contiguous mini-batch
        st = np.random.randint(0, z_train_p.shape[0] - batch_size)
        out = mapping_network_ema(z_train_p[st:st + batch_size],
                                  y_train_p[st:st + batch_size])
        d_avg_cost = fluid.layers.mse_loss(
            out, m_out_train_p[st:st + batch_size]
        )  #+fluid.layers.mse_loss(out1,m_out_train_1p)+fluid.layers.mse_loss(out2,m_out_train_2p)

        d_avg_cost.backward()
        d_optimizer.minimize(d_avg_cost)
        mapping_network_ema.clear_gradients()
        if ii % 99 == 0:
            print("d_avg_cost", d_avg_cost.numpy())
Code Example #8
File: pickle_test.py Project: zzz2010/paddorch
import pickle

import numpy as np
import paddorch

# round-trip a FloatTensor through pickle
a = paddorch.FloatTensor(np.array([0.9, 0.1]))
with open("tensor.pkl", "wb") as f:
    pickle.dump(a, f)
with open("tensor.pkl", "rb") as f:
    b = pickle.load(f)
print(b)

# round-trip a LongTensor the same way
a = paddorch.LongTensor(np.array([9, 1]))
with open("tensor.pkl", "wb") as f:
    pickle.dump(a, f)
with open("tensor.pkl", "rb") as f:
    b = pickle.load(f)
print(b)
Code Example #9
        if graph_file.is_file() and graph_file.name in todo
    ]
    #  graphs = [yuxiao_kdd17_graph_to_dgl(graph_file) for graph_file in graph_dir.iterdir() if graph_file.is_file() \
    #          and graph_file.name.find("soc-Friendster-SNAP.txt.lpm.lscc") == -1 \
    #          and graph_file.name.find("soc-Facebook-NetRep.txt.lpm.lscc") == -1 \
    #          and graph_file.suffix == '.lscc']
    #  graphs = []
    #  for name in ["cs", "physics"]:
    #      g = Coauthor(name)[0]
    #      g.remove_nodes((g.in_degrees() == 0).nonzero().squeeze())
    #      g.readonly()
    #      graphs.append(g)
    #  for name in ["computers", "photo"]:
    #      g = AmazonCoBuy(name)[0]
    #      g.remove_nodes((g.in_degrees() == 0).nonzero().squeeze())
    #      g.readonly()
    #      graphs.append(g)
    # sort graphs by size (largest first) and record their node counts
    graphs.sort(key=lambda g: g.number_of_nodes(), reverse=True)
    graph_sizes = torch.LongTensor([g.number_of_nodes() for g in graphs])
    for i, g in enumerate(graphs):
        # drop all node/edge features before saving
        g.ndata.clear()
        g.edata.clear()
        #  model = ProNE(args.embed_dim, step=5, mu=0.2, theta=0.5)
        #  emb = model.train(g.to_networkx()).astype(np.float32)
        #  g.ndata['prone'] = torch.from_numpy(emb)
        print(i, g, graph_sizes[i])
    logger.info("save graphs to %s", args.save_file)
    save_graphs(
        filename=args.save_file, g_list=graphs, labels={"graph_sizes": graph_sizes}
    )
Code Example #10
def batcher_dev(batch):
    # collate a list of (graph, label) pairs into one batched DGL graph
    # and a label tensor
    graph_q, label = zip(*batch)
    graph_q = dgl.batch(graph_q)
    return graph_q, torch.LongTensor(label)
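
A collate function like this is typically handed to a DataLoader; a minimal sketch with a made-up toy dataset (illustrative only):

import dgl
import torch
from torch.utils.data import DataLoader

dataset = [(dgl.graph(([0, 1], [1, 2])), i % 2) for i in range(8)]
loader = DataLoader(dataset, batch_size=4, collate_fn=batcher_dev)
for graph_q, labels in loader:
    print(graph_q.batch_size, labels)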
Code Example #11
import torch
N_dim = 200
N_dim2 = N_dim
torch.manual_seed(0)
a = torch.randn(N_dim2, N_dim).to_sparse().requires_grad_(False)
a_dense = a.to_dense().numpy()
b = torch.randn(N_dim, N_dim2, requires_grad=True)

torch_y = torch.sparse.mm(a, b)
print(torch_y)
import paddorch
import paddle
import numpy as np

# rebuild the same sparse and dense matrices in paddorch
a = paddorch.sparse.FloatTensor(
    paddorch.LongTensor(a._indices().detach().numpy()),
    paddorch.FloatTensor(a._values().detach().numpy()), (N_dim2, N_dim))
b = paddorch.from_numpy(b.detach().numpy())
b_param = paddorch.nn.Parameter(b)
b.stop_gradient = False        # track gradients for b
a.values.stop_gradient = False
y = paddorch.sparse.mm(a, b)
# compare against the torch result
print("max diff", np.max(np.abs(torch_y.detach().numpy() - y.numpy())))
c = a.to_dense()

import time
before = time.time()
for _ in range(6):  # time repeated sparse matmuls
    y = paddorch.sparse.mm(a, b)

    # y = paddorch.mm(c, b )
Code Example #12
        y = torch.sparse.mm(a, b)

    b = torch.cat([b, y], dim=1)

y.sum().backward()

after = time.time()
print("time:", after - before)

# print("max grad", torch.max(a.grad))
if device == "cuda":
    import sys
    sys.exit()

with fluid.dygraph.guard(place=place):
    a = paddorch.sparse.FloatTensor(paddorch.LongTensor(np.stack([I, J])),
                                    paddorch.FloatTensor(V), (N_dim2, N_dim))
    b = paddorch.from_numpy(b_np)
    b_param = paddorch.nn.Parameter(b)
    b.stop_gradient = False
    a.values.stop_gradient = False

    import time
    before = time.time()
    for _ in range(6):
        y = paddorch.sparse.mm(a, b)

        b = paddorch.cat([b, y], dim=1)
        break

    y.sum().backward()
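
For context, I, J, and V above are presumably the COO row indices, column indices, and values of the sparse matrix from the previous example. A PyTorch-only sketch of that construction (toy values):

import numpy as np
import torch

I = np.array([0, 1, 2])
J = np.array([1, 2, 0])
V = np.array([1.0, 2.0, 3.0], dtype=np.float32)
a = torch.sparse_coo_tensor(np.stack([I, J]), V, (3, 3))
print(a.to_dense())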
Code Example #13
File: wing.py Project: zzz2010/paddorch
def preprocess(x):
    """Preprocess 98-dimensional heatmaps."""
    N, C, H, W = x.shape
    x = truncate(x)
    x = normalize(x)

    sw = H // 256  # scale shifts relative to a 256-pixel base resolution
    operations = Munch(chin=OPPAIR(0, 3),
                       eyebrows=OPPAIR(-7 * sw, 2),
                       nostrils=OPPAIR(8 * sw, 4),
                       lipupper=OPPAIR(-8 * sw, 4),
                       liplower=OPPAIR(8 * sw, 4),
                       lipinner=OPPAIR(-2 * sw, 3))

    for part, ops in operations.items():
        start, end = index_map[part]
        # torch.copy(src, dst) emulates the in-place slice assignment
        # x[:, start:end] = resize(shift(...), ...)
        torch.copy(resize(shift(x[:, start:end], ops.shift), ops.resize),
                   x[:, start:end])

    zero_out = torch.cat([
        torch.arange(0, index_map.chin.start),
        torch.arange(index_map.chin.end, 33),
        torch.LongTensor([
            index_map.eyebrowsedges.start, index_map.eyebrowsedges.end,
            index_map.lipedges.start, index_map.lipedges.end
        ])
    ])

    x.fill_(val=0, dim=1, indices=zero_out.numpy())
    # x_numpy=x.numpy()
    # x_numpy[:,  zero_out.numpy()] = 0
    # x.set_value(x_numpy)
    # torch.copy(target)
    # x[:, zero_out] = 0

    start, end = index_map.nose
    torch.copy(shift(x[:, start + 1:end], 4 * sw), x[:, start + 1:end])
    torch.copy(resize(x[:, start:end], 1), x[:, start:end])
    # x[:, start+1:end] = shift(x[:, start+1:end], 4*sw)
    # x[:, start:end] = resize(x[:, start:end], 1)

    start, end = index_map.eyes

    torch.copy(resize(x[:, start:end], 1), x[:, start:end])
    torch.copy(resize(shift(x[:, start:end], -8), 3) + \
        shift(x[:, start:end], -24), x[:, start:end] )

    # x[:, start:end] = resize(x[:, start:end], 1)
    # x[:, start:end] = resize(shift(x[:, start:end], -8), 3) + \
    #     shift(x[:, start:end], -24)

    # Second-level mask
    x2 = x.clone()  # deepcopy(x)
    x2.fill_(0, 1, list(range(index_map.chin.start, index_map.chin.end)))
    x2.fill_(0, 1, list(range(index_map.lipedges.start,
                              index_map.lipinner.end)))
    x2.fill_(0, 1, list(range(index_map.eyebrows.start,
                              index_map.eyebrows.end)))
    # x2[:, index_map.chin.start:index_map.chin.end] = 0  # start:end was 0:33
    # x2[:, index_map.lipedges.start:index_map.lipinner.end] = 0  # start:end was 76:96
    # x2[:, index_map.eyebrows.start:index_map.eyebrows.end] = 0  # start:end was 33:51

    x = torch.sum(x, dim=1, keepdim=True)  # (N, 1, H, W)
    x2 = torch.sum(x2, dim=1, keepdim=True)  # mask without faceline and mouth

    # zero out any sample that contains NaNs (x != x marks NaN entries)
    x_numpy = x.numpy()
    zero_indices = np.where(x_numpy != x_numpy)[0]
    x.fill_(0, 0, zero_indices)
    x2.fill_(0, 0, zero_indices)
    # x[x != x] = 0  # set nan to zero
    # x2[x != x] = 0  # set nan to zero
    return x.clamp_(0, 1), x2.clamp_(0, 1)
Code Example #14
import paddorch
from paddorch import index_copy_inplace_nograd

# copy the rows of k into memory at rows 1 and 3
memory = paddorch.zeros((4, 3))
k = paddorch.arange(0, 6).view(2, 3)
out_ids = paddorch.LongTensor([1, 3])

index_copy_inplace_nograd(memory, 0, out_ids, k)
print("paddorch", memory)

import torch

# the same operation with PyTorch's built-in index_copy_
memory = torch.zeros((4, 3))
k = torch.arange(0, 6).view(2, 3).float()
out_ids = torch.LongTensor([1, 3])

memory.index_copy_(0, out_ids, k)
print("pytorch", memory)