Example #1
    def __call__(self, engine: Engine,
                 batch: Tuple[Tensor, Tensor]) -> Dict[str, Tensor]:
        self.net.train()
        self.opt.zero_grad()

        x, _ = batch
        x = x.to(self.device)  # Tensor.to is not in-place; the result must be reassigned

        z_mean, z_std = self.net.encode(x)
        z = self.net.sample(z_mean, z_std)
        x_recon = self.net.decode(z)

        loss_dict = vae_loss(x_recon, x, z_mean, z_std)
        loss_dict["loss"].backward()
        self.opt.step()

        return loss_dict
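
A step callable like this is normally handed to an ignite Engine, which invokes it as process_function(engine, batch). Below is a minimal wiring sketch; the TrainStep name and its net/opt/device constructor arguments are hypothetical, since the class around __call__ is not shown in the snippet.

from ignite.engine import Engine

# Hypothetical construction: the snippet's class presumably stores
# net, opt, and device, but its __init__ is not shown.
step = TrainStep(net=vae, opt=optimizer, device="cuda")
trainer = Engine(step)                    # Engine calls step(engine, batch)
trainer.run(train_loader, max_epochs=10)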
Example #2
    def __call__(self, engine: Engine,
                 batch: Tuple[Tensor, Tensor]) -> Dict[str, Tensor]:
        self.net.eval()

        x, _ = batch
        x = x.to(self.device)

        with torch.no_grad():
            z_mean, z_std = self.net.encode(x)
            z = self.net.sample(z_mean, z_std)
            x_recon = self.net.decode(z)

            loss_dict = vae_loss(x_recon, x, z_mean, z_std)

        return loss_dict
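
Both snippets delegate to a vae_loss helper that returns a dict with a "loss" entry. Its definition is not included; the following is a minimal sketch consistent with the (z_mean, z_std) call signature above, assuming an MSE reconstruction term and a standard Gaussian KL penalty:

from typing import Dict

import torch
import torch.nn.functional as F
from torch import Tensor

def vae_loss(x_recon: Tensor, x: Tensor,
             z_mean: Tensor, z_std: Tensor) -> Dict[str, Tensor]:
    batch = x.size(0)
    # Reconstruction term: summed squared error, averaged over the batch.
    recon = F.mse_loss(x_recon, x, reduction="sum") / batch
    # KL(N(z_mean, z_std^2) || N(0, 1)) in closed form.
    kl = -0.5 * torch.sum(
        1 + 2 * z_std.log() - z_mean.pow(2) - z_std.pow(2)) / batch
    return {"loss": recon + kl, "recon_loss": recon, "kl_loss": kl}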
Example #3
for batch_idx in range(num_iters):
    # train GCN
    optimizer_vae.zero_grad()
    gcn_vae.train()
    z = gcn_vae(adj_norm, feat)
    adj_h = torch.mm(z, z.t())
    vae_train_loss = reconstruction_loss(adj_label, adj_h, norm)
    vae_train_loss.backward()
    optimizer_vae.step()

    # train MLP
    optimizer_mlp.zero_grad()
    mlp.train()
    z_mean, z_log_std = mlp(feat)
    mlp_train_loss = vae_loss(z_mean, z_log_std, adj_label)
    mlp_train_loss.backward()
    optimizer_mlp.step()
    if batch_idx % 10 == 0:
        print('GCN [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            batch_idx, num_iters, 100. * batch_idx / num_iters,
            vae_train_loss.item()))
        print('MLP [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            batch_idx, num_iters, 100. * batch_idx / num_iters,
            mlp_train_loss.item()))

        with torch.no_grad():
            ...  # (evaluation code omitted)
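
reconstruction_loss is also undefined in this snippet. For graph autoencoders of this style it is typically a norm-weighted binary cross-entropy between the inner-product logits z z^T and the labeled adjacency; the sketch below assumes exactly that, with pos_weight as an optional extra for sparse graphs:

import torch.nn.functional as F

def reconstruction_loss(adj_label, adj_h, norm, pos_weight=None):
    # adj_h holds the decoder logits (z @ z.t()); norm rescales the
    # loss to compensate for the 0/1 imbalance of sparse adjacencies.
    return norm * F.binary_cross_entropy_with_logits(
        adj_h, adj_label, pos_weight=pos_weight)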
Example #4
    # train R-GCN
    optimizer.zero_grad()
    gcn_step.train()
    # loss = recursive_loss(gcn_step, adj, feat, size_update)
    loss = recursive_loss_with_noise(gcn_step, adj, feat, size_update, norm)
    loss.backward()
    train_loss += loss.item()
    optimizer.step()

    # train GCN
    optimizer_vae.zero_grad()
    gcn_vae.train()
    adj_vae_norm = torch.from_numpy(preprocess_graph(adj.numpy()))
    z_mean, z_log_std = gcn_vae(adj_vae_norm, feat)
    vae_train_loss = vae_loss(z_mean, z_log_std, adj, norm)
    vae_train_loss.backward()
    optimizer_vae.step()

    # train mlp
    optimizer_mlp.zero_grad()
    mlp.train()
    z_mean, z_log_std = mlp(feat)
    mlp_train_loss = vae_loss(z_mean, z_log_std, adj, norm)
    mlp_train_loss.backward()
    optimizer_mlp.step()

    if batch_idx % 10 == 0:
        info = 'R-GCN [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            batch_idx, len(dataloader),
            100. * batch_idx / len(dataloader), loss.item())
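
In this example vae_loss takes (z_mean, z_log_std, adj, norm), i.e. a log-std parameterization over node embeddings. Here is a sketch in the spirit of the usual graph-VAE objective (weighted adjacency reconstruction plus KL); the exact decoder and weighting in the original code are assumptions:

import torch
import torch.nn.functional as F

def vae_loss(z_mean, z_log_std, adj, norm):
    n = z_mean.size(0)
    # Reparameterized sample and inner-product decoder.
    z = z_mean + torch.randn_like(z_mean) * torch.exp(z_log_std)
    recon = norm * F.binary_cross_entropy_with_logits(z @ z.t(), adj.float())
    # KL(N(mu, sigma^2) || N(0, 1)) with sigma = exp(z_log_std).
    kl = -0.5 / n * torch.sum(
        1 + 2 * z_log_std - z_mean.pow(2) - torch.exp(2 * z_log_std))
    return recon + kl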