Example #1 (score: 0)
        def train_epoch(epoch):
            """Run one training pass over ``train_loader``.

            For each batch: forward through the autoencoder, compute the
            Chamfer reconstruction loss plus the feature-transform
            regularization, back-propagate, and step the optimizer.
            Appends the epoch-mean of both loss terms to the enclosing
            ``recons_epoch_loss`` / ``transreg_epoch_loss`` lists.

            Args:
                epoch: integer epoch index, used only for log output.
            """
            recons_batch_loss = []
            transreg_batch_loss = []
            for batch_idx, data in enumerate(train_loader):
                # NOTE(review): Variable is a deprecated no-op wrapper in
                # modern PyTorch; data.to(device) alone would suffice.
                x = Variable(data).to(device)
                x_hat, z, trans = self.forward(x)

                chamfer_loss = self.reconstruction_loss(x, x_hat)
                trans_reg_loss = feature_transform_reguliarzer(trans)
                loss = chamfer_loss + trans_reg_loss

                recons_batch_loss.append(chamfer_loss.item())
                transreg_batch_loss.append(trans_reg_loss.item())

                opt.zero_grad()
                loss.backward()
                opt.step()

                # Log every 50 batches. (Removed the dead "and True" whose
                # comment claimed to suppress output but had no effect.)
                if batch_idx % 50 == 0:
                    print(
                        'Train Epoch: {} [{}/{} ({:.0f}%)]\t Chamfer Loss: {:.6f}; Trans Reg Loss: {:.6f}'
                        .format(epoch, batch_idx * len(data),
                                len(train_loader.dataset),
                                100. * batch_idx / len(train_loader),
                                chamfer_loss.item(), trans_reg_loss.item()))

            recons_epoch_loss.append(np.mean(recons_batch_loss))
            transreg_epoch_loss.append(np.mean(transreg_batch_loss))
Example #2 (score: 0)
    def forward(self, pred, target, trans_feat):
        """Classification loss: negative log-likelihood on *pred* vs
        *target*, plus the feature-transform regularization term scaled
        by ``self.mat_diff_loss_scale``.
        """
        # https://pytorch.org/docs/stable/generated/torch.nn.NLLLoss.html
        nll = F.nll_loss(pred, target)
        reg = feature_transform_reguliarzer(trans_feat)
        return nll + reg * self.mat_diff_loss_scale
Example #3 (score: 0)
 def forward(self, pred, target, trans_feat, weight):
     """Class-weighted NLL classification loss plus the scaled
     feature-transform regularization penalty."""
     base = F.nll_loss(pred, target, weight=weight)
     penalty = feature_transform_reguliarzer(trans_feat) * self.mat_diff_loss_scale
     return base + penalty