def test_poisson_nll_loss_reduction_modes(self):
     input = torch.tensor([0.5, 1.5, 2.5], device=device)
     target = torch.tensor([1., 2., 3.], device=device)
     component_wise_loss = torch.exp(input) - target * input
     self.assertEqual(component_wise_loss,
                      F.poisson_nll_loss(input, target, reduction='none'))
     self.assertEqual(torch.sum(component_wise_loss),
                      F.poisson_nll_loss(input, target, reduction='sum'))
     self.assertEqual(torch.mean(component_wise_loss),
                      F.poisson_nll_loss(input, target, reduction='mean'))
     with self.assertRaisesRegex(ValueError, 'is not valid'):
         F.poisson_nll_loss(input, target, reduction='total')
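Note: with the defaults log_input=True and full=False, F.poisson_nll_loss computes exp(input) - target * input element-wise, which is exactly the component_wise_loss the test builds by hand (the Stirling approximation of log(target!) is only added when full=True). A minimal standalone check of the same identity, assuming CPU tensors:

import torch
import torch.nn.functional as F

inp = torch.tensor([0.5, 1.5, 2.5])
tgt = torch.tensor([1., 2., 3.])
manual = torch.exp(inp) - tgt * inp  # per-element Poisson NLL (log_input=True, full=False)
assert torch.allclose(manual, F.poisson_nll_loss(inp, tgt, reduction='none'))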
 def train_batch(self, x, optim):
     """
     Trains on a data batch, with the given optimizer...
     """
     optim.zero_grad()
     if self.use_reparam:
         output, mu, logvar = self(x)
         output += EPS
         loss = loss_function(output, x, mu, logvar)
         loss.backward()
     else:
         output = self(x) + EPS
         if self.loss == 'poisson':
             loss = F.poisson_nll_loss(output,
                                       x,
                                       log_input=False,
                                       full=True,
                                       reduction='sum')
         elif self.loss == 'l1':
             loss = F.l1_loss(output, x, reduction='sum')
         elif self.loss == 'mse':
             loss = F.mse_loss(output, x, reduction='sum')
         loss.backward()
     optim.step()
     self.clamp_m()
     return loss.item()
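Note: with log_input=False, as used in the Poisson branch above, F.poisson_nll_loss evaluates input - target * log(input + eps) per element (eps defaults to 1e-8), so the output += EPS is an additional guard against zero-valued rates rather than something the API requires. The same applies to the second train_batch variant below.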
Example #3
 def forward(self):
     a = torch.randn(3, 2)
     b = torch.rand(3, 2)
     c = torch.rand(3)
     log_probs = torch.randn(50, 16, 20).log_softmax(2).detach()
     targets = torch.randint(1, 20, (16, 30), dtype=torch.long)
     input_lengths = torch.full((16, ), 50, dtype=torch.long)
     target_lengths = torch.randint(10, 30, (16, ), dtype=torch.long)
     return len((
         F.binary_cross_entropy(torch.sigmoid(a), b),
         F.binary_cross_entropy_with_logits(torch.sigmoid(a), b),
         F.poisson_nll_loss(a, b),
         F.cosine_embedding_loss(a, b, c),
         F.cross_entropy(a, b),
         F.ctc_loss(log_probs, targets, input_lengths, target_lengths),
         # F.gaussian_nll_loss(a, b, torch.ones(5, 1)), # ENTER is not supported in mobile module
         F.hinge_embedding_loss(a, b),
         F.kl_div(a, b),
         F.l1_loss(a, b),
         F.mse_loss(a, b),
         F.margin_ranking_loss(c, c, c),
         F.multilabel_margin_loss(self.x, self.y),
         F.multilabel_soft_margin_loss(self.x, self.y),
         F.multi_margin_loss(self.x, torch.tensor([3])),
         F.nll_loss(a, torch.tensor([1, 0, 1])),
         F.huber_loss(a, b),
         F.smooth_l1_loss(a, b),
         F.soft_margin_loss(a, b),
         F.triplet_margin_loss(a, b, -b),
         # F.triplet_margin_with_distance_loss(a, b, -b), # can't take variable number of arguments
     ))
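This forward() appears to exercise each loss functional once so that a mobile/TorchScript export test can check operator coverage; collecting the results in a tuple and returning its length simply forces every call to be evaluated.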
Example #4
 def train_batch(self, x, optim, batches=None):
     """
     Trains on a data batch, with the given optimizer...
     """
     optim.zero_grad()
     if self.use_reparam:
         output, mu, logvar = self.forward(x, batches)
         output += EPS
         loss = loss_function(output, x, mu, logvar)
         loss.backward()
     else:
         output, w = self.forward(x, batches)
         output += EPS
         if self.loss == 'poisson':
             loss = F.poisson_nll_loss(output,
                                       x,
                                       log_input=False,
                                       full=True,
                                       reduction='sum')
         elif self.loss == 'l1':
             loss = F.l1_loss(output, x, reduction='sum')
         elif self.loss == 'mse':
             loss = F.mse_loss(output, x, reduction='sum')
         if self.use_multibatch_loss:
             loss += self.multibatch_loss_weight * multibatch_loss(
                 w, batches, self.num_batches)
         loss.backward()
     optim.step()
     self.clamp_m()
     return loss.item()
    def reconstruction_error(self, input_tensor: torch.Tensor,
                             hidden: torch.Tensor, dim: int) -> torch.Tensor:
        """
        Compute the log probability of the original feature under p(x|z).

        Parameters
        ----------
        input_tensor: torch.Tensor
            Original feature.
        hidden: torch.Tensor
            Latest decoder hidden state.
        dim: int
            Current feature dimension.

        Returns
        -------
        reconstr_error: torch.Tensor
            Log probability of the input feature under the decoder's distribution.
        """
        lambda_ = torch.exp(self.lambda_(hidden)).int().squeeze(1)
        err = F.poisson_nll_loss(input_tensor,
                                 lambda_,
                                 reduction="none",
                                 log_input=True)

        return err
Example #6
def get_loss(loss_function, output, label, use_gpu):
    '''
    Get the objective loss of the model and backpropagate to compute gradients.
    Some loss functions are not implemented.
    '''
    if not isinstance(loss_function, str):
        raise TypeError('loss_function should be str object')
    label = np.asarray(label)

    if loss_function == 'binary_cross_entropy':
        loss = F.binary_cross_entropy(output, label)
    elif loss_function == 'poisson_nll_loss':
        loss = F.poisson_nll_loss(output, label)
    elif loss_function == 'cross_entropy':
        loss = F.cross_entropy(output, label)
    elif loss_function == 'hinge_embedding_loss':
        loss = F.hinge_embedding_loss(output, label)
    elif loss_function == 'margin_ranking_loss':
        loss = F.margin_ranking_loss(output, label)
    elif loss_function == 'multilabel_soft_margin_loss':
        loss = F.multilabel_soft_margin_loss(output, label)
    elif loss_function == 'multi_margin_loss':
        loss = F.multi_margin_loss(output, label)
    elif loss_function == 'nll_loss':
        if use_gpu:
            label = Variable(torch.LongTensor(label).cuda())
        else:
            label = Variable(torch.LongTensor(label))
        loss = F.nll_loss(output, label)
    elif loss_function == 'binary_cross_entropy_with_logits':
        loss = F.binary_cross_entropy_with_logits(output, label)
    else:
        raise NotImplementedError(
            'loss_function {} is not implemented'.format(loss_function))

    return loss
Example #7
 def loss(self, y_pred: Dict[str, torch.Tensor],
          target: torch.Tensor) -> torch.Tensor:
     return F.poisson_nll_loss(super().to_prediction(y_pred),
                               target,
                               log_input=True,
                               full=False,
                               eps=1e-6,
                               reduction="none")
Example #8
def loss_function(recon_x, x, mu, logvar, yhat, y):
    if loss_type=="binary":
        BCE = F.binary_cross_entropy(recon_x, x.view(-1, 68*68), reduction='sum')
    if loss_type=="poisson"
        BCE = F.poisson_nll_loss(recon_x , x, reduction='sum', log_input=True)
    NCE = F.mse_loss(yhat, y, reduction='sum')
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return 0.01 * (BCE + KLD) + NCE
Example #9
 def closure():
     if torch.is_grad_enabled():
         # Clear accumulated gradients before each loss evaluation
         optimizer.zero_grad()
     # Get output from the model
     outputs = self(x)
     # Compute average loss for grad
     loss = F.poisson_nll_loss(outputs, y)
     if loss.requires_grad:
         # Get gradients for parameters
         loss.backward()
     return loss
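This closure pattern is what torch.optim.LBFGS expects: optimizer.step(closure) may invoke the closure several times per step, so the closure itself zeroes the gradients, recomputes the loss, and calls backward() before returning the loss. Usage mirrors the fit() method in Example #13 below:

optimizer = torch.optim.LBFGS(self.parameters(), lr=lr)
optimizer.step(closure)  # LBFGS re-evaluates closure() internally, possibly more than once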
Example #10
def loss_function(recon_x, x, mu, logvar):
    #BCE = poisson_loss(recon_x, x)
    BCE = F.poisson_nll_loss(recon_x,
                             x,
                             log_input=False,
                             full=False,
                             reduction='sum')
    #BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    # see Appendix B from VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return BCE + KLD
Example #11
 def test_poisson_nll_loss(self):
     inp = torch.randn(32,
                       128,
                       device='cuda',
                       dtype=self.dtype,
                       requires_grad=True)
     target = torch.randn(32,
                          128,
                          device='cuda',
                          dtype=self.dtype,
                          requires_grad=False)
     output = F.poisson_nll_loss(inp,
                                 target,
                                 log_input=True,
                                 full=False,
                                 size_average=None,
                                 eps=1e-08,
                                 reduce=None,
                                 reduction='mean')
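Note: size_average and reduce are the deprecated predecessors of reduction; passing None for both, as above, means reduction='mean' alone controls how the per-element losses are aggregated.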
    def configure_criterion(self, y, t):

        criterion = F.cross_entropy(y, t)

        if self.hparams.criterion == "cross_entropy":
            criterion = F.cross_entropy(y, t)
        elif self.hparams.criterion == "binary_cross_entropy":
            criterion = F.binary_cross_entropy(y, t)
        elif self.hparams.criterion == "binary_cross_entropy_with_logits":
            criterion = F.binary_cross_entropy_with_logits(y, t)
        elif self.hparams.criterion == "poisson_nll_loss":
            criterion = F.poisson_nll_loss(y, t)
        elif self.hparams.criterion == "hinge_embedding_loss":
            criterion = F.hinge_embedding_loss(y, t)
        elif self.hparams.criterion == "kl_div":
            criterion = F.kl_div(y, t)
        elif self.hparams.criterion == "l1_loss":
            criterion = F.l1_loss(y, t)
        elif self.hparams.criterion == "mse_loss":
            criterion = F.mse_loss(y, t)
        elif self.hparams.criterion == "margin_ranking_loss":
            criterion = F.margin_ranking_loss(y, t)
        elif self.hparams.criterion == "multilabel_margin_loss":
            criterion = F.multilabel_margin_loss(y, t)
        elif self.hparams.criterion == "multilabel_soft_margin_loss":
            criterion = F.multilabel_soft_margin_loss(y, t)
        elif self.hparams.criterion == "multi_margin_loss":
            criterion = F.multi_margin_loss(y, t)
        elif self.hparams.criterion == "nll_loss":
            criterion = F.nll_loss(y, t)
        elif self.hparams.criterion == "smooth_l1_loss":
            criterion = F.smooth_l1_loss(y, t)
        elif self.hparams.criterion == "soft_margin_loss":
            criterion = F.soft_margin_loss(y, t)

        return criterion
Example #13
    def fit(self, x, y, epochs, optim, lr):
        # Set the model into training mode
        self.train()
        num_cells = y.shape[1]
        device = y.device

        # Initialization
        min_loss = torch.ones(num_cells, dtype=torch.float32,
                              device=device) * 1e9
        best_weight = torch.ones_like(self.linear.weight.data)
        best_bias = torch.ones_like(self.linear.bias.data)

        if optim == 'adam':
            # Use Adam to fit the model
            optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        else:
            # Use LBFGS to fit the model
            optimizer = torch.optim.LBFGS(self.parameters(), lr=lr)

        for epoch in tqdm(range(1, epochs + 1), 'Epoch: ', leave=False):
            if optim == 'adam':
                # Clear accumulated gradients in every epoch
                optimizer.zero_grad()
                # Get output from the model
                outputs = self(x)
                # Compute poisson negative log likelihood loss for each cell
                loss_cells = F.poisson_nll_loss(outputs, y, reduction='none')
                loss_cells = torch.sum(loss_cells, dim=0)

                # Compute average loss for grad
                loss = torch.sum(loss_cells) / num_cells
                # Get gradients for parameters
                loss.backward()
                # Update parameters
                optimizer.step()
            else:

                def closure():
                    if torch.is_grad_enabled():
                        # Clear accumulated gradients before each loss evaluation
                        optimizer.zero_grad()
                    # Get output from the model
                    outputs = self(x)
                    # Compute average loss for grad
                    loss = F.poisson_nll_loss(outputs, y)
                    if loss.requires_grad:
                        # Get gradients for parameters
                        loss.backward()
                    return loss

                # Update parameters
                optimizer.step(closure)

                # Get output from the model
                outputs = self(x)
                # Compute poisson negative log likelihood loss for each cell
                loss_cells = F.poisson_nll_loss(outputs, y, reduction='none')
                loss_cells = torch.sum(loss_cells, dim=0)
                loss = torch.sum(loss_cells) / num_cells

            # Update min loss, best weight/bias for each cell
            mask = loss_cells < min_loss
            min_loss[mask] = loss_cells[mask].detach()  # Don't grad on this tensor
            best_weight[mask] = self.linear.weight.data[mask]
            best_bias[mask] = self.linear.bias.data[mask]

            # if epoch % 200 == 0:
            #     print(f'Epoch: {epoch} Loss: {loss.item()}')
        # print(f'Training end, min loss: {torch.sum(min_loss)/num_cells}')

        return (min_loss.cpu().numpy(), best_weight.cpu().numpy(),
                best_bias.cpu().numpy())
Example #14
def poisson_nll(y_pred, y_true):
    return F.poisson_nll_loss(y_pred, y_true)
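A minimal usage sketch (tensor names are illustrative): with the defaults, y_pred is interpreted as the log of the predicted rate and the result is mean-reduced to a scalar:

log_rate = torch.randn(8, 4)            # predicted log-rates
counts = torch.poisson(log_rate.exp())  # simulated observed counts
loss = poisson_nll(log_rate, counts)    # scalar, reduction='mean' by default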
Example #15
def poisson_nll_loss(input, target, *args, **kwargs):
    return F.poisson_nll_loss(input.F, target, *args, **kwargs)
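Here input.F is presumably the dense feature matrix of a sparse tensor (MinkowskiEngine's SparseTensor, for instance, exposes its features as .F), so the wrapper applies the ordinary dense loss to a sparse network's output.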
Example #16
def poisson_nll_loss_cpu(y_pred: torch.Tensor,
                         y_true: torch.Tensor) -> torch.Tensor:
    y_pred = y_pred.detach().cpu()

    return func.poisson_nll_loss(y_pred, y_true)
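Detaching the prediction and moving it to the CPU means no gradient flows through y_pred, so this variant is suited to logging or validation metrics rather than to training.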
Example #17
def loss_function(recon_x, that, x, t, mu, logvar):
    # BCE = F.binary_cross_entropy(recon_x, x, reduction='sum')
    BCE = F.poisson_nll_loss(recon_x, x, reduction='sum', log_input=True)
    NCE = F.mse_loss(that, t, reduction='sum')
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return 0.01 * (BCE + KLD) + NCE
Example #18
def adjusted_poisson_nll_loss_cpu(y_pred: torch.Tensor, y_true: torch.Tensor,
                                  a: torch.Tensor,
                                  b: torch.Tensor) -> torch.Tensor:
    y_pred = y_pred.detach().cpu()

    return func.poisson_nll_loss(y_pred - torch.log(a) + torch.log(b), y_true)
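Since the default log_input=True treats y_pred as a log-rate, subtracting log(a) and adding log(b) rescales the predicted rate to exp(y_pred) * b / a before the loss is evaluated, which is how an exposure or offset term is typically folded into a Poisson model.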