Code Example #1
def _train(self):
    for _ in range(self.n_steps_per_epoch):
        self.optimizer.zero_grad()
        # Apply the learned column permutation before comparing to the target.
        y = self.model.matrix()[:, self.perm]
        loss = nn.functional.mse_loss(y, self.target_matrix)
        # When the factor order is learned (not fixed), optionally add a
        # semantic loss that pushes each row of permutation logits toward
        # selecting exactly one entry.
        if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
            semantic_loss = semantic_loss_exactly_one(
                nn.functional.log_softmax(self.model.logit, dim=-1))
            loss += self.semantic_loss_weight * semantic_loss.mean()
        loss.backward()
        self.optimizer.step()
    # Report the negated last-step loss (higher is better for the tuner).
    return {'negative_loss': -loss.item()}
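
The snippet above (and both examples below) calls semantic_loss_exactly_one, whose definition is not shown. A minimal sketch, assuming it implements the semantic loss of Xu et al. (2018) for the "exactly one true" constraint, computed in log space for stability:

import torch

def semantic_loss_exactly_one(log_prob):
    # Sketch: -log P(exactly one of the n events is true), treating each
    # entry of the last dimension as an independent Bernoulli with
    # probability exp(log_prob). Input shape (..., n), e.g. from log_softmax.
    prob = log_prob.exp()
    # log(1 - p_j), clamped away from p_j == 1 for numerical safety.
    log_1mp = torch.log1p(-prob.clamp(max=1 - 1e-7))
    # For each i: log p_i + sum_{j != i} log(1 - p_j).
    terms = log_prob + log_1mp.sum(dim=-1, keepdim=True) - log_1mp
    return -torch.logsumexp(terms, dim=-1)

The result has one value per row of logits, which is why the training loops take .mean() before weighting it into the total loss.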
Code Example #2
def _train(self):
    for _ in range(self.n_steps_per_epoch):
        self.optimizer.zero_grad()
        # Compare against the target under the bit-reversal permutation.
        y = self.model.matrix()[:, self.br_perm]
        loss = nn.functional.mse_loss(y, self.target_matrix)
        # Unlike example #1, the semantic loss is always applied here.
        semantic_loss = semantic_loss_exactly_one(
            nn.functional.log_softmax(self.model.logit, dim=-1))
        total_loss = loss + self.semantic_loss_weight * semantic_loss.mean()
        total_loss.backward()
        self.optimizer.step()
    # Note: only the MSE term, not total_loss, is reported as the metric.
    return {'negative_loss': -loss.item()}
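
All three _train methods assume a surrounding trainable object; returning a metrics dict from _train matches the shape of Ray Tune's legacy Trainable API, though nothing here depends on it. A hypothetical skeleton showing the attributes the snippets rely on (the class name and constructor arguments are assumptions, not from the source):

import torch
from torch import nn

class FftLearnPermTrainable:  # hypothetical name
    def __init__(self, model, target_matrix, perm, n_steps_per_epoch=100,
                 semantic_loss_weight=None, lr=1e-3):
        self.model = model                  # exposes .matrix(), .logit, .fixed_order
        self.target_matrix = target_matrix  # dense matrix to approximate
        self.perm = perm                    # column permutation (LongTensor)
        self.br_perm = perm                 # bit-reversal permutation in example #2
        self.n_steps_per_epoch = n_steps_per_epoch
        # Example #1 gates the semantic loss on this attribute existing.
        if semantic_loss_weight is not None:
            self.semantic_loss_weight = semantic_loss_weight
        self._iteration = 0                 # epoch counter used by example #3
        self.optimizer = torch.optim.Adam(model.parameters(), lr=lr)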
Code Example #3
def _train(self):
    # Anneal the softmax temperature toward 0 as epochs progress,
    # sharpening the learned (soft) permutation.
    temperature = 1.0 / (0.3 * self._iteration + 1)
    for _ in range(self.n_steps_per_epoch):
        self.optimizer.zero_grad()
        y = self.model.matrix(temperature)
        loss = nn.functional.mse_loss(y, self.target_matrix)
        if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
            semantic_loss = semantic_loss_exactly_one(
                nn.functional.log_softmax(self.model.logit, dim=-1))
            loss += self.semantic_loss_weight * semantic_loss.mean()
        loss.backward()
        self.optimizer.step()
    # Report the loss of the polished model (helper not shown here) rather
    # than the raw training loss.
    return {'negative_loss': -polished_loss_fft_learn_perm(self)}
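
Example #3 passes a temperature into self.model.matrix. A minimal sketch of the usual pattern, assuming the model relaxes its permutation via a temperature-scaled softmax over logits (the helper name soft_permutation is hypothetical):

import torch
from torch import nn

def soft_permutation(logit, temperature=1.0):
    # Temperature-scaled softmax over permutation logits. As temperature
    # approaches 0, each row approaches a one-hot vector, i.e. the soft
    # permutation hardens into a permutation matrix.
    return nn.functional.softmax(logit / temperature, dim=-1)

# Usage: rows of an (n, n) logit matrix grow more peaked as the
# temperature is annealed, mirroring the schedule in example #3.
logit = torch.randn(8, 8)
for t in (1.0, 0.5, 0.1):
    p = soft_permutation(logit, t)
    print(t, p.max(dim=-1).values.mean().item())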