Example 1
 def divergence_loss(self, parameters):
     # Unpack the (mean, logvar) pair into the KL divergence term.
     return normal_kl_loss(*parameters)
Example 2
 def loss(self, source_parameters, total_parameters, reconstruction,
          target):
     # Negative-ELBO pattern: reconstruction term plus a KL term built
     # from the total and source posterior parameters.
     # vl: variational-loss helper module (binding not shown on this page).
     loss_val = self.rec_loss(reconstruction, target)
     kld = vl.normal_kl_loss(total_parameters, source_parameters)
     return loss_val + kld
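Example 2 (like Examples 3 and 4 below) follows the standard negative-ELBO pattern, a reconstruction term plus a KL term:

    -\mathrm{ELBO}(x) = \mathbb{E}_{q(z \mid x)}\left[-\log p(x \mid z)\right] + D_{\mathrm{KL}}\big(q(z \mid x) \,\|\, p(z)\big)

Here rec_loss plays the role of the first expectation and normal_kl_loss supplies the KL term.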
Example 3
 def loss(self, parameters, approximation, target):
     # Reconstruction term: summed binary cross-entropy on the decoder
     # logits, averaged over the batch (func: torch.nn.functional).
     loss_val = func.binary_cross_entropy_with_logits(
         approximation, target, reduction="sum") / target.size(0)
     # One KL term per latent level in a hierarchy of (mean, logvar) pairs.
     for mean, logvar in parameters:
         loss_val += vl.normal_kl_loss(mean, logvar)
     return loss_val
Example 4
 def loss(self, parameters, prior_parameters, approximation, target):
     # Same reconstruction term as Example 3.
     loss_val = func.binary_cross_entropy_with_logits(
         approximation, target, reduction="sum") / target.size(0)
     # KL of each posterior (mean, logvar) pair against the matching
     # prior pair, rather than against a fixed standard normal.
     for p, p_r in zip(parameters, prior_parameters):
         loss_val += vl.normal_kl_loss(*p, *p_r)
     return loss_val
Example 5
 def divergence_loss(self, parameters, prior_parameters):
     # KL divergence between posterior and prior diagonal Gaussians,
     # each given as a (mean, log-variance) pair.
     mu, lv = parameters
     mu_r, lv_r = prior_parameters
     return vl.normal_kl_loss(mu, lv, mu_r, lv_r)
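Every example on this page delegates to normal_kl_loss, called either with a single (mean, logvar) pair (Examples 1 and 3) or with posterior and prior pairs (Examples 4 and 5). The library's actual implementation is not shown here; the following is a minimal sketch consistent with those call sites, assuming diagonal Gaussians parameterized by mean and log-variance, with the plain-sum reduction as an assumption:

 import torch

 def normal_kl_loss(mean, logvar, mean_r=None, logvar_r=None):
     # Hypothetical sketch, not the library's code.
     # Two arguments: KL(N(mean, var) || N(0, I)) in closed form.
     if mean_r is None:
         return -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())
     # Four arguments: KL(N(mean, var) || N(mean_r, var_r)) in closed form.
     return 0.5 * torch.sum(
         logvar_r - logvar
         + (logvar.exp() + (mean - mean_r).pow(2)) / logvar_r.exp()
         - 1
     )

Whether the real function also averages over the batch (as the reconstruction terms in Examples 3 and 4 do) cannot be determined from these call sites alone.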
Example 6
 def divergence_loss(self, normal_parameters, decoder_parameters):
     # Total-correlation penalty plus the standard KL term, combined
     # with a gamma weight (FactorVAE-style objective).
     tc_loss = vl.tc_encoder_loss(*decoder_parameters)
     div_loss = vl.normal_kl_loss(*normal_parameters)
     return div_loss + self.gamma * tc_loss
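Example 6 augments the KL term with a gamma-weighted total-correlation penalty, the pattern used by FactorVAE-style objectives. tc_encoder_loss is not defined on this page; the total correlation it presumably estimates is

    \mathrm{TC}(z) = D_{\mathrm{KL}}\big(q(z) \,\|\, \textstyle\prod_j q(z_j)\big)

which penalizes statistical dependence between the latent dimensions.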