Example #1
    def divergence_loss(self, posterior, prior):
        # Structured case: sum the divergence layer by layer and log the
        # per-layer contributions to TensorBoard.
        if isinstance(posterior, DistributionList) and isinstance(
                prior, DistributionList):
            result = 0.0
            scalars = {}
            for idx, (s, o) in enumerate(zip(posterior.items, prior.items)):
                match_result = match(s, o)
                scalars[f"layer {idx}"] = float(match_result)
                result = result + match_result
            self.writer.add_scalars("layer kld", scalars, self.step_id)
            return result

        # Simple case: a single posterior/prior pair.
        return match(posterior, prior)
Example #2
    def forward(self, inputs):
        # Encode, sample a latent code with the reparameterisation trick,
        # and store the divergence to a standard normal prior as self.loss.
        posterior = self.encoder(inputs)
        code = posterior.rsample()
        prior = Normal(torch.zeros_like(code), torch.ones_like(code))
        self.loss = match(posterior, prior)
        reconstruction = self.decoder(code)
        return reconstruction
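
None of the snippets show the training step that drives such a forward pass. Below is a self-contained sketch of how a module like this is typically trained, using a toy linear encoder/decoder and torch.distributions.kl_divergence as a stand-in for match; the TinyVAE class, its dimensions, and the MSE reconstruction term are all assumptions made for illustration, not parts of the library.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal, kl_divergence

class TinyVAE(nn.Module):
    # Toy stand-in for the module above: a linear encoder parameterising a
    # diagonal Gaussian posterior and a linear decoder.
    def __init__(self, data_dim=16, code_dim=4):
        super().__init__()
        self.mean = nn.Linear(data_dim, code_dim)
        self.log_scale = nn.Linear(data_dim, code_dim)
        self.decoder = nn.Linear(code_dim, data_dim)

    def encoder(self, inputs):
        return Normal(self.mean(inputs), self.log_scale(inputs).exp())

    def forward(self, inputs):
        posterior = self.encoder(inputs)
        code = posterior.rsample()
        prior = Normal(torch.zeros_like(code), torch.ones_like(code))
        # kl_divergence stands in for match here.
        self.loss = kl_divergence(posterior, prior).sum(dim=-1).mean()
        return self.decoder(code)

vae = TinyVAE()
optimizer = torch.optim.Adam(vae.parameters(), lr=1e-3)
inputs = torch.randn(32, 16)
reconstruction = vae(inputs)
# ELBO-style objective: reconstruction error plus the stored divergence.
loss = F.mse_loss(reconstruction, inputs) + vae.loss
loss.backward()
optimizer.step()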
Example #3
    def match(self, other):
        # Sum the pairwise divergences of the contained distributions.
        result = 0.0
        for s, o in zip(self.items, other.items):
            match_result = match(s, o)
            result = result + match_result
        return result
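
The examples call match without defining it; judging by the logged names ("layer kld", "kullback leibler", "reconstruction"), it behaves as a divergence that dispatches on its arguments. The following is a minimal, hypothetical sketch assuming a KL divergence for distribution pairs and a summed squared error for plain tensors; it is an illustration of the pattern, not the library's actual implementation.

import torch
from torch.distributions import Distribution, Normal, kl_divergence

def match(source, target):
    # Hypothetical dispatch: KL divergence for distribution pairs,
    # summed squared error for plain tensors (e.g. data vs. reconstruction).
    if isinstance(source, Distribution) and isinstance(target, Distribution):
        return kl_divergence(source, target).sum(dim=-1)
    return ((source - target) ** 2).sum(dim=-1)

# Divergence between a diagonal Gaussian posterior and a unit prior:
posterior = Normal(torch.zeros(8, 4), 0.5 * torch.ones(8, 4))
prior = Normal(torch.zeros(8, 4), torch.ones(8, 4))
print(match(posterior, prior).shape)  # torch.Size([8])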
Example #4
    def stack_loss(self, task_args, scale_args):
        # Average the task loss over the levels of the stack, logging each level.
        task_loss = 0.0
        count = 0
        for idx, ta in enumerate(zip(*task_args)):
            level_loss = self.task_loss(*ta)
            self.current_losses[f"level {idx}"] = float(level_loss)
            task_loss += level_loss
            count += 1
        task_loss = task_loss / count
        self.current_losses["task"] = float(task_loss)
        # Match the prior against the detached posterior of the scales.
        prior_loss = match(MatchableList(scale_args.prior),
                           MatchableList(detach(scale_args.posterior)))
        self.current_losses["prior"] = float(prior_loss)
        policy_loss = self.policy_loss(task_args, scale_args)
        self.current_losses["policy"] = float(policy_loss)
        # Weighted sum of the three terms.
        result = self.task_weight * task_loss
        result = result + self.prior_weight * prior_loss
        result = result + self.policy_weight * policy_loss
        return result
Example #5
    def prior_loss(self, data, reconstruction, prior, codes):
        # ELBO-style objective: reconstruction term plus KL term, both logged.
        rec_loss = match(data, reconstruction).mean()
        kl_loss = match(prior, codes).mean()
        self.current_losses["kullback leibler"] = float(kl_loss)
        self.current_losses["reconstruction"] = float(rec_loss)
        return rec_loss + kl_loss
Example #6
    def prior_loss(self, generated_codes, prior_codes):
        # Single divergence between generated and prior codes, logged as "prior".
        result = match(generated_codes, prior_codes).mean()
        self.current_losses["prior"] = float(result)
        return result
Example #7
    def loss(self, reconstruction, target):
        # Reconstruction objective expressed through the same match primitive.
        return match(reconstruction, target)
Example #8
    def divergence_loss(self, posterior, prior):
        # Single posterior/prior pair: delegate directly to match.
        return match(posterior, prior)