Example #1
0
    def forward(self, prepared):
        """Encode a prepared batch, sample a latent code via the
        reparameterization trick, and decode it into predicted images.

        All returned tensors are moved to CPU.
        """
        device = module_device(self)
        loc, log_variance = self.encoder(prepared.to(device))

        # Alternatives:
        # - RelaxedBernoulli
        # - RelaxedCategorical
        # - RelaxedCategorical * Codebook

        # Reparameterization trick: sample = mean + eps * std,
        # where std = exp(log_variance / 2).
        std = torch.exp(0.5 * log_variance)
        encoding = loc + torch.randn_like(loc) * std

        return architecture.PredictionBatch(
            images=self.decoder(encoding).cpu(),
            loc=loc.cpu(),
            log_variance=log_variance.cpu(),
            encoding=encoding.cpu(),
        )
 def forward(
     self, standard_image: architecture.StandardImageBatch
 ) -> ImagePredictionBatch:
     """Score a standardized image batch with the real/fake head and
     return the resulting logits wrapped in an ImagePredictionBatch."""
     data = standard_image.data.to(module_device(self))
     return ImagePredictionBatch(logits=self.real(data))
Example #3
0
 def forward(self, standard_image: architecture.StandardImageBatch) -> architecture.LatentBatch:
     """Encode a standardized image batch into a latent batch (moved to CPU)."""
     data = standard_image.data.to(module_device(self))
     encoding = self.latent(data).cpu()
     return architecture.LatentBatch(encoding=encoding)
Example #4
0
 def forward(
         self, latent: architecture.LatentBatch
 ) -> architecture.StandardImageBatch:
     """Decode a latent batch back into a standardized image batch (on CPU)."""
     encoding = latent.encoding.to(module_device(self))
     return architecture.StandardImageBatch(data=self.image(encoding).cpu())
Example #5
0
 def forward(self,
             latent: architecture.LatentBatch) -> LatentPredictionBatch:
     """Score a latent batch with the real/fake head, returning its logits."""
     # These will not be the same generated as the ones used to create the
     # images, is that a problem?
     encoding = latent.encoding.to(module_device(self))
     return LatentPredictionBatch(logits=self.real(encoding))
Example #6
0
 def forward(self, prepared):
     """Generate an image batch from a prepared input tensor.

     The decoded images are moved to CPU before being wrapped.
     """
     device = module_device(self)
     images = self.image(prepared.to(device)).cpu()
     return architecture.PredictionBatch(images=images)
Example #7
0
 def generated(self, n, scale=0.9):
     """Sample `n` latent codes from scale * N(0, I) and decode them.

     The latent dimensionality is hard-coded as 1 * 4 * 4 = 16 —
     presumably to match the decoder's expected input; confirm against
     the decoder's definition.
     """
     device = module_device(self)
     encoding = scale * torch.randn((n, 1 * 4 * 4), device=device)
     return architecture.PredictionBatch(images=self.decoder(encoding).cpu())
Example #8
0
 def predictions(self, features: List[architecture.StandardizedImage]):
     """Stack per-image feature tensors into one batch and run `forward`."""
     batch = torch.stack([feature.data for feature in features])
     return self.forward(batch.to(module_device(self)))