def forward(self, prepared):
    loc, log_variance = self.encoder(prepared.to(module_device(self)))
    # Alternatives:
    # - RelaxedBernoulli
    # - RelaxedCategorical
    # - RelaxedCategorical * Codebook
    # Reparameterization trick: encoding = loc + sigma * epsilon,
    # with sigma = exp(0.5 * log_variance).
    encoding = loc + torch.randn_like(loc) * torch.exp(0.5 * log_variance)
    return architecture.PredictionBatch(
        images=self.decoder(encoding).cpu(),
        loc=loc.cpu(),
        log_variance=log_variance.cpu(),
        encoding=encoding.cpu(),
    )

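# For reference: the reparameterized sample above is equivalent to calling
# rsample() on torch.distributions.Normal. A minimal, self-contained sketch,
# assuming only standard PyTorch (the helper name sample_encoding is
# hypothetical, not part of this repo):
import torch
from torch.distributions import Normal

def sample_encoding(loc: torch.Tensor, log_variance: torch.Tensor) -> torch.Tensor:
    # sigma = exp(log_variance / 2); rsample() draws loc + sigma * epsilon
    # so gradients flow through loc and scale (the reparameterization trick).
    scale = torch.exp(0.5 * log_variance)
    return Normal(loc, scale).rsample()
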
def forward(
    self, standard_image: architecture.StandardImageBatch
) -> ImagePredictionBatch:
    return ImagePredictionBatch(
        logits=self.real(standard_image.data.to(module_device(self)))
    )

def forward(
    self, standard_image: architecture.StandardImageBatch
) -> architecture.LatentBatch:
    return architecture.LatentBatch(
        encoding=self.latent(
            standard_image.data.to(module_device(self))
        ).cpu()
    )

def forward(
    self, latent: architecture.LatentBatch
) -> architecture.StandardImageBatch:
    return architecture.StandardImageBatch(
        data=self.image(latent.encoding.to(module_device(self))).cpu()
    )

def forward(self, latent: architecture.LatentBatch) -> LatentPredictionBatch:
    # These will not be the same generated latents as the ones used to
    # create the images - is that a problem?
    return LatentPredictionBatch(
        logits=self.real(latent.encoding.to(module_device(self)))
    )

def forward(self, prepared):
    return architecture.PredictionBatch(
        images=self.image(prepared.to(module_device(self))).cpu()
    )

def generated(self, n, scale=0.9):
    # Sample n latent codes from a scaled standard-normal prior; scale < 1
    # concentrates draws toward the prior mean (a truncation-style trick).
    encoding = torch.randn((n, 1 * 4 * 4), device=module_device(self)) * scale
    return architecture.PredictionBatch(images=self.decoder(encoding).cpu())

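# Hedged usage sketch for generated(): draw unconditional samples straight
# from the prior (the name model is hypothetical; it stands for an instance
# of the module above):
#
#     batch = model.generated(16)             # 16 images from scaled prior codes
#     batch = model.generated(16, scale=1.0)  # sample from the full prior
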
def predictions(self, features: List[architecture.StandardizedImage]):
    return self.forward(
        torch.stack([feature.data for feature in features]).to(module_device(self))
    )
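
# Hedged usage sketch for predictions(): wrap per-image tensors in
# StandardizedImage and score them as one stacked batch (model, images, and
# the data= keyword are assumptions based on the attribute access above):
#
#     features = [architecture.StandardizedImage(data=image) for image in images]
#     prediction_batch = model.predictions(features)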