import torch

# `to_var` is assumed to be a project helper that wraps a tensor and moves it
# to the active device (e.g. GPU if available).


def sample_latent(self, batch_size):
    # Two-latent variant: draw separate syntactic and semantic codes from the prior N(0, I).
    syntax_latent = to_var(torch.randn([batch_size, self.latent_size]))
    semantic_latent = to_var(torch.randn([batch_size, self.latent_size]))
    return {
        "syn_z": syntax_latent,
        "sem_z": semantic_latent,
    }
def sampling(self, mean, logv, is_sampling=True):
    # Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I).
    # When sampling is disabled (e.g. deterministic evaluation), return the mean directly.
    if is_sampling:
        std = torch.exp(0.5 * logv)
        z = to_var(torch.randn([mean.size(0), self.latent_size]))
        z = z * std + mean
    else:
        z = mean
    return z
def hidden_to_latent(self, ret, is_sampling):
    # Flatten the encoder's final hidden state (layers * directions, batch, dim)
    # into (batch, enc_hidden_dim * hidden_factor) before projecting to the latent space.
    hidden = ret["hidden"]
    batch_size = hidden.size(1)
    hidden = hidden.permute(1, 0, 2).contiguous()
    if self.hidden_factor > 1:
        hidden = hidden.view(batch_size, self.args.enc_hidden_dim * self.hidden_factor)
    else:
        # squeeze(1) rather than squeeze() so a batch of size 1 keeps its batch dimension.
        hidden = hidden.squeeze(1)

    mean = self.hidden2mean(hidden)
    logv = self.hidden2logv(hidden)
    if is_sampling:
        # Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I).
        std = torch.exp(0.5 * logv)
        z = to_var(torch.randn([batch_size, self.latent_size]))
        z = z * std + mean
    else:
        z = mean

    ret["latent"] = z
    ret["mean"] = mean
    ret["logv"] = logv
    return ret
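# The projections `hidden2mean` / `hidden2logv` and the `hidden_factor` attribute used
# above are not defined in this excerpt. Below is a minimal sketch of the assumed
# constructor; the argument names (`latent_size`, `bidirectional`, `enc_num_layers`,
# `enc_hidden_dim`) are illustrative assumptions, not confirmed by this file.

import torch.nn as nn


class _LatentProjectionSketch(nn.Module):
    """Hypothetical owner of the methods above, shown for illustration only."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.latent_size = args.latent_size
        # One hidden vector per encoder layer and direction.
        self.hidden_factor = (2 if args.bidirectional else 1) * args.enc_num_layers
        flat_dim = args.enc_hidden_dim * self.hidden_factor
        self.hidden2mean = nn.Linear(flat_dim, self.latent_size)
        self.hidden2logv = nn.Linear(flat_dim, self.latent_size)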
def sample_latent(self, batch_size):
    # Single-latent variant: draw one code per example from the prior N(0, I).
    z = to_var(torch.randn([batch_size, self.latent_size]))
    return {"latent": z}
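# Hedged usage sketch of how these pieces fit together, assuming an encoder that
# returns {"hidden": ...} and a decoder consuming ret["latent"]; `encoder` and
# `decoder` are hypothetical names, not confirmed by this excerpt.
#
# Training / reconstruction path:
#     ret = model.encoder(batch)                        # ret["hidden"]: (layers * dirs, B, H)
#     ret = model.hidden_to_latent(ret, is_sampling=True)
#     recon = model.decoder(ret["latent"], batch)       # KL term uses ret["mean"], ret["logv"]
#
# Unconditional generation path:
#     prior = model.sample_latent(batch_size=8)         # {"latent": z} or {"syn_z", "sem_z"}
#     samples = model.decoder(prior["latent"])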