Code example #1
    def get_latents(self, encodings, probs_b):
        """Read out latents (z) form input encodings for a single segment."""
        readout_mask = probs_b[:, 1:, None]  # Offset readout by one to the left.
        readout = (encodings[:, :-1] * readout_mask).sum(1)
        hidden = F.relu(self.head_z_1(readout))
        logits_z = self.head_z_2(hidden)

        # Gaussian latents.
        if self.latent_dist == 'gaussian':
            if self.training:
                mu, log_var = torch.split(logits_z, self.latent_dim, dim=1)
                sample_z = utils.gaussian_sample(mu, log_var)
            else:
                sample_z = logits_z[:, :self.latent_dim]

        # Concrete / Gumbel softmax latents.
        elif self.latent_dist == 'concrete':
            if self.training:
                sample_z = utils.gumbel_softmax_sample(logits_z,
                                                       temp=self.temp_z)
            else:
                sample_z_idx = torch.argmax(logits_z, dim=1)
                sample_z = utils.to_one_hot(sample_z_idx, logits_z.size(1))
        else:
            raise ValueError('Invalid argument for `latent_dist`.')

        return logits_z, sample_z
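
The `utils` helpers referenced above are not shown on this page. A minimal sketch of what `gaussian_sample`, `gumbel_softmax_sample`, and `to_one_hot` typically look like in PyTorch (an assumption based on standard implementations, not code taken from the project):

    import torch
    import torch.nn.functional as F

    EPS = 1e-17

    def gaussian_sample(mu, log_var):
        """Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I)."""
        eps = torch.randn_like(mu)
        return mu + torch.exp(0.5 * log_var) * eps

    def gumbel_softmax_sample(logits, temp=1.0):
        """Soft sample from a categorical distribution via Gumbel-softmax."""
        uniform = torch.rand_like(logits)
        gumbel = -torch.log(-torch.log(uniform + EPS) + EPS)
        return F.softmax((logits + gumbel) / temp, dim=-1)

    def to_one_hot(indices, num_classes):
        """Convert a batch of class indices to float one-hot vectors."""
        return F.one_hot(indices, num_classes).float()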
Code example #2
    def get_boundaries(self, encodings, segment_id, lengths, training):
        """Get boundaries (b) for a single segment in batch."""
        if segment_id == self.max_num_segments - 1:
            # Last boundary is always placed on last sequence element.
            logits_b = None
            # One-hot mask selecting the last (unpadded) element of each row.
            sample_b = jnp.zeros_like(encodings[:, :, 0])
            sample_b = sample_b.at[jnp.arange(len(lengths)),
                                   lengths - 1].set(1)
        else:
            hidden = nn.relu(self.head_b_1(encodings))
            logits_b = jnp.squeeze(self.head_b_2(hidden), -1)
            # Mask out first position with large neg. value.
            neg_inf = jnp.ones((encodings.shape[0], 1)) * utils.NEG_INF
            # TODO(tkipf): Mask out padded positions with large neg. value.
            logits_b = jnp.concatenate([neg_inf, logits_b[:, 1:]], axis=1)
            if training:
                sample_b = utils.gumbel_softmax_sample(hk.next_rng_key(),
                                                       logits_b,
                                                       temp=self.temp_b)
            else:
                sample_b_idx = jnp.argmax(logits_b, axis=1)
                sample_b = nn.one_hot(sample_b_idx, logits_b.shape[1])

        return logits_b, sample_b
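
Note that this JAX/Haiku port threads randomness explicitly: `hk.next_rng_key()` supplies a PRNG key to the sampler instead of relying on global random state as in the PyTorch version. A minimal sketch of a key-taking sampler under that convention (an assumption, not the project's code):

    import jax
    import jax.numpy as jnp

    def gumbel_softmax_sample(rng, logits, temp=1.0):
        """Gumbel-softmax sample; `rng` is an explicit jax.random.PRNGKey."""
        gumbel = jax.random.gumbel(rng, logits.shape)
        return jax.nn.softmax((logits + gumbel) / temp, axis=-1)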
Code example #3
File: model.py Project: zwjyyc/finch
    def build_train_generator_encoder_graph(self):
        # Encode the real input, sample z (reparameterized) and c, and
        # reconstruct it to obtain the VAE losses.
        z_mean, z_logvar = self.encoder(self.enc_inp, reuse=True)
        z = self.reparam(z_mean, z_logvar)
        c = self.draw_c(self.discriminator(self.enc_inp, reuse=True))
        latent_vec = tf.concat((z, c), -1)
        outputs = self.generator(latent_vec, reuse=True)

        self.train_ge_vae_nll_loss = self.seq_loss_fn(*outputs)
        self.train_ge_vae_kl_w = self.kl_w_fn()
        self.train_ge_vae_kl_loss = self.kl_loss_fn(z_mean, z_logvar)
        vae_loss = self.train_ge_vae_nll_loss + self.train_ge_vae_kl_w * self.train_ge_vae_kl_loss

        # Generate from the priors, then feed the soft (Gumbel-softmax)
        # samples back through the discriminator and encoder to obtain the
        # attribute losses l_attr_c and l_attr_z.
        z_prior = self.draw_z_prior()
        c_prior = self.draw_c_prior()
        latent_vec = tf.concat((z_prior, c_prior), -1)
        _, logits_gen = self.generator(latent_vec, reuse=True)
        self.temperature = self.temperature_fn()
        gumbel_softmax = gumbel_softmax_sample(logits_gen[:, :-1, :], self.temperature)

        c_logits = self.discriminator(gumbel_softmax, reuse=True, refeed=True, gumbel=True)
        self.l_attr_c = self.cross_entropy_fn(c_logits, c_prior)

        z_mean_gen, z_logvar_gen = self.encoder(gumbel_softmax, reuse=True, gumbel=True)
        self.l_attr_z = self.mutinfo_loss_fn(z_mean_gen, z_logvar_gen)

        generator_loss_op = vae_loss + (args.lambda_c*self.l_attr_c) + (args.lambda_z*self.l_attr_z)
        encoder_loss_op = vae_loss

        self.train_generator_op = self.optimizer.apply_gradients(
            self.gradient_clipped(generator_loss_op, scope=self.scopes['G']))
        self.train_encoder_op = self.optimizer.apply_gradients(
            self.gradient_clipped(encoder_loss_op, scope=self.scopes['E']))
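
In this example `gumbel_softmax_sample` is applied to the generator's output logits so that the "discrete" generated sequence stays differentiable, letting the attribute losses `l_attr_c` and `l_attr_z` backpropagate into the generator. A minimal TF1-style sketch of such a helper (an assumption; the project's own implementation is not shown here):

    import tensorflow as tf

    def gumbel_softmax_sample(logits, temperature):
        """Soft categorical sample via Gumbel-softmax over the last axis."""
        eps = 1e-20
        u = tf.random_uniform(tf.shape(logits))
        gumbel = -tf.log(-tf.log(u + eps) + eps)
        return tf.nn.softmax((logits + gumbel) / temperature)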
Code example #4
    def get_boundaries(self, encodings, segment_id, lengths):
        """Get boundaries (b) for a single segment in batch."""
        if segment_id == self.max_num_segments - 1:
            # Last boundary is always placed on last sequence element.
            logits_b = None
            sample_b = torch.zeros_like(encodings[:, :, 0]).scatter_(
                1,
                lengths.unsqueeze(1) - 1, 1)
        else:
            hidden = F.relu(self.head_b_1(encodings))
            logits_b = self.head_b_2(hidden).squeeze(-1)
            # Mask out first position with large neg. value.
            neg_inf = torch.ones(encodings.size(0), 1,
                                 device=encodings.device) * utils.NEG_INF
            # TODO(tkipf): Mask out padded positions with large neg. value.
            logits_b = torch.cat([neg_inf, logits_b[:, 1:]], dim=1)
            if self.training:
                sample_b = utils.gumbel_softmax_sample(logits_b,
                                                       temp=self.temp_b)
            else:
                sample_b_idx = torch.argmax(logits_b, dim=1)
                sample_b = utils.to_one_hot(sample_b_idx, logits_b.size(1))

        return logits_b, sample_b
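
The `scatter_` call in the last-segment branch writes a 1 at index `lengths - 1` of each row, i.e. a one-hot mask over the final unpadded position. A tiny standalone illustration (tensor values are made up for demonstration):

    import torch

    encodings = torch.zeros(2, 5, 8)   # (batch, time, features)
    lengths = torch.tensor([3, 5])     # true (unpadded) sequence lengths
    sample_b = torch.zeros_like(encodings[:, :, 0]).scatter_(
        1, lengths.unsqueeze(1) - 1, 1)
    # sample_b:
    # tensor([[0., 0., 1., 0., 0.],
    #         [0., 0., 0., 0., 1.]])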
Code example #5
    def sample(self, num_samples):
        # Pick a mixture component via Gumbel-softmax over pi_logits, then
        # pass the selected component's mean through a Gumbel-sigmoid sampler.
        index = gumbel_softmax_sample(
            tf.tile(self.pi_logits, [num_samples, 1]), self.temp)
        sample_mus = tf.matmul(index, self.mus)
        return gumbel_sigmoid_sample(sample_mus, self.temp, self.hard)
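
The `hard` flag passed to `gumbel_sigmoid_sample` typically selects a straight-through estimator: the forward pass uses the rounded (binary) sample while gradients flow through the soft sigmoid. A minimal TF1-style sketch under that assumption (not the project's own helper):

    import tensorflow as tf

    def gumbel_sigmoid_sample(logits, temp, hard=False):
        """Binary Gumbel (Gumbel-sigmoid) sample, optionally straight-through."""
        eps = 1e-20
        u = tf.random_uniform(tf.shape(logits))
        # Logistic noise (the difference of two Gumbel variables).
        noise = tf.log(u + eps) - tf.log(1.0 - u + eps)
        soft = tf.sigmoid((logits + noise) / temp)
        if hard:
            # Round on the forward pass; keep the soft gradient on backward.
            hard_sample = tf.round(soft)
            return tf.stop_gradient(hard_sample - soft) + soft
        return soft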