Example #1
    def forward(self, context):
        batch_size, _ = context.size()
        context = self.fc(context)

        # Mixture weights: with hard=True, gumbel_softmax returns a one-hot
        # selection over components that is still differentiable.
        pi = self.pi_net(context)
        pi = F.gumbel_softmax(pi, tau=self.gumbel_temp, hard=True, eps=1e-10)
        pi = pi.unsqueeze(1)  # [batch_sz x 1 x n_components]

        mus = self.context_to_mu(context)
        logsigmas = self.context_to_logsigma(context)

        # Clamp log-variances for numerical stability before exponentiating.
        # mus = torch.clamp(mus, -30, 30)
        logsigmas = torch.clamp(logsigmas, -20, 20)

        stds = torch.exp(0.5 * logsigmas)

        # Reparameterization trick: sample every component at once, then let
        # the one-hot pi pick out a single component per batch element via bmm.
        epsilons = gVar(
            torch.randn([batch_size, self.n_components * self.z_size]))

        zi = (epsilons * stds + mus).view(batch_size, self.n_components,
                                          self.z_size)
        z = torch.bmm(pi, zi).squeeze(1)  # [batch_sz x z_sz]
        mu = torch.bmm(pi, mus.view(batch_size, self.n_components,
                                    self.z_size))
        logsigma = torch.bmm(
            pi, logsigmas.view(batch_size, self.n_components, self.z_size))
        return z, mu, logsigma
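The forward above samples from a mixture-of-Gaussians prior: gumbel_softmax with hard=True yields a one-hot component selector, and bmm then picks out the selected component's reparameterized sample. A minimal self-contained sketch of those two steps; the batch of 4, 3 components, and z_size of 8 are made-up sizes for illustration:

import torch
import torch.nn.functional as F

batch, n_comp, z_size = 4, 3, 8
logits = torch.randn(batch, n_comp)
# Straight-through Gumbel-softmax: one-hot in the forward pass,
# soft gradients in the backward pass.
pi = F.gumbel_softmax(logits, tau=1.0, hard=True).unsqueeze(1)  # [4, 1, 3]
mus = torch.randn(batch, n_comp * z_size)
stds = torch.exp(0.5 * torch.randn(batch, n_comp * z_size).clamp(-20, 20))
zi = (torch.randn_like(stds) * stds + mus).view(batch, n_comp, z_size)
sample = torch.bmm(pi, zi).squeeze(1)  # [4, 8]: the chosen component's sample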
Example #2
    def forward(self, context):
        batch_size, _ = context.size()
        context = self.fc(context)
        mu = self.context_to_mu(context)
        logsigma = self.context_to_logsigma(context)

        # mu = torch.clamp(mu, -30, 30)
        logsigma = torch.clamp(logsigma, -20, 20)  # numerical stability
        std = torch.exp(0.5 * logsigma)
        # Reparameterization trick: a differentiable sample from N(mu, std^2).
        epsilon = gVar(torch.randn([batch_size, self.z_size]))
        z = epsilon * std + mu
        return z, mu, logsigma
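Example #2 is the plain VAE reparameterization trick: z = mu + std * epsilon is a sample from N(mu, std^2) that stays differentiable with respect to mu and logsigma. A minimal sketch (the 4x16 shape is an assumption):

import torch

mu = torch.zeros(4, 16, requires_grad=True)
logsigma = torch.zeros(4, 16, requires_grad=True)
std = torch.exp(0.5 * logsigma.clamp(-20, 20))
z = torch.randn_like(std) * std + mu  # differentiable sample from N(mu, std^2)
z.sum().backward()
print(mu.grad is not None, logsigma.grad is not None)  # True True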
Example #3
    def forward(self, inputs, input_lens=None, noise=False):
        if self.embedding is not None:
            inputs = self.embedding(inputs)

        batch_size, seq_len, emb_size = inputs.size()
        inputs = F.dropout(inputs, self.dropout, self.training)

        # Packing is disabled here, so padded timesteps are fed to the RNN
        # as-is; the branches below are kept for when need_pack is enabled.
        need_pack = False
        if input_lens is not None and need_pack:
            input_lens_sorted, indices = input_lens.sort(descending=True)
            inputs_sorted = inputs.index_select(0, indices)
            inputs = pack_padded_sequence(inputs_sorted,
                                          input_lens_sorted.data.tolist(),
                                          batch_first=True)

        init_hidden = gVar(
            torch.zeros(self.n_layers * (1 + self.bidirectional), batch_size,
                        self.hidden_size))
        if self.rnn_class == 'lstm':
            init_hidden = (init_hidden, init_hidden)
        hids, h_n = self.rnn(inputs, init_hidden)
        if self.rnn_class == 'lstm':
            h_n = h_n[0]
        if input_lens is not None and need_pack:
            # noinspection PyUnboundLocalVariable
            _, inv_indices = indices.sort()
            hids, lens = pad_packed_sequence(hids, batch_first=True)
            hids = hids.index_select(0, inv_indices)
            h_n = h_n.index_select(1, inv_indices)
        h_n = h_n.view(self.n_layers, (1 + self.bidirectional), batch_size,
                       self.hidden_size)
        h_n = h_n[-1]
        enc = h_n.transpose(1, 0).contiguous().view(batch_size, -1)
        # Optionally perturb the encoding with Gaussian noise.
        if noise and self.noise_radius > 0:
            gauss_noise = gVar(
                torch.normal(mean=torch.zeros(enc.size()),
                             std=self.noise_radius))
            enc = enc + gauss_noise

        return enc, hids
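Since need_pack is hard-coded to False, both packing branches above are dead and the RNN sees the padded positions. For reference, a standalone sketch of the sort/pack/unpack/unsort sequence those branches implement (the GRU sizes and lengths are made up):

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

rnn = torch.nn.GRU(input_size=8, hidden_size=16, batch_first=True)
inputs = torch.randn(3, 5, 8)              # batch of 3, max length 5
lens = torch.tensor([5, 2, 4])
lens_sorted, indices = lens.sort(descending=True)
packed = pack_padded_sequence(inputs.index_select(0, indices),
                              lens_sorted.tolist(), batch_first=True)
hids, h_n = rnn(packed)
hids, _ = pad_packed_sequence(hids, batch_first=True)
_, inv_indices = indices.sort()
hids = hids.index_select(0, inv_indices)   # outputs back in original order
h_n = h_n.index_select(1, inv_indices)     # h_n is [num_layers, batch, hidden]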
Example #4
    def forward(self, context, context_lens, utt_lens, floors, noise=False):
        batch_size, max_context_len, max_utt_len = context.size()
        utts = context.view(-1, max_utt_len)

        utt_lens = utt_lens.view(-1)
        utt_encs, _ = self.utt_encoder(utts, utt_lens)
        utt_encs = utt_encs.view(batch_size, max_context_len, -1)

        # One-hot encode the speaker (floor) labels via scatter_.
        floor_one_hot = gVar(torch.zeros(floors.numel(), 2))
        floor_one_hot.data.scatter_(1, floors.view(-1, 1), 1)
        floor_one_hot = floor_one_hot.view(-1, max_context_len, 2)
        utt_floor_encs = torch.cat([utt_encs, floor_one_hot], 2)

        utt_floor_encs = F.dropout(utt_floor_encs, 0.25, self.training)
        context_lens_sorted, indices = context_lens.sort(descending=True)
        utt_floor_encs = utt_floor_encs.index_select(0, indices)
        utt_floor_encs = pack_padded_sequence(
            utt_floor_encs,
            context_lens_sorted.data.tolist(),
            batch_first=True)

        init_hidden = gVar(torch.zeros(1, batch_size, self.hidden_size))
        if self.rnn_class == 'lstm':
            hids, h_n = self.rnn(utt_floor_encs, (init_hidden, init_hidden))
            h_n = h_n[0]
        else:
            hids, h_n = self.rnn(utt_floor_encs, init_hidden)
        _, inv_indices = indices.sort()
        h_n = h_n.index_select(1, inv_indices)
        enc = h_n.transpose(1, 0).contiguous().view(batch_size, -1)

        # Optionally perturb the encoding with Gaussian noise.
        if noise and self.noise_radius > 0:
            gauss_noise = gVar(
                torch.normal(mean=torch.zeros(enc.size()),
                             std=self.noise_radius))
            enc = enc + gauss_noise
        return enc
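The floor (speaker) labels are turned into one-hot vectors with scatter_ before being concatenated onto each utterance encoding. That step in isolation (the 2x3 label tensor is an assumption):

import torch

floors = torch.tensor([[0, 1, 0], [1, 1, 0]])     # [batch=2, context_len=3]
floor_one_hot = torch.zeros(floors.numel(), 2)
floor_one_hot.scatter_(1, floors.view(-1, 1), 1)  # write 1 at each label's column
floor_one_hot = floor_one_hot.view(-1, 3, 2)      # [2, 3, 2], one-hot per turn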
Example #5
    def sampling(self,
                 init_hidden,
                 context,
                 maxlen,
                 SOS_tok,
                 EOS_tok,
                 mode='greedy'):
        batch_size = init_hidden.size(0)
        decoded_words = np.zeros((batch_size, maxlen), dtype=np.int64)
        sample_lens = np.zeros(batch_size, dtype=np.int64)

        decoder_input = gVar(
            torch.LongTensor([[SOS_tok] * batch_size]).view(batch_size, 1))
        decoder_input = self.embedding(
            decoder_input) if self.embedding is not None else decoder_input
        decoder_input = torch.cat(
            [decoder_input, context.unsqueeze(1)],
            2) if context is not None else decoder_input
        decoder_hidden = init_hidden.unsqueeze(0)
        if self.rnn_class == 'lstm':
            decoder_hidden = (decoder_hidden, decoder_hidden)
        for di in range(maxlen):
            decoder_output, decoder_hidden = self.rnn(decoder_input,
                                                      decoder_hidden)
            decoder_output = self.out(decoder_output)
            if mode == 'greedy':
                topi = decoder_output[:, -1].max(1, keepdim=True)[1]
            elif mode == 'sample':
                topi = torch.multinomial(
                    F.softmax(decoder_output[:, -1], dim=1), 1)
            else:
                raise ValueError('unknown sampling mode: %s' % mode)
            decoder_input = self.embedding(
                topi) if self.embedding is not None else topi
            decoder_input = torch.cat(
                [decoder_input, context.unsqueeze(1)],
                2) if context is not None else decoder_input
            ni = topi.squeeze(1).data.cpu().numpy()  # token ids for this step
            decoded_words[:, di] = ni

        # A sample's length counts tokens up to (but excluding) the first EOS.
        for i in range(batch_size):
            for word in decoded_words[i]:
                if word == EOS_tok:
                    break
                sample_lens[i] = sample_lens[i] + 1
        return decoded_words, sample_lens
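At each step the decoder either takes the argmax over the final timestep's logits (greedy) or draws one token from the softmax distribution (sample). The two branches in isolation, on dummy logits (shapes are assumptions):

import torch
import torch.nn.functional as F

decoder_output = torch.randn(4, 1, 100)   # [batch, steps, vocab]
last = decoder_output[:, -1]              # logits for the newest position
greedy = last.max(1, keepdim=True)[1]                   # [4, 1] argmax ids
sampled = torch.multinomial(F.softmax(last, dim=1), 1)  # [4, 1] sampled ids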