Example #1
    def encode(self, inputs, lengths, fr=0):
        # inputs: (batch, max_len) token ids; lengths: valid length of each sequence.
        bsz, max_len = inputs.size()

        # Expand the learned initial hidden/cell states to the batch size
        # (first dim 2 = num_layers * num_directions of the encoder LSTM).
        e_hidden_init = self.e_hidden_init.expand(
            2, bsz, self.hidden_dim).contiguous()
        e_cell_init = self.e_cell_init.expand(
            2, bsz, self.hidden_dim).contiguous()

        # Sort by length (descending), as required for packing padded sequences.
        lens, indices = torch.sort(lengths, 0, True)

        # Use the French embedding table unless the vocabulary is shared.
        if fr and not self.share_vocab:
            in_embs = self.embedding_fr(inputs)
        else:
            in_embs = self.embedding(inputs)

        # F.dropout is not in-place: assign its result back so it takes effect.
        if self.dropout > 0:
            in_embs = F.dropout(in_embs, p=self.dropout, training=self.training)

        # Use the French encoder unless the encoder is shared across languages.
        if fr and not self.share_encoder:
            all_hids, (enc_last_hid, _) = self.lstm_fr(
                pack(in_embs[indices], lens.tolist(), batch_first=True),
                (e_hidden_init, e_cell_init))
        else:
            all_hids, (enc_last_hid, _) = self.lstm(
                pack(in_embs[indices], lens.tolist(), batch_first=True),
                (e_hidden_init, e_cell_init))

        # Restore the original batch order after unpacking.
        _, _indices = torch.sort(indices, 0)
        all_hids = unpack(all_hids, batch_first=True)[0][_indices]

        # Pool hidden states over time to get one embedding per sequence.
        if self.pool == "max":
            embs = model_utils.max_pool(all_hids, lengths, self.gpu)
        elif self.pool == "mean":
            embs = model_utils.mean_pool(all_hids, lengths, self.gpu)
        return embs
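The sort/pack/unpack/unsort round trip above is the standard way to feed variable-length batches to a PyTorch LSTM. The minimal sketch below isolates that pattern, assuming `pack` and `unpack` are aliases for `torch.nn.utils.rnn.pack_padded_sequence` and `pad_packed_sequence` (an assumption; the imports are not shown in the example) and using made-up tensor sizes.

import torch
from torch.nn.utils.rnn import pack_padded_sequence as pack, pad_packed_sequence as unpack

lstm = torch.nn.LSTM(input_size=16, hidden_size=32, batch_first=True)
x = torch.randn(3, 5, 16)                    # (batch, max_len, emb_dim), hypothetical sizes
lengths = torch.tensor([5, 3, 4])

lens, indices = torch.sort(lengths, 0, True)             # descending lengths for packing
packed_out, _ = lstm(pack(x[indices], lens.tolist(), batch_first=True))
_, inverse = torch.sort(indices, 0)                       # permutation that undoes the sort
all_hids = unpack(packed_out, batch_first=True)[0][inverse]   # (3, 5, 32), original order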
Example #2
def reduce_map(weights, layer_feats, num_split, pool_method):
    # Reduce layer_feats with the pooling strategy selected by pool_method.
    if pool_method == Pools.AVG:
        train_inp = avg_pool(layer_feats, num_split=num_split)
    elif pool_method == Pools.MAX:
        train_inp = max_pool(layer_feats, num_split=num_split)
    else:  # pool_method is random
        train_inp = randomized_pool(weights, layer_feats, num_split=num_split)

    return train_inp
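For reference, the sketch below exercises this dispatch with hypothetical stand-ins: the `Pools` enum and the NumPy-based `avg_pool`/`max_pool` are assumptions about the project's helpers (chunk-wise pooling along the feature axis), not their actual code, and the call reuses the `reduce_map` definition above.

from enum import Enum
import numpy as np

class Pools(Enum):              # hypothetical stand-in for the project's Pools enum
    AVG = "avg"
    MAX = "max"
    RANDOM = "random"

def avg_pool(layer_feats, num_split):
    # Assumed behaviour: average each of num_split equal chunks of the feature axis.
    return np.stack([c.mean(axis=1)
                     for c in np.split(layer_feats, num_split, axis=1)], axis=1)

def max_pool(layer_feats, num_split):
    # Assumed behaviour: take the max of each of num_split equal chunks.
    return np.stack([c.max(axis=1)
                     for c in np.split(layer_feats, num_split, axis=1)], axis=1)

feats = np.random.rand(8, 64)                       # 8 examples, 64 features
train_inp = reduce_map(None, feats, num_split=4,    # weights are unused for AVG/MAX
                       pool_method=Pools.AVG)       # -> shape (8, 4)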
Example #3
    def encode(self, idxs, lengths, fr=0):
        # Look up word embeddings; use the French table unless the vocab is shared.
        if fr and not self.share_vocab:
            word_embs = self.embedding_fr(idxs)
        else:
            word_embs = self.embedding(idxs)

        # F.dropout is not in-place: assign its result back so it takes effect.
        if self.dropout > 0:
            word_embs = F.dropout(word_embs, p=self.dropout, training=self.training)

        # Pool the word embeddings over time to get one vector per sequence.
        if self.pool == "max":
            word_embs = model_utils.max_pool(word_embs, lengths, self.args.gpu)
        elif self.pool == "mean":
            word_embs = model_utils.mean_pool(word_embs, lengths, self.args.gpu)

        return word_embs
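Examples #1 and #3 both defer to `model_utils.max_pool` and `model_utils.mean_pool`, whose code is not shown here. A common way to implement length-aware pooling over padded sequences is sketched below; this is an assumption about what those helpers do (the `gpu` argument is omitted), not their actual implementation.

import torch

def masked_max_pool(hids, lengths):
    # hids: (batch, max_len, dim); lengths: (batch,) number of valid timesteps.
    mask = torch.arange(hids.size(1), device=hids.device)[None, :] < lengths[:, None]
    # Padded positions are set to -inf so they can never be the maximum.
    return hids.masked_fill(~mask.unsqueeze(-1), float("-inf")).max(dim=1).values

def masked_mean_pool(hids, lengths):
    mask = torch.arange(hids.size(1), device=hids.device)[None, :] < lengths[:, None]
    summed = (hids * mask.unsqueeze(-1).float()).sum(dim=1)
    return summed / lengths.unsqueeze(-1).float()   # divide by true lengths, not max_len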