Example #1
import torch
import torch.nn as nn


class OneHotEncoding(nn.Module):
    # Vocab is assumed to come from the surrounding project; it is not a
    # standard library or PyTorch class.
    def __init__(self, index, token_name, classes):
        super(OneHotEncoding, self).__init__()

        # Build a vocabulary from the class labels for this feature index.
        vocab = Vocab(token_name)
        vocab.init()
        for c in classes[index]:
            vocab.add(c)

        self.num_class = len(vocab)

        # Identity matrix: row i is the one-hot vector for class i. Stored as
        # a frozen Parameter so it moves with the module (e.g. .to(device))
        # but is never updated by the optimizer.
        one_hot_encoding = torch.eye(self.num_class)
        self.one_hots = nn.Parameter(one_hot_encoding, requires_grad=False)
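The core trick here is that indexing rows of an identity matrix turns class ids into one-hot vectors. A minimal standalone sketch (the names below are illustrative, not from the snippet):

import torch

num_class = 4
one_hots = torch.eye(num_class)      # (num_class, num_class) identity
token_ids = torch.tensor([0, 2, 3])  # a batch of class indices
print(one_hots[token_ids])           # each row is a one-hot vector

Storing the table as a Parameter with requires_grad=False keeps it out of gradient updates while still letting it follow the module across devices; registering it as a buffer would achieve the same effect.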
Example #2
import torch.nn as nn


class SparseToEmbedding(nn.Module):
    # Vocab and WordEmbedding are assumed to come from the surrounding
    # project; neither is a standard PyTorch class.
    def __init__(
        self,
        index,
        token_name,
        classes,
        dropout=0,
        embed_dim=15,
        trainable=True,
        padding_idx=None,
        max_norm=None,
        norm_type=2,
        scale_grad_by_freq=False,
        sparse=False,
    ):
        super(SparseToEmbedding, self).__init__()

        self.embed_dim = embed_dim

        # Build a vocabulary from the class labels for this feature index.
        vocab = Vocab(token_name)
        vocab.init()
        for c in classes[index]:
            vocab.add(c)

        # Collect the embedding hyperparameters and forward them verbatim.
        embedding_params = {
            "vocab": vocab,
            "dropout": dropout,
            "embed_dim": embed_dim,
            "trainable": trainable,
            "padding_idx": padding_idx,
            "max_norm": max_norm,
            "norm_type": norm_type,
            "scale_grad_by_freq": scale_grad_by_freq,
            "sparse": sparse,
        }

        self.embedding = WordEmbedding(**embedding_params)
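WordEmbedding itself is not shown, but its keyword arguments (padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse) mirror those of torch.nn.Embedding. A rough standalone equivalent, under the unconfirmed assumption that WordEmbedding wraps nn.Embedding plus dropout, might look like:

import torch.nn as nn

vocab_size = 100  # stands in for len(vocab) in the snippet above
embed = nn.Embedding(
    num_embeddings=vocab_size,
    embedding_dim=15,
    padding_idx=None,
    max_norm=None,
    norm_type=2,
    scale_grad_by_freq=False,
    sparse=False,
)
embed.weight.requires_grad = True  # the "trainable" flag
drop = nn.Dropout(p=0)             # the "dropout" parameter

This is only a sketch of the likely behavior; the actual WordEmbedding class may differ in how it consumes the vocab argument.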