Example #1
    def __init__(self, config, src_vocab, target_vocab, s_v, t_v, u):
        super(Transformer, self).__init__()
        self.config = config

        h, N, dropout = self.config.h, self.config.N, self.config.dropout
        d_model, d_ff = self.config.d_model, self.config.d_ff

        attn = MultiHeadedAttention(h, d_model)
        ff = PositionwiseFeedForward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)

        # Cross modules are built at twice the model width (2 * d_model)
        attncross = MultiHeadedAttention(h, d_model * 2)
        ffcross = PositionwiseFeedForward(d_model * 2, d_ff, dropout)
        positioncross = PositionalEncoding(d_model * 2, dropout)

        self.encoder = Encoder(
            EncoderLayer(config.d_model, deepcopy(attn), deepcopy(ff),
                         dropout), N)
        self.encoder_cross = EncoderCross(
            EncoderLayerCross((config.d_model) * 2, deepcopy(attncross),
                              deepcopy(ffcross), dropout), N)
        self.src_embed = nn.Sequential(
            Embeddings(config.d_model, src_vocab, s_v, u),
            deepcopy(position))  # Embeddings followed by PE
        # self.src_embed.weight.data.copy_(src_vocab.vectors)
        self.target_embed = nn.Sequential(
            Embeddings(config.d_model, target_vocab, t_v, u),
            deepcopy(position))
        # self.target_embed.weight.data.copy_(target_vocab.vectors)
        # Fully-Connected Layer
        self.fc = nn.Linear(self.config.d_model, self.config.output_size)
        self.sigmoid = nn.Sigmoid()
        self.cos = nn.CosineSimilarity(dim=1, eps=1e-6)
        self.softmax = nn.Softmax(dim=-1)  # explicit dim; implicit softmax dimension is deprecated
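The cross modules in Example #1 are built at 2 * d_model, which is consistent with two d_model-wide encodings being concatenated along the feature axis before entering encoder_cross. Below is a minimal shape check of that reading; the tensor names and sizes are hypothetical, and the example itself does not show the actual forward pass:

import torch

# Hypothetical shapes only; enc_a / enc_b stand in for two outputs of self.encoder.
batch, seq, d_model = 4, 16, 512
enc_a = torch.randn(batch, seq, d_model)
enc_b = torch.randn(batch, seq, d_model)
cross_in = torch.cat([enc_a, enc_b], dim=-1)         # feature-wise concatenation
assert cross_in.shape == (batch, seq, 2 * d_model)   # matches EncoderLayerCross(d_model * 2, ...)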
Example #2
    def __init__(self, hidden, attn_heads, feed_forward_hidden, dropout):
        """
        :param hidden: hidden size of transformer
        :param attn_heads: head sizes of multi-head attention
        :param feed_forward_hidden: feed_forward_hidden, usually 4*hidden_size
        :param dropout: dropout rate
        """

        super().__init__()
        self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
        self.feed_forward = PositionwiseFeedForward(d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout)
        self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.dropout = nn.Dropout(p=dropout)
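The constructor above only wires the sublayers; the corresponding forward pass is typically the standard residual pattern sketched below. This is a sketch, assuming SublayerConnection is called as sublayer(x, fn) and that MultiHeadedAttention takes query, key, value plus an optional mask:

    def forward(self, x, mask):
        # Self-attention wrapped in the first residual/normalisation sublayer
        x = self.input_sublayer(x, lambda _x: self.attention(_x, _x, _x, mask=mask))
        # Position-wise feed-forward wrapped in the second sublayer
        x = self.output_sublayer(x, self.feed_forward)
        return self.dropout(x)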
Example #3
    def __init__(self, config, src_vocab):
        super(Transformer, self).__init__()
        self.config = config

        h, N, dropout = self.config.h, self.config.N, self.config.dropout
        d_model, d_ff = self.config.d_model, self.config.d_ff

        attn = MultiHeadedAttention(h, d_model)
        ff = PositionwiseFeedForward(d_model, d_ff, dropout)

        self.encoder = Encoder(
            EncoderLayer(config.d_model, deepcopy(attn), deepcopy(ff),
                         dropout), N)
        self.src_embed = nn.Sequential(Embeddings(config.d_model, src_vocab))  # embeddings only; no positional encoding in this variant

        self.fc = nn.Linear(self.config.d_model, self.config.output_size)

        self.softmax = nn.Softmax(dim=-1)  # explicit dim; implicit softmax dimension is deprecated
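Examples #3 and #4 are text-classification heads over the encoder. A forward pass for this kind of model usually embeds the tokens, encodes them, pools the sequence into a single vector and projects it with the fully connected layer. The sketch below assumes torchtext-style (seq_len, batch) input, an Encoder call that needs no explicit mask, and last-position pooling; none of these are guaranteed by the snippet itself:

    def forward(self, x):
        # x: (seq_len, batch) token indices
        embedded = self.src_embed(x.permute(1, 0))   # (batch, seq_len, d_model)
        encoded = self.encoder(embedded)             # (batch, seq_len, d_model)
        pooled = encoded[:, -1, :]                   # assumption: take the final position
        return self.softmax(self.fc(pooled))         # (batch, output_size)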
Example #4
    def __init__(self, config, src_vocab):
        super(Matposer, self).__init__()
        self.config = config

        d_row, N, dropout = self.config.d_row, self.config.N, self.config.dropout
        d_model, d_ff = self.config.d_model, self.config.d_ff

        # Interactor takes the place of the attention sublayer inside each EncoderLayer
        inter = Interactor(d_model, d_ff, out_row=d_row, dropout=dropout)
        ff = PositionwiseFeedForward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)

        self.encoder = Encoder(
            EncoderLayer(d_model, deepcopy(inter), deepcopy(ff), dropout), N)
        self.src_embed = nn.Sequential(Embeddings(d_model, src_vocab),
                                       deepcopy(position))

        self.fc = nn.Linear(d_model, self.config.output_size)

        self.softmax = nn.Softmax(dim=-1)  # explicit dim; implicit softmax dimension is deprecated
Example #5
    def __init__(self, config):
        super(Transformer, self).__init__()
        self.config = config

        h, N, dropout = self.config.h, self.config.N, self.config.dropout
        d_model, d_ff = self.config.d_model, self.config.d_ff

        attn = MultiHeadedAttention(h, d_model)
        ff = PositionwiseFeedForward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)

        self.encoder = Encoder(EncoderLayer(config.d_model, deepcopy(attn), deepcopy(ff), dropout), N)
        # self.src_embed = nn.Sequential(Embeddings(config.d_model, src_vocab),
        #                                deepcopy(position))  # Embeddings followed by PE

        # Fully-Connected Layer
        self.fc = nn.Linear(
            self.config.d_model,
            self.config.output_size
        )
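Every constructor in these examples reads the same hyperparameters from a config object: h, N, dropout, d_model, d_ff and output_size (plus d_row for the Matposer variant). For illustration, a hypothetical config built from a plain namespace; the concrete values are placeholders and are not taken from any of the repositories:

from types import SimpleNamespace

config = SimpleNamespace(
    h=8,             # attention heads
    N=6,             # encoder layers
    dropout=0.1,
    d_model=512,
    d_ff=2048,
    output_size=2,   # target classes
)
model = Transformer(config)  # Example #5 takes only the config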
Example #6
    def __init__(self, config, src_vocab):
        super(Transformer, self).__init__()
        self.config = config

        h, N, dropout = self.config.h, self.config.N, self.config.dropout
        d_model, d_ff = self.config.d_model, self.config.d_ff
        self.src_vocab = src_vocab

        attn = MultiHeadedAttention(h, d_model)
        ff = PositionwiseFeedForward(d_model, d_ff, dropout)

        self.encoder_layer = EncoderLayer(config.d_model, deepcopy(attn),
                                          deepcopy(ff), dropout)
        self.encoder = Encoder(self.encoder_layer, N)

        self.src_word_emb = nn.Embedding(src_vocab,
                                         config.d_model,
                                         padding_idx=0)

        # self.pos_bias = nn.Embedding(src_vocab, config.d_model, padding_idx=0)
        # self.pos_bias = nn.Embedding.from_pretrained(get_sinusoid_encoding_table_dim(src_vocab, config.d_model, padding_idx=0),freeze=True)
        # self.pos_bias = nn.Embedding.from_pretrained(get_sinusoid_encoding_table_vocab(src_vocab, config.d_model, padding_idx=0),freeze=True)

        # self.pos_bias = nn.Embedding(1, config.d_model, padding_idx=0)
        # self.pos_bias = nn.Embedding(src_vocab, 1, padding_idx=0)
        # self.position_enc = nn.Embedding(src_vocab, config.d_model, padding_idx=0)
        self.position_enc = nn.Embedding.from_pretrained(
            get_sinusoid_encoding_table(src_vocab,
                                        config.d_model,
                                        padding_idx=0),
            freeze=False)

        # position_enc = torch.randn(1000, config.d_model)
        # position_enc = position_enc.unsqueeze(0)
        # self.register_buffer('position_enc', position_enc)

        self.drop = nn.Dropout(p=dropout)
        self.fc = nn.Linear(self.config.d_model, self.config.output_size)

        self.softmax = nn.Softmax(dim=-1)  # explicit dim; implicit softmax dimension is deprecated
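In Example #6 the token embedding and the (sinusoid-initialised, trainable) position table are separate nn.Embedding modules, so a forward pass has to look both up and add them. The sketch below assumes padding index 0, 1-based positions, an Encoder that takes (x, mask) as in the Annotated Transformer, and first-position pooling; the original sizes the position table by src_vocab, so sequence lengths are assumed to stay below that:

    def forward(self, src_seq):
        # src_seq: (batch, seq_len) token indices, 0 = padding
        batch_size, seq_len = src_seq.size()
        # Position indices 1..seq_len, zeroed where the token is padding
        positions = torch.arange(1, seq_len + 1, device=src_seq.device)
        positions = positions.unsqueeze(0).expand(batch_size, seq_len) * src_seq.ne(0).long()
        x = self.drop(self.src_word_emb(src_seq) + self.position_enc(positions))
        mask = src_seq.ne(0).unsqueeze(-2)               # assumption: Encoder takes (x, mask)
        encoded = self.encoder(x, mask)
        return self.softmax(self.fc(encoded[:, 0, :]))   # assumption: pool the first position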