Example no. 1
0
 def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
     """Build the decoder layer's three sub-layers: masked self-attention,
     encoder-decoder attention, and a position-wise feed-forward network.
     """
     super(DecoderLayer, self).__init__()

     # Both attention sub-layers share exactly the same configuration.
     def _make_attention():
         return MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)

     self.slf_attn = _make_attention()
     self.enc_attn = _make_attention()
     self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
 def __init__(self, d_model, d_inner_hid, n_head):
     """Assemble one encoder layer: a self-attention sub-layer followed by a
     position-wise feed-forward network (default dropout of the sub-modules).
     """
     super(EncoderLayer, self).__init__()
     # The two sub-modules are independent, so construction order is free.
     self.pos_ffn = PositionwiseFeedForward(d_model, d_inner_hid)
     self.slf_attn = MultiHeadAttention(d_model, n_head)
Example no. 3
0
 def __init__(self, d_model, d_inner_hid, n_head, d_k, d_v, dropout=0.1):
     """Initialize the decoder layer's self-attention sub-layer.

     Note: d_inner_hid is accepted for signature compatibility but is not
     used by the visible body.
     """
     super(DecoderLayer, self).__init__()
     self.slf_attn = MultiHeadAttention(
         n_head, d_model, d_k, d_v, dropout=dropout)
 def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1, d_enc=None):
     """Decoder layer whose encoder-attention can consume encoder states of a
     different width.

     d_enc: width of the encoder outputs fed to enc_attn; when None it
     defaults to d_model (same width as the decoder itself).
     """
     super(DecoderLayer, self).__init__()
     if d_enc is None:
         # Fall back to the decoder's own model width.
         d_enc = d_model
     self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
                                        dropout=dropout)
     self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
                                        dropout=dropout, d_in=d_enc)
     self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)