Example #1
    def __init__(self,
                 n_max_seq,
                 n_layers=6,
                 n_head=8,
                 d_k=64,
                 d_v=64,
                 d_word_vec=512,
                 d_model=512,
                 d_inner_hid=1024,
                 dropout=0.1):

        super(Encoder, self).__init__()

        n_position = n_max_seq + 1
        self.n_max_seq = n_max_seq
        self.d_model = d_model

        # positional embedding table: n_position rows (index 0 reserved for padding)
        # of dimension d_word_vec; its weights are overwritten below with a
        # precomputed sinusoidal encoding
        self.position_enc = nn.Embedding(n_position, d_word_vec, padding_idx=0)
        self.position_enc.weight.data = position_encoding_init(
            n_position, d_word_vec)

        self.layer_stack = nn.ModuleList([
            EncoderLayer(d_model,
                         d_inner_hid,
                         n_head,
                         d_k,
                         d_v,
                         dropout=dropout) for _ in range(n_layers)
        ])
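
This example relies on a position_encoding_init helper that is not shown. A minimal sketch, assuming the conventional sinusoidal formulation with row 0 left as zeros for the padding index:

import numpy as np
import torch

def position_encoding_init(n_position, d_word_vec):
    # sinusoidal position-encoding table; position 0 stays all zeros for padding
    position_enc = np.array([
        [pos / np.power(10000, 2 * (i // 2) / d_word_vec) for i in range(d_word_vec)]
        if pos != 0 else np.zeros(d_word_vec)
        for pos in range(n_position)])
    position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])  # even dimensions
    position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])  # odd dimensions
    return torch.from_numpy(position_enc).type(torch.FloatTensor)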
Example #2
    def __init__(self,
                 x_embed,
                 n_src_vocab,
                 len_max_seq,
                 d_word_vec,
                 n_layers,
                 n_head,
                 d_k,
                 d_v,
                 d_model,
                 d_inner,
                 dropout=0.1):

        super().__init__()

        n_position = len_max_seq + 1

        # a pre-built word embedding is injected via `x_embed` instead of being
        # created here; the original construction is kept for reference:
        # self.src_word_emb = nn.Embedding(
        #     n_src_vocab, d_word_vec, padding_idx=Constants.PAD)
        self.src_word_emb = x_embed

        self.position_enc = nn.Embedding.from_pretrained(
            get_sinusoid_encoding_table(n_position, d_word_vec, padding_idx=0),
            freeze=True)

        self.layer_stack = nn.ModuleList([
            EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
            for _ in range(n_layers)
        ])
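
Examples #2 through #4 load a frozen table from get_sinusoid_encoding_table, also not shown. A sketch, assuming the standard sinusoidal table with an optional zeroed padding row:

import numpy as np
import torch

def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
    # angle for a given position and hidden-dimension index
    def cal_angle(position, hid_idx):
        return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)

    sinusoid_table = np.array(
        [[cal_angle(pos, hid_j) for hid_j in range(d_hid)] for pos in range(n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i + 1

    if padding_idx is not None:
        sinusoid_table[padding_idx] = 0.  # zero vector for the padding position

    return torch.FloatTensor(sinusoid_table)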
Example #3
File: Models.py  Project: hobincar/SGN
    def __init__(
            self,
            len_max_seq, d_word_vec,
            n_layers, n_head, d_k, d_v,
            d_model, d_inner, dropout=0.1):

        super(Encoder, self).__init__()

        n_position = len_max_seq + 1

        self.position_enc = nn.Embedding.from_pretrained(
            get_sinusoid_encoding_table(n_position, d_word_vec, padding_idx=0),
            freeze=True)

        self.layer_stack = nn.ModuleList([
            EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
            for _ in range(n_layers)])
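
Example #3 builds no word-embedding table, so the encoder presumably receives already-embedded feature vectors. A hedged sketch of how such a forward pass might add the frozen positional table and run the layer stack (argument names and mask handling are assumptions, not part of the quoted code):

    def forward(self, src_emb, src_pos, non_pad_mask=None, slf_attn_mask=None):
        # add frozen sinusoidal position embeddings to the pre-embedded inputs
        enc_output = src_emb + self.position_enc(src_pos)

        # run the stacked self-attention layers
        for enc_layer in self.layer_stack:
            enc_output, _ = enc_layer(
                enc_output,
                non_pad_mask=non_pad_mask,
                slf_attn_mask=slf_attn_mask)

        return enc_output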
Example #4
    def __init__(self,
                 n_src_vocab,
                 len_max_seq,
                 d_word_vec,
                 n_layers,
                 n_head,
                 d_k,
                 d_v,
                 d_model,
                 d_inner,
                 dropout=0.1,
                 score_util=None,
                 pos_emb=False,
                 rel_pos=False,
                 rel_pos_clip=None,
                 ex_mask=None):

        super().__init__()

        self.n_position = len_max_seq + 1
        self.d_word_vec = d_word_vec
        self.score_util = score_util
        self.pos_emb = pos_emb

        # flags selecting where the score information is injected; note that
        # 'enc' is also a substring of 'catenc', so 'catenc' enables both flags
        self.score_util_enc = score_util is not None and 'enc' in score_util
        self.score_util_attn = score_util is not None and 'attn' in score_util
        self.score_util_catenc = score_util is not None and 'catenc' in score_util

        self.src_word_emb = nn.Embedding(n_src_vocab,
                                         d_word_vec,
                                         padding_idx=Constants.PAD)

        self.position_enc = nn.Embedding.from_pretrained(
            get_sinusoid_encoding_table(self.n_position,
                                        d_word_vec,
                                        padding_idx=0),
            freeze=True)

        # taken when score_util contains 'enc' (e.g. 'enc', 'enc_attn',
        # 'enc_scl_attn', 'catenc', 'catenc_attn')
        if self.score_util_enc:
            # number of discrete score buckets (commented-out alternatives: 100, 5)
            self.n_part = 10
            # embedding table for the bucketed scores; index 0 reserved for padding
            self.score_enc = nn.Embedding(self.n_part + 1,
                                          d_word_vec,
                                          padding_idx=Constants.PAD)

        self.layer_stack = nn.ModuleList([
            EncoderLayer(d_model,
                         d_inner,
                         n_head,
                         d_k,
                         d_v,
                         dropout=dropout,
                         rel_pos=rel_pos,
                         rel_pos_clip=rel_pos_clip,
                         ex_mask=ex_mask) for _ in range(n_layers)
        ])
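
Example #4 extends the same pattern with score embeddings and optional relative-position attention. A hypothetical instantiation, only to illustrate how the extra keyword arguments combine (values are illustrative, not taken from the source project):

# hypothetical instantiation; all values are illustrative
encoder = Encoder(
    n_src_vocab=32000,
    len_max_seq=256,
    d_word_vec=512,
    n_layers=6,
    n_head=8,
    d_k=64,
    d_v=64,
    d_model=512,
    d_inner=2048,
    dropout=0.1,
    score_util='catenc_attn',  # substring checks enable the enc, catenc and attn paths
    pos_emb=True,
    rel_pos=True,
    rel_pos_clip=16,           # assumed clipping distance for relative positions
)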