Example #1
    def __init__(self,
                 word_vectors,
                 char_vectors,
                 hidden_size,
                 num_heads,
                 char_embed_drop_prob,
                 drop_prob=0.1):
        super(SketchyReader, self).__init__()

        self.emb = layers.Embedding(word_vectors=word_vectors,
                                    char_vectors=char_vectors,
                                    hidden_size=hidden_size,
                                    char_embed_drop_prob=char_embed_drop_prob,
                                    word_embed_drop_prob=drop_prob)

        hidden_size *= 2  # update hidden size for other layers due to char embeddings

        self.c_resizer = layers.Initialized_Conv1d(hidden_size, 128)  # project context embeddings to 128 channels

        self.q_resizer = layers.Initialized_Conv1d(hidden_size, 128)  # project query embeddings to 128 channels

        self.model_resizer = layers.Initialized_Conv1d(512, 128)  # project the 512-dim attention output back to 128 channels

        self.enc = layers.StackedEncoder(
            num_conv_blocks=4,
            kernel_size=7,
            num_heads=num_heads,
            dropout=drop_prob)  # embedding encoder layer
        self.att = layers.BiDAFAttention(
            hidden_size=128,
            drop_prob=drop_prob)  # context-query attention layer

        self.model_encoder_layers = nn.ModuleList([
            layers.StackedEncoder(num_conv_blocks=2,
                                  kernel_size=7,
                                  dropout=drop_prob) for _ in range(7)
        ])  # model encoder layer: 7 stacked blocks

        self.out = layers.SketchyOutput(hidden_size=128)  # output layer
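A minimal instantiation sketch for Example #1 (not part of the original snippet): it assumes the SketchyReader class above and its layers module are importable, and every tensor shape and hyperparameter value below is a placeholder chosen only to show how the constructor arguments fit together.

import torch

# Hypothetical pre-trained embedding matrices; the shapes are illustrative assumptions.
word_vectors = torch.randn(50000, 300)   # |word vocab| x word embedding dim
char_vectors = torch.randn(1400, 64)     # |char vocab| x char embedding dim

model = SketchyReader(word_vectors=word_vectors,
                      char_vectors=char_vectors,
                      hidden_size=100,
                      num_heads=8,
                      char_embed_drop_prob=0.05,
                      drop_prob=0.1)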
Example #2
    def __init__(self,
                 word_vectors,
                 char_vectors,
                 context_max_len,
                 query_max_len,
                 d_model,
                 train_cemb=False,
                 pad=0,
                 dropout=0.1,
                 num_head=8):
        """
        """
        super(QANet, self).__init__()
        if train_cemb:
            self.char_emb = nn.Embedding.from_pretrained(char_vectors,
                                                         freeze=False)
            print("Training char_embeddings")
        else:
            self.char_emb = nn.Embedding.from_pretrained(char_vectors)

        self.word_emb = nn.Embedding.from_pretrained(word_vectors)
        self.LC = context_max_len
        self.LQ = query_max_len
        self.num_head = num_head
        self.pad = pad
        self.dropout = dropout

        wemb_dim = word_vectors.size()[1]
        cemb_dim = char_vectors.size()[1]

        # Layer declarations
        self.emb = layers.Embedding(wemb_dim, cemb_dim, d_model)
        self.emb_enc = layers.Encoder(num_conv=4,
                                      d_model=d_model,
                                      num_head=num_head,
                                      k=7,
                                      dropout=0.1)
        self.cq_att = layers.CQAttention(d_model=d_model)
        self.cq_resizer = layers.Initialized_Conv1d(
            d_model * 4, d_model
        )  # forward layer to reduce the cq_att output dimension back to d_model
        self.model_enc_blks = nn.ModuleList([
            layers.Encoder(num_conv=2,
                           d_model=d_model,
                           num_head=num_head,
                           k=5,
                           dropout=0.1) for _ in range(7)
        ])
        self.out = layers.QAOutput(d_model)
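A hedged usage sketch for Example #2: the vocabulary sizes, embedding dimensions, and sequence lengths below are assumptions made for illustration, not values taken from the original code.

import torch

word_vectors = torch.randn(90000, 300)   # assumed word embedding matrix (|vocab| x dim)
char_vectors = torch.randn(1400, 64)     # assumed character embedding matrix

model = QANet(word_vectors=word_vectors,
              char_vectors=char_vectors,
              context_max_len=400,
              query_max_len=50,
              d_model=128,
              train_cemb=False,
              pad=0,
              dropout=0.1,
              num_head=8)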
Example #3
    def __init__(self,
                 word_vectors,
                 char_vectors,
                 context_max_len,
                 query_max_len,
                 d_model,
                 d_head,
                 mem_len=0,
                 same_length=False,
                 clamp_len=-1,
                 train_cemb=False,
                 pad=0,
                 dropout=0.1,
                 num_head=8):
        """
        """
        super(QANet, self).__init__()
        if train_cemb:
            self.char_emb = nn.Embedding.from_pretrained(char_vectors,
                                                         freeze=False)
        else:
            self.char_emb = nn.Embedding.from_pretrained(char_vectors)

        self.word_emb = nn.Embedding.from_pretrained(word_vectors)
        self.LC = context_max_len
        self.LQ = query_max_len
        self.num_head = num_head
        self.pad = pad
        self.dropout = dropout
        self.mem_len = mem_len
        self.d_head = d_head
        self.d_model = d_model
        self.same_length = same_length
        self.clamp_len = clamp_len
        self.ext_len = 0

        wemb_dim = word_vectors.size()[1]
        cemb_dim = char_vectors.size()[1]

        # Layer declarations
        self.emb = layers.Embedding(wemb_dim, cemb_dim, d_model)
        self.emb_enc = layers.Encoder(4,
                                      num_head,
                                      d_model,
                                      d_head,
                                      d_inner=d_model * 4,
                                      k=7,
                                      dropout=0.1)  # hard-coded dropout rate
        self.cq_att = layers.CQAttention(d_model=d_model)
        self.cq_resizer = layers.Initialized_Conv1d(
            d_model * 4, d_model
        )  # forward layer to reduce the cq_att output dimension back to d_model
        self.model_enc_blks = nn.ModuleList([
            layers.Encoder(2,
                           num_head,
                           d_model,
                           d_head,
                           d_inner=d_model * 4,
                           k=5,
                           dropout=0.1) for _ in range(7)
        ])
        self.out = layers.QAOutput(d_model)
        self.drop = nn.Dropout(dropout)

        self._create_parameters()
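A hedged instantiation sketch for Example #3, whose encoder blocks take the additional Transformer-XL style arguments; all numeric values are illustrative assumptions, and d_head is set to d_model / num_head only by convention.

import torch

word_vectors = torch.randn(90000, 300)   # assumed word embedding matrix
char_vectors = torch.randn(1400, 64)     # assumed character embedding matrix

model = QANet(word_vectors=word_vectors,
              char_vectors=char_vectors,
              context_max_len=400,
              query_max_len=50,
              d_model=128,
              d_head=16,         # assumed per-head dimension (d_model / num_head)
              mem_len=0,
              same_length=False,
              clamp_len=-1,
              train_cemb=False,
              pad=0,
              dropout=0.1,
              num_head=8)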