Example #1
    def __init__(self,
                 vocab_size,
                 word_dim,
                 embed_size,
                 use_abs=False,
                 att_units=300,
                 hops=30,
                 gru_units=None,
                 num_layers=None,
                 norm_words=False):
        super(AttentiveTextEncoder, self).__init__()
        self.use_abs = use_abs
        self.embed_size = embed_size
        self.hops = hops
        self.att_units = att_units

        # word embedding
        self.embed = nn.Embedding(vocab_size, word_dim)

        # multi-hop self-attention over the word embeddings
        self.attention = SelfAttentiveEncoder(nb_features=word_dim,
                                              att_units=att_units,
                                              hops=hops)

        # flattened attention output size: one word_dim-sized vector per hop
        att_out_size = word_dim * hops

        # optional projection to the joint embedding space, created only when
        # the flattened attention output does not already match embed_size
        self.fc = False
        if att_out_size != embed_size:
            self.fc = nn.Linear(att_out_size, embed_size)

        global_initializer(self)
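
SelfAttentiveEncoder and global_initializer come from elsewhere in the project and are not shown in this snippet, and gru_units, num_layers and norm_words are accepted but not used in this variant. Purely for orientation, below is a minimal multi-hop self-attention sketch with the same constructor arguments (nb_features, att_units, hops), under the assumption that such a module maps a (batch, seq_len, nb_features) input to one attended vector per hop, i.e. (batch, hops, nb_features); the project's real implementation may differ.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttentiveEncoderSketch(nn.Module):
    # Sketch only (an assumption, not the project's SelfAttentiveEncoder):
    # maps (batch, seq_len, nb_features) to (batch, hops, nb_features),
    # producing one attended summary vector per hop.
    def __init__(self, nb_features, att_units, hops):
        super().__init__()
        self.ws1 = nn.Linear(nb_features, att_units, bias=False)
        self.ws2 = nn.Linear(att_units, hops, bias=False)

    def forward(self, x):
        # attention weights: (batch, seq_len, hops), normalised over the sequence
        att = F.softmax(self.ws2(torch.tanh(self.ws1(x))), dim=1)
        # weighted sums of the inputs, one per hop: (batch, hops, nb_features)
        return torch.bmm(att.transpose(1, 2), x)

enc = SelfAttentiveEncoderSketch(nb_features=300, att_units=300, hops=30)
out = enc(torch.randn(4, 12, 300))   # out.shape == (4, 30, 300)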
Example #2
    def __init__(self,
                 vocab_size,
                 word_dim,
                 embed_size,
                 use_abs=False,
                 att_units=200,
                 hops=15,
                 gru_units=1024,
                 num_layers=1,
                 norm_words=None):

        super(GRUAttentiveTextEncoder, self).__init__()

        self.use_abs = use_abs
        self.embed_size = embed_size
        self.hops = hops
        self.att_units = att_units

        # word embedding
        self.embed = nn.Embedding(vocab_size, word_dim)
        self.norm_words = norm_words
        # caption embedding
        self.rnn = nn.GRU(word_dim, gru_units, num_layers, batch_first=True)

        # multi-hop self-attention over the GRU hidden states
        self.attention = SelfAttentiveEncoder(nb_features=gru_units,
                                              att_units=att_units,
                                              hops=hops)

        # project the concatenated attention hops to the joint embedding space
        self.fc = nn.Linear(gru_units * hops, embed_size)
        global_initializer(self)
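
Only the constructor is shown, so the forward pass below is an assumption, not the project's code. With the default sizes, the modules defined above imply the following shape flow; a random tensor stands in for the attention output, which is assumed to be (batch, hops, gru_units).

import torch
import torch.nn as nn

# Toy-sized shape walk-through of the modules defined in __init__ above.
batch, seq_len = 4, 12
vocab_size, word_dim, gru_units, num_layers = 1000, 300, 1024, 1
hops, embed_size = 15, 1024

embed = nn.Embedding(vocab_size, word_dim)
rnn = nn.GRU(word_dim, gru_units, num_layers, batch_first=True)
fc = nn.Linear(gru_units * hops, embed_size)

tokens = torch.randint(0, vocab_size, (batch, seq_len))
words = embed(tokens)                            # (batch, seq_len, word_dim)
hidden, _ = rnn(words)                           # (batch, seq_len, gru_units)
attended = torch.randn(batch, hops, gru_units)   # stand-in attention output
caption = fc(attended.reshape(batch, -1))        # (batch, embed_size)
print(caption.shape)                             # torch.Size([4, 1024])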
Example #3
    def __init__(self,
                 vocab_size,
                 word_dim,
                 embed_size,
                 use_abs=False,
                 att_units=300,
                 hops=30,
                 gru_units=None,
                 num_layers=1,
                 norm_words=None):
        super(ConvAttentiveTextEncoder, self).__init__()
        self.use_abs = use_abs
        self.embed_size = embed_size
        self.hops = hops
        self.att_units = att_units

        conv_filters = 100
        # word embedding
        self.embed = nn.Embedding(vocab_size, word_dim)

        # two parallel convolutional branches over the word embeddings
        # (window sizes 2 and 3)
        self.conv1 = ConvBlock(
            in_channels=word_dim,
            out_channels=conv_filters,
            kernel_size=2,
            padding=1,
            activation='ReLU',
            batchnorm=True,
        )

        self.conv2 = ConvBlock(
            in_channels=word_dim,
            out_channels=conv_filters,
            kernel_size=3,
            padding=1,
            activation='ReLU',
            batchnorm=True,
        )

        # one self-attention head per branch: raw word embeddings, conv1, conv2
        self.att_emb = SelfAttentiveEncoder(nb_features=word_dim,
                                            att_units=att_units,
                                            hops=hops)

        self.att_conv1 = SelfAttentiveEncoder(nb_features=conv_filters,
                                              att_units=att_units,
                                              hops=hops)

        self.att_conv2 = SelfAttentiveEncoder(nb_features=conv_filters,
                                              att_units=att_units,
                                              hops=hops)

        # concatenate the three attended branches over all hops and project
        # to the joint embedding space
        self.fc = nn.Linear((conv_filters * 2 + word_dim) * hops, embed_size)
        global_initializer(self)
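
ConvBlock and the forward pass are not included in the snippet, so the sketch below is an assumption: nn.Conv1d + ReLU stands in for ConvBlock, and random tensors stand in for the three attention outputs, just to show how the branch widths add up to the fc input size of (conv_filters * 2 + word_dim) * hops.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Toy-sized shape walk-through; ConvBlock and SelfAttentiveEncoder live
# elsewhere in the project, so stand-ins are used throughout.
batch, seq_len = 4, 12
word_dim, conv_filters, hops, embed_size = 300, 100, 30, 1024

words = torch.randn(batch, seq_len, word_dim)      # embedded caption
conv1 = nn.Conv1d(word_dim, conv_filters, kernel_size=2, padding=1)
conv2 = nn.Conv1d(word_dim, conv_filters, kernel_size=3, padding=1)
c1 = F.relu(conv1(words.transpose(1, 2)))          # (batch, conv_filters, seq_len + 1)
c2 = F.relu(conv2(words.transpose(1, 2)))          # (batch, conv_filters, seq_len)

# assumed attention outputs, one (batch, hops, nb_features) tensor per branch
a_emb = torch.randn(batch, hops, word_dim)
a_c1 = torch.randn(batch, hops, conv_filters)
a_c2 = torch.randn(batch, hops, conv_filters)

features = torch.cat([a_emb, a_c1, a_c2], dim=2)   # (batch, hops, word_dim + 2 * conv_filters)
fc = nn.Linear((conv_filters * 2 + word_dim) * hops, embed_size)
out = fc(features.reshape(batch, -1))              # (batch, embed_size)
print(c1.shape, c2.shape, out.shape)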