Example #1
 def __init__(self, args=None):
     super(SELF_ATTENTION_YELP_CLASSIFICATION, self).__init__()
     # word2index, embeding_size, lstm_hidden_size, attention_unit,
     # attention_hops, nfc and class_num are expected to be defined at module level
     self.embedding = Embedding((len(word2index), embeding_size))
     # bidirectional LSTM encoder over the embedded token sequence
     self.lstm = LSTM(input_size=embeding_size,
                      hidden_size=lstm_hidden_size,
                      bidirectional=True)
     # multi-hop self-attention over the BiLSTM outputs
     self.attention = SelfAttention(lstm_hidden_size * 2,
                                    dim=attention_unit,
                                    num_vec=attention_hops)
     # MLP classifier over the flattened attention output
     self.mlp = MLP(
         size_layer=[lstm_hidden_size * 2 * attention_hops, nfc, class_num])
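The SelfAttention module here, taking dim=attention_unit and num_vec=attention_hops, resembles the structured self-attentive sentence embedding of Lin et al. (2017), where A = softmax(W2 · tanh(W1 · H^T)) and the sentence matrix is M = A · H. Below is a minimal stand-alone PyTorch sketch of that idea, assuming this interpretation; it is an illustration, not the module's actual implementation.

 import torch
 import torch.nn as nn
 import torch.nn.functional as F

 class MultiHopSelfAttention(nn.Module):
     # illustrative re-implementation; argument names mirror the constructor call above
     def __init__(self, input_size, dim, num_vec):
         super().__init__()
         self.w1 = nn.Linear(input_size, dim, bias=False)   # W1: 2*lstm_hidden -> attention_unit
         self.w2 = nn.Linear(dim, num_vec, bias=False)      # W2: attention_unit -> attention_hops

     def forward(self, h):                                  # h: (batch, seq_len, input_size)
         scores = self.w2(torch.tanh(self.w1(h)))           # (batch, seq_len, num_vec)
         alpha = F.softmax(scores, dim=1)                   # one distribution over time steps per hop
         m = torch.bmm(alpha.transpose(1, 2), h)            # (batch, num_vec, input_size)
         return m, alpha                                    # m is flattened before the MLP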
Example #2
 def __init__(self, init_embed,
              num_classes,
              hidden_dim=256,
              num_layers=1,
              attention_unit=256,
              attention_hops=1,
              nfc=128):
     super(BiLSTM_SELF_ATTENTION, self).__init__()
     # embedding layer constructed from init_embed
     self.embed = get_embeddings(init_embed)
     # bidirectional LSTM encoder
     self.lstm = LSTM(input_size=self.embed.embedding_dim,
                      hidden_size=hidden_dim,
                      num_layers=num_layers,
                      bidirectional=True)
     # multi-hop self-attention over the BiLSTM outputs
     self.attention = SelfAttention(input_size=hidden_dim * 2,
                                    attention_unit=attention_unit,
                                    attention_hops=attention_hops)
     # MLP classifier over the flattened attention hops
     self.mlp = MLP(size_layer=[hidden_dim * 2 * attention_hops, nfc, num_classes])
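Example #2 is the same architecture with the hyperparameters exposed as constructor arguments. Since the forward pass is not shown, here is a hedged shape walk-through of the data flow in plain PyTorch, using the default sizes above and the same structured-attention idea as in the sketch after Example #1; all names and sizes are illustrative.

 import torch
 import torch.nn as nn
 import torch.nn.functional as F

 batch, seq_len, vocab, emb_dim = 4, 30, 1000, 128
 hidden_dim, attention_unit, attention_hops, nfc, num_classes = 256, 256, 1, 128, 5

 embed = nn.Embedding(vocab, emb_dim)
 lstm = nn.LSTM(emb_dim, hidden_dim, num_layers=1, bidirectional=True, batch_first=True)
 w1 = nn.Linear(2 * hidden_dim, attention_unit, bias=False)
 w2 = nn.Linear(attention_unit, attention_hops, bias=False)
 mlp = nn.Sequential(nn.Linear(2 * hidden_dim * attention_hops, nfc),
                     nn.ReLU(),
                     nn.Linear(nfc, num_classes))

 tokens = torch.randint(0, vocab, (batch, seq_len))      # word indices
 h, _ = lstm(embed(tokens))                              # (batch, seq_len, 2*hidden_dim)
 alpha = F.softmax(w2(torch.tanh(w1(h))), dim=1)         # (batch, seq_len, attention_hops)
 m = torch.bmm(alpha.transpose(1, 2), h)                 # (batch, attention_hops, 2*hidden_dim)
 logits = mlp(m.flatten(1))                              # (batch, num_classes)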
Example #3
 def __init__(self, config):
     super(BiLSTMCRF, self).__init__()
     vocab_size = config["vocab_size"]
     word_emb_dim = config["word_emb_dim"]
     hidden_dim = config["rnn_hidden_units"]
     num_classes = config["num_classes"]
     bi_direction = config["bi_direction"]
     # word embedding lookup table
     self.Embedding = Embedding(vocab_size, word_emb_dim)
     # (optionally bidirectional) LSTM encoder
     self.Lstm = LSTM(word_emb_dim, hidden_dim, bidirectional=bi_direction)
     # projection from LSTM features to per-tag emission scores
     self.Linear = Linear(2 * hidden_dim if bi_direction else hidden_dim, num_classes)
     # CRF layer over the emission scores
     self.Crf = ConditionalRandomField(num_classes)
     self.mask = None
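The constructor wires an embedding, a (Bi)LSTM, a linear emission layer, and a CRF, which is the standard BiLSTM-CRF tagger layout: the linear layer produces per-token tag scores and the CRF scores whole tag sequences. The forward pass is not part of this example, and the ConditionalRandomField call signatures below are assumptions rather than confirmed API, so treat this only as a sketch of how the pieces usually fit together.

 # Hypothetical training / prediction methods for the BiLSTMCRF above.
 def forward(self, words, tags, mask):
     # words, tags, mask: (batch, seq_len); mask is 1 for real tokens, 0 for padding
     x = self.Embedding(words)                    # (batch, seq_len, word_emb_dim)
     x, _ = self.Lstm(x)                          # (batch, seq_len, 2*hidden_dim) if bidirectional
     emissions = self.Linear(x)                   # per-token, per-tag emission scores
     loss = self.Crf(emissions, tags, mask)       # assumed: CRF negative log-likelihood
     return loss.mean()

 def predict(self, words, mask):
     emissions = self.Linear(self.Lstm(self.Embedding(words))[0])
     paths, _ = self.Crf.viterbi_decode(emissions, mask)   # assumed decoding API
     return paths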
Example #4
 def __init__(self,
              init_embed,
              num_classes,
              hidden_dim=256,
              num_layers=1,
              nfc=128):
     super(BiLSTMSentiment, self).__init__()
     # embedding layer constructed from init_embed
     self.embed = encoder.Embedding(init_embed)
     # bidirectional LSTM encoder
     self.lstm = LSTM(input_size=self.embed.embedding_dim,
                      hidden_size=hidden_dim,
                      num_layers=num_layers,
                      bidirectional=True)
     # MLP classifier over a bidirectional sentence representation
     self.mlp = MLP(size_layer=[hidden_dim * 2, nfc, num_classes])
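The MLP input size of hidden_dim * 2 suggests the classifier consumes a single bidirectional summary vector per sentence rather than the full output sequence; which summary (final hidden states, pooling over time, ...) is used is not shown here. A plain-PyTorch sketch of the last-hidden-state variant, with illustrative sizes:

 import torch
 import torch.nn as nn

 batch, seq_len, vocab, emb_dim = 4, 20, 1000, 128
 hidden_dim, nfc, num_classes = 256, 128, 2

 embed = nn.Embedding(vocab, emb_dim)
 lstm = nn.LSTM(emb_dim, hidden_dim, num_layers=1, bidirectional=True, batch_first=True)
 mlp = nn.Sequential(nn.Linear(hidden_dim * 2, nfc), nn.ReLU(), nn.Linear(nfc, num_classes))

 tokens = torch.randint(0, vocab, (batch, seq_len))
 _, (h_n, _) = lstm(embed(tokens))                   # h_n: (2, batch, hidden_dim) for one BiLSTM layer
 sentence = torch.cat([h_n[-2], h_n[-1]], dim=-1)    # concat final forward/backward states
 logits = mlp(sentence)                              # (batch, num_classes)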
Example #5
    def __init__(self, char_emb_dim, word_emb_dim, vocab_size, num_char):
        super(CharLM, self).__init__()
        self.char_emb_dim = char_emb_dim
        self.word_emb_dim = word_emb_dim
        self.vocab_size = vocab_size

        # character embedding layer
        self.char_embed = nn.Embedding(num_char, char_emb_dim)

        # convolutions of filters with different sizes
        # ModuleList so the convolution filters are registered as submodules
        # (a plain Python list would hide their parameters from the optimizer)
        self.convolutions = nn.ModuleList()

        # list of (number of filters, filter width) tuples
        self.filter_num_width = [(25, 1), (50, 2), (75, 3), (100, 4), (125, 5),
                                 (150, 6)]

        for out_channel, filter_width in self.filter_num_width:
            self.convolutions.append(
                nn.Conv2d(
                    1,  # in_channel
                    out_channel,  # out_channel
                    kernel_size=(char_emb_dim,
                                 filter_width),  # (height, width)
                    bias=True))

        # total number of convolution filters = input size of the highway layers
        self.highway_input_dim = sum(x for x, _ in self.filter_num_width)

        self.batch_norm = nn.BatchNorm1d(self.highway_input_dim, affine=False)

        # highway net
        self.highway1 = Highway(self.highway_input_dim)
        self.highway2 = Highway(self.highway_input_dim)

        # LSTM
        self.lstm_num_layers = 2

        self.lstm = LSTM(self.highway_input_dim,
                         hidden_size=self.word_emb_dim,
                         num_layers=self.lstm_num_layers,
                         dropout=0.5)
        # output layer
        self.dropout = nn.Dropout(p=0.5)
        self.linear = nn.Linear(self.word_emb_dim, self.vocab_size)
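The forward pass is not shown, but the construction above follows the character-aware language model layout of Kim et al. (2016): each word's character embeddings are convolved with the filters of widths 1 to 6, max-pooled over character positions, concatenated into a 525-dimensional word vector, passed through the highway layers, and fed to the word-level LSTM. Below is a self-contained sketch of that flow for one batch of words; TinyHighway is a minimal stand-in for the Highway class used above, batch norm and dropout are omitted, and all sizes are illustrative.

 import torch
 import torch.nn as nn
 import torch.nn.functional as F

 class TinyHighway(nn.Module):
     # minimal stand-in for Highway: y = t * relu(W_h x) + (1 - t) * x
     def __init__(self, dim):
         super().__init__()
         self.proj = nn.Linear(dim, dim)
         self.gate = nn.Linear(dim, dim)

     def forward(self, x):
         t = torch.sigmoid(self.gate(x))
         return t * F.relu(self.proj(x)) + (1 - t) * x

 batch_words, max_word_len, num_char, char_emb_dim = 8, 12, 60, 15
 filter_num_width = [(25, 1), (50, 2), (75, 3), (100, 4), (125, 5), (150, 6)]

 char_embed = nn.Embedding(num_char, char_emb_dim)
 convs = nn.ModuleList(
     [nn.Conv2d(1, out_c, kernel_size=(char_emb_dim, width)) for out_c, width in filter_num_width])
 highway = TinyHighway(sum(c for c, _ in filter_num_width))       # 25 + ... + 150 = 525

 chars = torch.randint(0, num_char, (batch_words, max_word_len))  # character ids per word
 x = char_embed(chars).transpose(1, 2).unsqueeze(1)     # (words, 1, char_emb_dim, max_word_len)
 pooled = [F.relu(conv(x)).squeeze(2).max(dim=2).values for conv in convs]
 word_vecs = highway(torch.cat(pooled, dim=1))          # (words, 525), then into the word-level LSTM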