Example #1
    def __init__(self, vocab_num, embed_dim=100, bigram_vocab_num=None, bigram_embed_dim=100, num_bigram_per_char=None,
                 hidden_size=200, bidirectional=True, embed_drop_p=0.2, num_layers=1, tag_size=4):
        """
        默认使用BMES的标注方式
        :param vocab_num:
        :param embed_dim:
        :param bigram_vocab_num:
        :param bigram_embed_dim:
        :param num_bigram_per_char:
        :param hidden_size:
        :param bidirectional:
        :param embed_drop_p:
        :param num_layers:
        :param tag_size:
        """
        super(CWSBiLSTMCRF, self).__init__()

        self.tag_size = tag_size

        self.encoder_model = CWSBiLSTMEncoder(vocab_num, embed_dim, bigram_vocab_num, bigram_embed_dim, num_bigram_per_char,
                 hidden_size, bidirectional, embed_drop_p, num_layers)

        size_layer = [hidden_size, 200, tag_size]
        self.decoder_model = MLP(size_layer)
        allowed_trans = allowed_transitions({0:'b', 1:'m', 2:'e', 3:'s'}, encoding_type='bmes')
        self.crf = ConditionalRandomField(num_tags=tag_size, include_start_end_trans=False,
                                          allowed_transitions=allowed_trans)
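For reference, the BMES constraint that allowed_transitions encodes for the CRF above comes down to the legal tag-to-tag moves listed below. The dict is only an illustration of the scheme, not fastNLP's internal representation.

# Legal transitions under BMES (Begin / Middle / End / Single):
# a started word must continue or end, and a new word can only start
# after the previous one has ended.
bmes_allowed = {
    'b': {'m', 'e'},   # Begin  -> Middle or End
    'm': {'m', 'e'},   # Middle -> Middle or End
    'e': {'b', 's'},   # End    -> Begin or Single
    's': {'b', 's'},   # Single -> Begin or Single
}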
Example #2
File: main.py  Project: youtang1993/fastNLP
 def __init__(self, args=None):
     super(SELF_ATTENTION_YELP_CLASSIFICATION, self).__init__()
     self.embedding = Embedding((len(word2index), embeding_size))
     self.lstm = LSTM(input_size=embeding_size,
                      hidden_size=lstm_hidden_size,
                      bidirectional=True)
     self.attention = SelfAttention(lstm_hidden_size * 2,
                                    dim=attention_unit,
                                    num_vec=attention_hops)
     self.mlp = MLP(
         size_layer=[lstm_hidden_size * 2 * attention_hops, nfc, class_num])
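The MLP input size lstm_hidden_size * 2 * attention_hops follows from the self-attention layer returning one pooled vector per attention hop over the bidirectional LSTM states, which are then flattened. A minimal shape check with hypothetical sizes:

import torch

batch, hops, lstm_hidden_size = 8, 10, 300                 # hypothetical values
attended = torch.randn(batch, hops, 2 * lstm_hidden_size)  # one vector per attention hop
mlp_input = attended.reshape(batch, hops * 2 * lstm_hidden_size)
print(mlp_input.shape)                                      # torch.Size([8, 6000])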
Example #3
    def __init__(self, vocab_num, embed_dim=100, bigram_vocab_num=None, bigram_embed_dim=100, num_bigram_per_char=None,
                 hidden_size=200, bidirectional=True, embed_drop_p=None, num_layers=1, tag_size=2):
        super(CWSBiLSTMSegApp, self).__init__()

        self.tag_size = tag_size

        self.encoder_model = CWSBiLSTMEncoder(vocab_num, embed_dim, bigram_vocab_num, bigram_embed_dim, num_bigram_per_char,
                 hidden_size, bidirectional, embed_drop_p, num_layers)

        size_layer = [hidden_size, 200, tag_size]
        self.decoder_model = MLP(size_layer)
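As in example #1, the decoder is fastNLP's MLP built from the size_layer list [hidden_size, 200, tag_size]. A rough plain-PyTorch sketch of such a stack is shown below; the activation between layers is an assumption, and fastNLP's default may differ.

import torch.nn as nn

hidden_size, tag_size = 200, 2     # the defaults used above
decoder = nn.Sequential(
    nn.Linear(hidden_size, 200),   # hidden_size -> 200
    nn.ReLU(),                     # assumed nonlinearity between layers
    nn.Linear(200, tag_size),      # 200 -> tag_size
)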
Example #4
 def __init__(self, init_embed,
              num_classes,
              hidden_dim=256,
              num_layers=1,
              attention_unit=256,
              attention_hops=1,
              nfc=128):
     super(BiLSTM_SELF_ATTENTION, self).__init__()
     self.embed = get_embeddings(init_embed)
     self.lstm = LSTM(input_size=self.embed.embedding_dim, hidden_size=hidden_dim, num_layers=num_layers, bidirectional=True)
     self.attention = SelfAttention(input_size=hidden_dim * 2, attention_unit=attention_unit, attention_hops=attention_hops)
     self.mlp = MLP(size_layer=[hidden_dim * 2 * attention_hops, nfc, num_classes])
Example #5
 def __init__(self,
              init_embed,
              num_classes,
              hidden_dim=256,
              num_layers=1,
              nfc=128):
     super(BiLSTMSentiment, self).__init__()
     self.embed = encoder.Embedding(init_embed)
     self.lstm = LSTM(input_size=self.embed.embedding_dim,
                      hidden_size=hidden_dim,
                      num_layers=num_layers,
                      bidirectional=True)
     self.mlp = MLP(size_layer=[hidden_dim * 2, nfc, num_classes])
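The hidden_dim * 2 input size of the MLP comes from the bidirectional LSTM, which concatenates forward and backward hidden states along the feature dimension. A quick shape check with hypothetical sizes:

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=300, hidden_size=256, bidirectional=True, batch_first=True)
out, _ = lstm(torch.randn(8, 40, 300))   # hypothetical batch of 8 sequences of length 40
print(out.shape)                         # torch.Size([8, 40, 512]) == (batch, seq_len, hidden_dim * 2)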
Example #6
 def __init__(self,
              init_embed,
              num_classes,
              hidden_dim=256,
              num_layers=1,
              nfc=128,
              wdrop=0.5):
     super(AWDLSTMSentiment, self).__init__()
     self.embed = get_embeddings(init_embed)
     self.lstm = LSTM(input_size=self.embed.embedding_dim,
                      hidden_size=hidden_dim,
                      num_layers=num_layers,
                      bidirectional=True,
                      wdrop=wdrop)
     self.mlp = MLP(size_layer=[hidden_dim * 2, nfc, num_classes])
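The wdrop argument here refers to AWD-LSTM-style weight drop, i.e. DropConnect applied to the recurrent hidden-to-hidden weights rather than to activations. The sketch below only illustrates that idea on a standalone tensor; it is not fastNLP's implementation.

import torch
import torch.nn.functional as F

w_hh = torch.randn(4 * 256, 256)           # hypothetical recurrent (hidden-to-hidden) weights
w_hh_dropped = F.dropout(w_hh, p=0.5)      # zero ~half the weights and rescale the rest
print((w_hh_dropped == 0).float().mean())  # roughly 0.5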