def __init__(self, char_init_embed, word_init_embed, pos_init_embed,
             spo_embed_dim, sentence_length, hidden_size, num_classes,
             dropout=0.3, id2words=None, encoding_type='bieso', weight=None):
    super().__init__()
    # Each *_init_embed is a (vocab_size, embedding_dim) pair.
    self.char_embed = nn.Embedding(char_init_embed[0], char_init_embed[1])
    self.word_embed = nn.Embedding(word_init_embed[0], word_init_embed[1])
    # Initialize word embeddings from a pretrained word2vec matrix when given.
    if weight is not None:
        self.word_embed.weight.data.copy_(torch.from_numpy(weight))
    self.pos_embed = nn.Embedding(pos_init_embed[0], pos_init_embed[1])
    # Input width: char + word + pos embeddings plus the spo feature (size 50).
    self.embed_dim = (self.char_embed.embedding_dim
                      + self.word_embed.embedding_dim
                      + self.pos_embed.embedding_dim
                      + spo_embed_dim)
    # sentence_length is currently unused; kept for API compatibility.
    self.norm1 = torch.nn.LayerNorm(self.embed_dim)
    self.Rnn = nn.LSTM(input_size=self.embed_dim, hidden_size=hidden_size,
                       num_layers=2, dropout=dropout, bidirectional=True,
                       batch_first=True)
    # Bottleneck classification head on top of the BiLSTM output.
    self.Linear1 = nn.Linear(hidden_size * 2, hidden_size * 2 // 3)
    self.norm2 = torch.nn.LayerNorm(hidden_size * 2 // 3)
    self.relu = torch.nn.LeakyReLU()
    self.drop = torch.nn.Dropout(dropout)
    self.Linear2 = nn.Linear(hidden_size * 2 // 3, num_classes)
    # CRF decoder; constrain transitions to legal tag sequences when the tag
    # vocabulary is known.
    if id2words is None:
        self.Crf = CRF(num_classes, include_start_end_trans=False)
    else:
        self.Crf = CRF(num_classes, include_start_end_trans=False,
                       allowed_transitions=allowed_transitions(
                           id2words, encoding_type=encoding_type))
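# A minimal, self-contained sketch (not part of the model) of how the forward
# pass presumably combines the four inputs before the BiLSTM above: char, word,
# and pos ids are looked up, concatenated with an spo feature along the last
# dimension, normalized, and fed to the two-layer bidirectional LSTM. All sizes
# below are illustrative assumptions, not values taken from this repository,
# and whether spo arrives as a dense feature or an embedding lookup is assumed.
def _demo_bilstm_input():
    import torch
    import torch.nn as nn

    batch, seq_len = 2, 7
    char_embed = nn.Embedding(100, 64)
    word_embed = nn.Embedding(200, 100)
    pos_embed = nn.Embedding(30, 16)
    spo_embed_dim = 50
    embed_dim = 64 + 100 + 16 + spo_embed_dim

    chars = torch.randint(0, 100, (batch, seq_len))
    words = torch.randint(0, 200, (batch, seq_len))
    pos = torch.randint(0, 30, (batch, seq_len))
    spo = torch.randn(batch, seq_len, spo_embed_dim)  # assumed dense spo feature

    # Concatenate along the feature axis, matching the embed_dim sum above.
    x = torch.cat([char_embed(chars), word_embed(words), pos_embed(pos), spo],
                  dim=-1)
    x = nn.LayerNorm(embed_dim)(x)
    rnn = nn.LSTM(embed_dim, hidden_size=128, num_layers=2, dropout=0.3,
                  bidirectional=True, batch_first=True)
    out, _ = rnn(x)
    assert out.shape == (batch, seq_len, 128 * 2)  # bidirectional doubles width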
def __init__(self, char_init_embed, word_init_embed, pos_init_embed,
             spo_embed_dim, num_classes, num_layers, inner_size, key_size,
             value_size, num_head, dropout=0.1, id2words=None,
             encoding_type='bieso'):
    super().__init__()
    # Each *_init_embed is a (vocab_size, embedding_dim) pair.
    self.char_embed = nn.Embedding(char_init_embed[0], char_init_embed[1])
    self.word_embed = nn.Embedding(word_init_embed[0], word_init_embed[1])
    self.pos_embed = nn.Embedding(pos_init_embed[0], pos_init_embed[1])
    # Input width: char + word + pos embeddings plus the spo feature (size 50).
    self.embed_dim = (self.char_embed.embedding_dim
                      + self.word_embed.embedding_dim
                      + self.pos_embed.embedding_dim
                      + spo_embed_dim)
    self.norm1 = torch.nn.LayerNorm(self.embed_dim)
    # Transformer encoder replaces the BiLSTM of the recurrent variant.
    self.transformer = encoder.TransformerEncoder(
        num_layers=num_layers, model_size=self.embed_dim,
        inner_size=inner_size, key_size=key_size, value_size=value_size,
        num_head=num_head, dropout=dropout)
    # Bottleneck classification head on top of the encoder output.
    self.Linear1 = nn.Linear(self.embed_dim, self.embed_dim // 3)
    self.norm2 = torch.nn.LayerNorm(self.embed_dim // 3)
    self.relu = torch.nn.LeakyReLU()
    self.drop = torch.nn.Dropout(dropout)
    self.Linear2 = nn.Linear(self.embed_dim // 3, num_classes)
    # Alternative single-layer head projecting straight to the tag space.
    self.Linear = nn.Linear(self.embed_dim, num_classes)
    # CRF decoder; constrain transitions to legal tag sequences when the tag
    # vocabulary is known.
    if id2words is None:
        self.Crf = CRF(num_classes, include_start_end_trans=False)
    else:
        self.Crf = CRF(num_classes, include_start_end_trans=False,
                       allowed_transitions=allowed_transitions(
                           id2words, encoding_type=encoding_type))
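# A minimal sketch (not part of the model) of the bottleneck head built above:
# the encoder output (width model_size == embed_dim) is projected down to
# embed_dim // 3, normalized, activated with LeakyReLU, dropped out, and
# projected to the tag space that the CRF scores. fastNLP's TransformerEncoder
# is replaced here by a random tensor of the same shape, purely to check the
# head's dimensions; the sizes are illustrative assumptions.
def _demo_classification_head():
    import torch
    import torch.nn as nn

    batch, seq_len, embed_dim, num_classes = 2, 7, 230, 9
    hidden = torch.randn(batch, seq_len, embed_dim)  # stand-in encoder output

    head = nn.Sequential(
        nn.Linear(embed_dim, embed_dim // 3),
        nn.LayerNorm(embed_dim // 3),
        nn.LeakyReLU(),
        nn.Dropout(0.1),
        nn.Linear(embed_dim // 3, num_classes),
    )
    logits = head(hidden)
    assert logits.shape == (batch, seq_len, num_classes)  # per-token tag scores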
def __init__(self, char_init_embed, word_init_embed, pos_init_embed,
             spo_embed_dim, num_classes, num_layers, inner_size, key_size,
             value_size, num_head, dropout=0.1, id2words=None,
             encoding_type='bieso', weight=None):
    super().__init__()
    # Each *_init_embed is a (vocab_size, embedding_dim) pair.
    self.char_embed = nn.Embedding(char_init_embed[0], char_init_embed[1])
    self.word_embed = nn.Embedding(word_init_embed[0], word_init_embed[1])
    # Initialize word embeddings from a pretrained word2vec matrix when given.
    if weight is not None:
        self.word_embed.weight.data.copy_(torch.from_numpy(weight))
    self.pos_embed = nn.Embedding(pos_init_embed[0], pos_init_embed[1])
    # Input width: char + word + pos embeddings plus the spo feature (size 50).
    self.embed_dim = (self.char_embed.embedding_dim
                      + self.word_embed.embedding_dim
                      + self.pos_embed.embedding_dim
                      + spo_embed_dim)
    self.norm1 = torch.nn.LayerNorm(self.embed_dim)
    self.transformer = encoder.TransformerEncoder(
        num_layers=num_layers, model_size=self.embed_dim,
        inner_size=inner_size, key_size=key_size, value_size=value_size,
        num_head=num_head, dropout=dropout)
    # Bottleneck classification head on top of the encoder output.
    self.Linear1 = nn.Linear(self.embed_dim, self.embed_dim // 3)
    self.norm2 = torch.nn.LayerNorm(self.embed_dim // 3)
    self.relu = torch.nn.LeakyReLU()
    self.drop = torch.nn.Dropout(dropout)
    self.Linear2 = nn.Linear(self.embed_dim // 3, num_classes)
    # Alternative single-layer head projecting straight to the tag space.
    self.Linear = nn.Linear(self.embed_dim, num_classes)
    # CRF decoder; constrain transitions to legal tag sequences when the tag
    # vocabulary is known.
    if id2words is None:
        self.Crf = CRF(num_classes, include_start_end_trans=False)
    else:
        self.Crf = CRF(num_classes, include_start_end_trans=False,
                       allowed_transitions=allowed_transitions(
                           id2words, encoding_type=encoding_type))
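# A minimal sketch (not part of the model) of the pretrained-weight handling
# above: a numpy word2vec matrix of shape (vocab_size, embed_dim) is copied
# into the nn.Embedding table. nn.Embedding.from_pretrained offers the same
# initialization in one call and can optionally freeze the vectors. The array
# below is random, standing in for real word2vec output.
def _demo_pretrained_embeddings():
    import numpy as np
    import torch
    import torch.nn as nn

    vocab_size, embed_dim = 200, 100
    weight = np.random.rand(vocab_size, embed_dim).astype('float32')

    # In-place copy, as the constructor above does.
    word_embed = nn.Embedding(vocab_size, embed_dim)
    word_embed.weight.data.copy_(torch.from_numpy(weight))

    # Idiomatic one-call alternative; freeze=False keeps the vectors trainable.
    word_embed2 = nn.Embedding.from_pretrained(torch.from_numpy(weight),
                                               freeze=False)
    assert torch.equal(word_embed.weight, word_embed2.weight)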