Example #1
    def __init__(self, config, print_info: bool = True):
        super(NNCRF_sl, self).__init__()
        self.device = config.device
        # BiLSTM encoder produces emission scores; LinearCRF decodes label sequences.
        self.encoder = BiLSTMEncoder(config, print_info=print_info)
        self.inferencer = LinearCRF(config, print_info=print_info)
        self.label2idx = config.label2idx
        self.idx2word = config.idx2word
        self.idx2labels = config.idx2labels
        # Indices of the special labels used for padding and CRF transitions.
        self.Oid = self.label2idx['O']
        self.padid = self.label2idx['<PAD>']
        self.startid = self.label2idx['<START>']
        self.stopid = self.label2idx['<STOP>']

        self.pos_dic, self.type_dic = gen_dic(config.label2idx.keys(),
                                              self.label2idx)

        self.tags_num = len(self.idx2labels)
        e_type, pos = gen_embedding_table(self.idx2labels, self.type_dic,
                                          self.pos_dic)
        # from_pretrained is a classmethod, so the frozen lookup tables are built
        # directly from the precomputed tensors and moved to the target device.
        self.type_embedding = torch.nn.Embedding.from_pretrained(
            e_type, freeze=True).to(self.device)
        self.pos_embedding = torch.nn.Embedding.from_pretrained(
            pos, freeze=True).to(self.device)
Example #2
    def __init__(self, config: Config, print_info: bool = True):
        super(NNCRF, self).__init__()
        self.device = config.device
        self.encoder = BiLSTMEncoder(config, print_info=print_info)
        # The CRF inference layer is optional and controlled by the config.
        self.inferencer = None
        if config.use_crf_layer:
            self.inferencer = LinearCRF(config, print_info=print_info)
Example #3
File: model.py  Project: luogan1234/HEAT
    def __init__(self, config):
        super().__init__(config)
        # Choose the text encoder shared by the sub-modules below.
        if config.text_encoder == 'bilstm':
            encoder = BiLSTMEncoder(config)
        elif config.text_encoder in ['bert', 'bert_freeze']:
            encoder = BERTEncoder(config)
        if not config.remove_name:
            self.name_module = NameModule(config, encoder)
        if not config.remove_para or not config.remove_img:
            self.cross_modal_module = CrossModalModule(config, encoder)
        # Fuse the module outputs: concatenation, single-head attention,
        # or one classifier per module.
        if config.fusion == 'concatenation':
            self.fc = nn.Linear(config.feature_dim, config.label_num)
        elif config.fusion == 'attention':  # single head (only 3 modules)
            self.ent_Q = nn.Parameter(
                self.weight_init(config.attention_dim, 'uniform'))
            self.ent_K = nn.Linear(config.module_embedding_dim,
                                   config.attention_dim)
            self.ent_V = nn.Linear(config.module_embedding_dim,
                                   config.module_embedding_dim)
            self.fc = nn.Linear(config.module_embedding_dim, config.label_num)
        else:
            self.fc = nn.ModuleList([
                nn.Linear(config.module_embedding_dim, config.label_num)
                for _ in range(3)
            ])
        self.dropout = nn.Dropout(config.dropout_rate)
Example #4
    def __init__(self, config):
        super().__init__()
        # Select the text encoder named in the config; the options are mutually exclusive.
        if config.encoder == 'textcnn':
            self.encoder = TextCNNEncoder(config)
        elif config.encoder == 'bilstm':
            self.encoder = BiLSTMEncoder(config)
        elif config.encoder in ['bert', 'bert_freeze']:
            self.encoder = BERTEncoder(config)