def __init__(self, params):
    """Build the bi-encoder ranker: a context (mention-side) encoder plus a
    candidate (entity-side) encoder, with a mention-detection head and loss.

    Args:
        params: dict-like config; keys read here are 'context_bert_model',
            'out_dim', 'pull_from_layer', and 'add_linear'.
    """
    # NOTE(review): a second, later definition of __init__ exists in this file
    # with different settings (e.g. it freezes the candidate encoder); at class
    # creation time the later one shadows this one — confirm which is intended.
    super(BiencoderRanker, self).__init__()
    self.params = params

    # Context-side BERT loaded via the project helper from a local checkpoint
    # directory, fine-tuned weights taken from params['context_bert_model'].
    self.ctxt_bert = load_model('./model/bert-large-uncased', params['context_bert_model'])

    # Candidate (entity) encoder; its pretrained state is NOT loaded here
    # (load_cand_encoder_state is intentionally skipped in this variant).
    self.cand_encoder = wiki_encoder.WikiEncoderModule(params)

    self.context_encoder = BertEncoder(
        self.ctxt_bert,
        params["out_dim"],
        layer_pulled=params["pull_from_layer"],
        add_linear=params["add_linear"],
    )

    # Mention detection head (linear QA-style scorer) and its loss.
    self.mention_score = MentionScoresHead(bert_output_dim=768, score_method='qa_linear', max_mention_length=10)
    self.mention_loss = MentionLoss()

    # Keep the candidate encoder trainable. (The original comment said
    # "freeze parameters", but requires_grad=True means it is NOT frozen —
    # kept as-is to preserve behavior; TODO confirm intent.)
    for param in self.cand_encoder.parameters():
        param.requires_grad = True
def __init__(self, params):
    """Construct the bi-encoder ranker.

    Wires up a trainable context encoder (BERT wrapped by BertEncoder), a
    frozen candidate encoder with pretrained weights restored, a QA-MLP
    mention-scoring head with its loss, and a 768->1024 projection for
    mention embeddings.

    Args:
        params: configuration mapping; reads 'context_bert_model',
            'out_dim', 'pull_from_layer', and 'add_linear'.
    """
    super(BiencoderRanker, self).__init__()
    self.params = params

    # Candidate (entity) side: build the encoder, restore its pretrained
    # state, then freeze it so only the context side is trained.
    self.cand_encoder = wiki_encoder.WikiEncoderModule(params)
    self.load_cand_encoder_state()
    for frozen_param in self.cand_encoder.parameters():
        frozen_param.requires_grad = False

    # Context (mention) side: BERT loaded from the configured checkpoint,
    # wrapped so a chosen layer can be pulled and optionally projected.
    bert_config = BertConfig_new.from_pretrained(params['context_bert_model'])
    self.ctxt_bert = BertModel_new.from_pretrained(params['context_bert_model'], config=bert_config)
    self.context_encoder = BertEncoder(
        self.ctxt_bert,
        params["out_dim"],
        layer_pulled=params["pull_from_layer"],
        add_linear=params["add_linear"],
    )

    # Mention detection: MLP-style span scorer plus its training loss.
    self.mention_score = mention_detection.MentionScoresHead(bert_output_dim=768, score_method='qa_mlp', max_mention_length=10)
    self.mention_loss = mention_detection.MentionLoss()

    # Project 768-dim mention embeddings up to the 1024-dim candidate space.
    self.change_mention_embedding_dim = nn.Linear(768, 1024)