def run(self, record):
    """Predict the positive-class probability for a text pair via the ONNX Runtime session."""
    text_a, text_b = record[0], record[1]
    example = self.processor._create_single_example(text_a, text_b)
    # 32 is the max sequence length the exported model was built with.
    feature = convert_single_example(example, 32, self.tokenizer)

    input_ids = torch.tensor(feature.input_ids, dtype=torch.long).unsqueeze(0)
    segment_ids = torch.tensor(feature.segment_ids, dtype=torch.long).unsqueeze(0)
    input_mask = torch.tensor(feature.input_mask, dtype=torch.long).unsqueeze(0)

    ort_inputs = {
        'input_ids': self.to_numpy(input_ids),
        'segment_ids': self.to_numpy(segment_ids),
        'input_mask': self.to_numpy(input_mask)
    }
    ort_outputs = self.session.run(None, ort_inputs)
    # 2-dimensional logits vector, e.g. tensor([[4.7433, -4.5335]])
    ort_logits = torch.from_numpy(ort_outputs[0])
    prob = ort_logits.sigmoid()[:, 1].tolist()[0]  # e.g. 0.123
    return prob
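# A minimal sketch of the `to_numpy` helper referenced above as `self.to_numpy`
# (its definition is not shown in this section, so treat this as an assumption;
# it mirrors the common onnxruntime tutorial pattern):
def to_numpy(tensor):
    # Detach from autograd and move to CPU before handing the array to ONNX Runtime.
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()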
def run(self, record):
    """Predict the fine-grained label probability for a text pair with the PyTorch model."""
    text_a, text_b = record[0], record[1]
    example = self.processor._create_single_example(text_a, text_b)
    feature = convert_single_example(example, self.max_seq_length, self.tokenizer)

    input_ids = torch.tensor(feature.input_ids, dtype=torch.long).unsqueeze(0).to(
        self.config.device)
    segment_ids = torch.tensor(feature.segment_ids, dtype=torch.long).unsqueeze(0).to(
        self.config.device)
    input_mask = torch.tensor(feature.input_mask, dtype=torch.long).unsqueeze(0).to(
        self.config.device)

    logits = self.model(input_ids, segment_ids, input_mask).detach()
    prob = logits.sigmoid()[:, 1].tolist()  # e.g. [0.123]
    return prob[0]
def infer(bert, nezha, query_A, query_B):
    """Ensemble inference: average the logits of the BERT and NeZha ONNX sessions."""
    text_a, text_b = query_A, query_B
    example = processor._create_single_example(text_a, text_b)
    feature = convert_single_example(example, max_seq_len, tokenizer)

    input_ids = torch.tensor(feature.input_ids, dtype=torch.long).unsqueeze(0)
    segment_ids = torch.tensor(feature.segment_ids, dtype=torch.long).unsqueeze(0)
    input_mask = torch.tensor(feature.input_mask, dtype=torch.long).unsqueeze(0)

    ort_inputs = {
        'input_ids': to_numpy(input_ids),
        'segment_ids': to_numpy(segment_ids),
        'input_mask': to_numpy(input_mask)
    }
    ort_outputs_bert = bert.run(None, ort_inputs)
    ort_outputs_nezha = nezha.run(None, ort_inputs)

    ort_logits_bert = torch.from_numpy(ort_outputs_bert[0])
    ort_logits_nezha = torch.from_numpy(ort_outputs_nezha[0])
    # Average the two models' logits, then take the positive-class probability.
    res = torch.mean(torch.stack([ort_logits_bert, ort_logits_nezha]), 0)
    prob = res.sigmoid()[:, 1].tolist()[0]
    return prob
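# A hedged usage sketch for the two-model ensemble above. The .onnx paths and
# query strings are hypothetical; onnxruntime.InferenceSession is the standard
# session constructor.
import onnxruntime

bert_session = onnxruntime.InferenceSession('bert.onnx')
nezha_session = onnxruntime.InferenceSession('nezha.onnx')
prob = infer(bert_session, nezha_session, 'query A text', 'query B text')
print(prob)  # a single float in (0, 1)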
def infer(model, query_A, query_B):
    """Predict the positive-class probability for a text pair with a PyTorch model."""
    text_a, text_b = query_A, query_B
    example = processor._create_single_example(text_a, text_b)
    feature = convert_single_example(example, max_seq_len, tokenizer)

    input_ids = torch.tensor(feature.input_ids, dtype=torch.long).unsqueeze(0).to(config.device)
    input_mask = torch.tensor(feature.input_mask, dtype=torch.long).unsqueeze(0).to(config.device)
    segment_ids = torch.tensor(feature.segment_ids, dtype=torch.long).unsqueeze(0).to(config.device)

    logits = model(input_ids, segment_ids, input_mask).detach()
    prob = logits.sigmoid()[:, 1].tolist()  # e.g. [0.123]
    return prob[0]
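# A minimal sketch of driving the PyTorch path above (it assumes `model` is the
# trained classifier already moved to config.device). eval() disables dropout,
# and no_grad() skips building the autograd graph entirely, which the
# .detach() call inside infer() alone does not.
model.eval()
with torch.no_grad():
    prob = infer(model, 'query A text', 'query B text')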
def infer(model, query_A, query_B):
    """Predict the positive-class probability for a text pair via an ONNX Runtime session."""
    text_a, text_b = query_A, query_B
    example = processor._create_single_example(text_a, text_b)
    feature = convert_single_example(example, max_seq_len, tokenizer)

    input_ids = torch.tensor(feature.input_ids, dtype=torch.long).unsqueeze(0)
    segment_ids = torch.tensor(feature.segment_ids, dtype=torch.long).unsqueeze(0)
    input_mask = torch.tensor(feature.input_mask, dtype=torch.long).unsqueeze(0)

    ort_inputs = {
        'input_ids': to_numpy(input_ids),
        'segment_ids': to_numpy(segment_ids),
        'input_mask': to_numpy(input_mask)
    }
    ort_outputs = model.run(None, ort_inputs)
    ort_logits = torch.from_numpy(ort_outputs[0])
    prob = ort_logits.sigmoid()[:, 1].tolist()[0]  # e.g. 0.123
    return prob
def run(self, record):
    """Predict the positive-class probability for a text pair via the ONNX Runtime session."""
    text_a, text_b = record[0], record[1]
    example = self.processor._create_single_example(text_a, text_b)
    feature = convert_single_example(example, self.max_seq_length, self.tokenizer)

    input_ids = torch.tensor(feature.input_ids, dtype=torch.long).unsqueeze(0)
    input_mask = torch.tensor(feature.input_mask, dtype=torch.long).unsqueeze(0)
    segment_ids = torch.tensor(feature.segment_ids, dtype=torch.long).unsqueeze(0)

    # onnxruntime's Python API expects numpy arrays, not torch tensors.
    ort_inputs = {
        'input_ids': input_ids.numpy(),
        'input_mask': input_mask.numpy(),
        'segment_ids': segment_ids.numpy()
    }
    ort_outputs = self.session.run(None, ort_inputs)
    ort_logits = torch.from_numpy(ort_outputs[0])
    prob = ort_logits.sigmoid()[:, 1].tolist()[0]
    return prob
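# A hedged sanity check for the export workflow implied above (the names
# `model`, `session`, and the tolerance values are assumptions): compare the
# PyTorch logits against the ONNX Runtime output for the same inputs.
import numpy as np

torch_logits = model(input_ids, segment_ids, input_mask).detach().cpu().numpy()
ort_logits = session.run(None, ort_inputs)[0]
np.testing.assert_allclose(torch_logits, ort_logits, rtol=1e-03, atol=1e-05)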