from typing import Tuple

import torch
import torch.nn.functional as F
from tqdm import tqdm

# project-local modules
import data_prepare
import evaluation
import prepare


def rel_test(self, seq2seq) -> Tuple[Tuple[float, float, float]]:
    predicts = []
    gold = []
    loss = 0.0
    data = prepare.load_data(self.mode)
    if self.mode == 'test':
        data = prepare.test_process(data)
    else:
        data = prepare.process(data)
    data = data_prepare.Data(data, self.config.batch_size, self.config)
    for batch_i in tqdm(range(data.batch_number)):
        batch_data = data.next_batch(is_random=False)
        pred_action_list, pred_logits_list = self.test_step(batch_data, seq2seq)
        predicts.extend(pred_action_list)
        gold.extend(batch_data.all_triples)
        mean_loss = 0.0
        if self.config.losstype == 1:  # 1. original loss: summed NLL over decoder steps
            standard_outputs = torch.from_numpy(batch_data.standard_outputs).to(self.device).to(torch.long)
            for t in range(seq2seq.decoder.decodelen):
                mean_loss = mean_loss + F.nll_loss(pred_logits_list[t], standard_outputs[:, t])
            mean_loss /= pred_logits_list[0].shape[0]
        if batch_i < 1000:  # accumulate the loss over at most 1000 batches
            loss += mean_loss
    loss /= min(data.batch_number, 1000)
    f1, precision, recall = evaluation.compare(predicts, gold, self.config, show_rate=None, simple=True)
    (r_f1, r_precision, r_recall), (e_f1, e_precision, e_recall) = evaluation.rel_entity_compare(predicts, gold, self.config)
    return loss.item(), (f1, precision, recall), (r_f1, r_precision, r_recall), (e_f1, e_precision, e_recall)

def rel_test(self) -> Tuple[Tuple[float, float, float]]:
    predicts = []
    gold = []
    for batch_i in range(self.data.batch_number):
        batch_data = self.data.next_batch(is_random=False)
        pred_action_list, pred_logits_list = self.test_step(batch_data)
        pred_action_list = pred_action_list.cpu().numpy()
        predicts.extend(pred_action_list)
        gold.extend(batch_data.all_triples)
    (r_f1, r_precision, r_recall), (e_f1, e_precision, e_recall) = evaluation.rel_entity_compare(predicts, gold, self.config)
    self.data.reset()
    return (r_f1, r_precision, r_recall), (e_f1, e_precision, e_recall)

def train_step(self, batch: data_prepare.InputData) -> torch.Tensor:
    self.optimizer.zero_grad()
    sentence = batch.sentence_fw
    sentence_eos = batch.input_sentence_append_eos
    all_events = torch.from_numpy(batch.standard_outputs).to(self.device).to(torch.long)
    sentence = torch.from_numpy(sentence).to(self.device)
    sentence_eos = torch.from_numpy(sentence_eos).to(self.device)
    lengths = torch.Tensor(batch.input_sentence_length).int().tolist()
    pred_action_list, pred_logits_list = self.seq2seq(sentence, sentence_eos, lengths)
    if self.config.losstype == 1:  # 1. original loss: summed NLL over decoder steps
        loss = 0
        for t in range(self.seq2seq.decoder.decodelen):
            loss = loss + self.loss(pred_logits_list[t], all_events[:, t])
    (r_f1, r_precision, r_recall), (e_f1, e_precision, e_recall) = evaluation.rel_entity_compare(pred_action_list, batch.all_triples, self.config)
    f1, precision, recall = evaluation.compare(pred_action_list, batch.all_triples, self.config, show_rate=None, simple=True)
    loss.backward()
    self.optimizer.step()
    return loss, (f1, precision, recall), (r_f1, r_precision, r_recall), (e_f1, e_precision, e_recall)

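# The sketch below (not part of the original module) shows one way the
# train_step method above could be driven for a single epoch. It assumes the
# surrounding class also exposes self.data (a data_prepare.Data instance), as
# rel_test above does; the method name train_epoch_sketch is hypothetical.
def train_epoch_sketch(self):
    epoch_loss = 0.0
    for _ in range(self.data.batch_number):
        batch = self.data.next_batch(is_random=True)
        loss, micro_scores, rel_scores, entity_scores = self.train_step(batch)
        epoch_loss += loss.item()
    self.data.reset()
    return epoch_loss / self.data.batch_number
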
def rel_test(self) -> Tuple[Tuple[float, float, float]]:
    predicts = []
    gold = []
    data = prepare.load_data(self.mode)
    if self.mode == 'test':
        data = prepare.test_process(data)
    else:
        data = prepare.process(data)
    data = data_prepare.Data(data, self.config.batch_size, self.config)
    for batch_i in range(data.batch_number):
        batch_data = data.next_batch(is_random=False)
        pred_action_list, pred_logits_list = self.test_step(batch_data)
        pred_action_list = pred_action_list.cpu().numpy()
        predicts.extend(pred_action_list)
        gold.extend(batch_data.all_triples)
    (r_f1, r_precision, r_recall), (e_f1, e_precision, e_recall) = evaluation.rel_entity_compare(predicts, gold, self.config)
    data.reset()
    return (r_f1, r_precision, r_recall), (e_f1, e_precision, e_recall)

def event_test(self, seq2seq) -> Tuple[Tuple[float, float, float]]:
    predicts = []
    gold = []
    data = prepare.load_data(self.mode)
    if self.mode == 'test':
        data = prepare.test_process(data)
    else:
        data = prepare.process(data)
    data = data_prepare.Data(data, self.config.batch_size, self.config)
    loss = 0.0
    for batch_i in tqdm(range(data.batch_number)):
        batch_data = data.next_batch(is_random=False)
        pred_action_list, pred_logits_list = self.test_step(batch_data, seq2seq)
        pred_action_list = pred_action_list.cpu().numpy()
        gold.extend(batch_data.standard_outputs)
        predicts.extend([pred_action_list[:, i] for i in range(pred_action_list.shape[1])])
        if self.config.decoder_type == 'onecrf':
            loss += pred_logits_list  # with the CRF decoder the second return value is already the loss
        elif self.config.losstype == 1:  # 1. original loss: summed NLL over decoder steps
            standard_outputs = torch.from_numpy(batch_data.standard_outputs).to(self.device).to(torch.long)
            for t in range(seq2seq.decoder.decodelen):
                loss = loss + F.nll_loss(pred_logits_list[t], standard_outputs[:, t])
        elif self.config.losstype == 2:  # 2. loss variant: permutation matching between predicted and gold triples
            all_events = torch.from_numpy(batch_data.standard_outputs).to(self.device).to(torch.long)
            all_triples = batch_data.all_triples
            lengths = batch_data.input_sentence_length
            seg_len = self.max_sentence_length + 1  # length of one decoded triple segment
            for i in range(all_events.shape[0]):
                now_loss = 0.
                triple_num = min(len(all_triples[i]) // (lengths[i] + 1), self.config.triple_number)
                # compare every predicted segment against every gold segment
                for j in range(triple_num):
                    glob = all_events[i, j * seg_len:(j + 1) * seg_len]
                    for k in range(triple_num):
                        now_loss += F.nll_loss(pred_logits_list[k * seg_len:(k + 1) * seg_len, i], glob)
                if triple_num != 0:
                    now_loss /= (triple_num * triple_num)
                # also include the decoding step immediately after the last gold triple
                loss += now_loss + F.nll_loss(
                    pred_logits_list[triple_num * seg_len:triple_num * seg_len + 1, i],
                    all_events[i, triple_num * seg_len:triple_num * seg_len + 1])
    loss /= data.batch_number  # average over all batches
    require_f1, require_precision, require_recall = evaluation.event_entity_yaoqiu_compare(predicts, gold, self.config)
    f1, precision, recall = evaluation.compare(predicts, gold, self.config, show_rate=None, simple=True)
    (r_f1, r_precision, r_recall), (e_f1, e_precision, e_recall) = evaluation.rel_entity_compare(predicts, gold, self.config)
    (event_f1, event_precision, event_recall), (entity_f1, entity_precision, entity_recall) = evaluation.event_entity_compare(predicts, gold, self.config)
    data.reset()
    return (loss.item(), (require_f1, require_precision, require_recall),
            (f1, precision, recall), (r_f1, r_precision, r_recall),
            (e_f1, e_precision, e_recall), (event_f1, event_precision, event_recall),
            (entity_f1, entity_precision, entity_recall))

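# A minimal usage sketch (an assumption, not part of the original code) of how
# the evaluation entry points above might be combined once a model is trained:
# rel_test reports relation/entity scores on the current split, and event_test
# additionally reports the averaged loss and event-level metrics.
def evaluate_sketch(self, seq2seq):
    rel_scores, entity_scores = self.rel_test()
    (test_loss, require_scores, micro_scores, _, _,
     event_scores, entity_span_scores) = self.event_test(seq2seq)
    return test_loss, micro_scores, rel_scores, event_scores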