from sklearn.metrics import f1_score


def f1_avg(y_pred, y_true):
    '''
    Average of the micro- and macro-averaged F1 scores (tasks 1 & 2).
    :param y_pred: predicted labels
    :param y_true: ground-truth labels
    :return: mean of the micro and macro F1 scores
    '''
    # pos_label is ignored when average is 'micro' or 'macro', so it is omitted.
    f1_micro = f1_score(y_pred=y_pred,
                        y_true=y_true,
                        average='micro')
    f1_macro = f1_score(y_pred=y_pred,
                        y_true=y_true,
                        average='macro')
    return (f1_micro + f1_macro) / 2
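
A quick sanity check with made-up labels (illustrative only, not from the original):

y_true = [0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1]
print(f1_avg(y_pred, y_true))  # 0.8: micro and macro F1 happen to coincide on this toy set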
Example #2
def get_f1(self):
    # Dispatch on the evaluation mode so only the needed prediction is
    # computed, and an unknown mode raises instead of leaving the scores
    # unbound; pos_label is dropped since micro/macro averaging ignores it.
    predict = {
        "top": self.predict2top,
        "half": self.predict2half,
        "both": self.predict2both,
    }
    if self.type not in predict:
        raise ValueError("unknown prediction type: %r" % self.type)
    y_pred = predict[self.type]()
    f1_micro = f1_score(y_pred=y_pred, y_true=self.y_true, average='micro')
    f1_macro = f1_score(y_pred=y_pred, y_true=self.y_true, average='macro')
    return (f1_micro, f1_macro)
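
Micro-averaging pools every individual decision while macro-averaging weights the classes equally, so the two diverge on imbalanced data. A toy illustration (labels invented for this example):

y_true = [1, 1, 1, 1, 0]
y_pred = [1, 1, 1, 1, 1]
print(f1_score(y_true, y_pred, average='micro'))  # 0.8, dominated by class 1
print(f1_score(y_true, y_pred, average='macro'))  # ~0.44; class 0 is never predicted, so its F1 is 0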
Example #3
import torch
from sklearn.metrics import f1_score, precision_score, recall_score


@torch.no_grad()  # evaluation needs no gradient tracking
def eval(tagger, data_loader, criterion):
    tagger.eval()
    preds_collection = []
    labels_collection = []
    total_loss = 0
    total_samples = 0
    for sentences, targets, labels in data_loader:
        sentences, targets, labels = sentences.cuda(), targets.cuda(), labels.cuda()
        # sentence_clip (defined elsewhere in this project) trims trailing
        # padding columns; targets/labels are cut to the clipped length.
        sentences = sentence_clip(sentences)
        targets = targets[:, :sentences.size(1)].contiguous()
        labels = labels[:, :sentences.size(1)].contiguous()
        logits = tagger(sentences, targets)
        logits = logits.view(-1, 2)
        preds = logits.argmax(dim=-1)
        labels = labels.view(-1)
        loss = criterion(logits, labels).item()
        # Count only real tokens: -1 marks padding ignored by the criterion.
        batch_size = (labels != -1).long().sum().item()
        total_loss += loss * batch_size
        total_samples += batch_size
        preds_collection.append(preds)
        labels_collection.append(labels)
    preds_collection = torch.cat(preds_collection, dim=0)
    labels_collection = torch.cat(labels_collection, dim=0)
    # Drop padding positions before computing the metrics.
    mask = labels_collection != -1
    preds_collection = preds_collection.masked_select(mask).cpu().numpy()
    labels_collection = labels_collection.masked_select(mask).cpu().numpy()
    precision = precision_score(y_true=labels_collection,
                                y_pred=preds_collection,
                                labels=[0, 1],
                                average='macro')
    recall = recall_score(y_true=labels_collection,
                          y_pred=preds_collection,
                          labels=[0, 1],
                          average='macro')
    f1 = f1_score(y_true=labels_collection,
                  y_pred=preds_collection,
                  labels=[0, 1],
                  average='macro')
    loss = total_loss / total_samples
    return loss, precision, recall, f1
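
None of these snippets show how criterion is constructed; for the -1 padding convention above to hold, it would have to ignore that index. A plausible setup (an assumption, not shown in the source):

import torch.nn as nn

# Assumption: positions labelled -1 must not contribute to the loss.
criterion = nn.CrossEntropyLoss(ignore_index=-1)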
Example #4
def getresult(y_pred, test_label):
    # pos_label is ignored by micro/macro averaging, so it is omitted here.
    f1_micro = f1_score(y_pred=y_pred, y_true=test_label, average='micro')
    f1_macro = f1_score(y_pred=y_pred, y_true=test_label, average='macro')
    return (f1_micro, f1_macro)
Example #5
def score(self, y_pred, y):
    # Assumes y and y_pred are CPU torch tensors (.numpy() requires CPU).
    return f1_score(y.numpy(), y_pred.numpy(), average='micro')
Example #6
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        # Count only real tokens: -1 marks padding ignored by the criterion.
        batch_size = (labels != -1).long().sum().item()
        total_loss += batch_size * loss.item()
        total_samples += batch_size
        preds_collection.append(preds)
        labels_collection.append(labels)
        # if i % 100 == 0:
        #     print('[epoch %d] [step %d] [loss %.4f]' % (epoch, i, loss.item()))
    preds_collection = torch.cat(preds_collection, dim=0)
    labels_collection = torch.cat(labels_collection, dim=0)
    # Drop padding positions before computing the metrics.
    mask = labels_collection != -1
    preds_collection = preds_collection.masked_select(mask).cpu().numpy()
    labels_collection = labels_collection.masked_select(mask).cpu().numpy()
    train_precision = precision_score(y_true=labels_collection,
                                      y_pred=preds_collection,
                                      labels=[0, 1],
                                      average='macro')
    train_recall = recall_score(y_true=labels_collection,
                                y_pred=preds_collection,
                                labels=[0, 1],
                                average='macro')
    train_f1 = f1_score(y_true=labels_collection,
                        y_pred=preds_collection,
                        labels=[0, 1],
                        average='macro')
    train_loss = total_loss / total_samples
    print('epoch: %d\ttrain\tloss: %.4f\tprecision: %.4f\trecall: %.4f\tf1: %.4f' % (epoch, train_loss, train_precision, train_recall, train_f1))
    val_loss, val_precision, val_recall, val_f1 = eval(tagger, val_loader, criterion)
    print('epoch: %d\tval\tloss: %.4f\tprecision: %.4f\trecall: %.4f\tf1: %.4f\n' % (epoch, val_loss, val_precision, val_recall, val_f1))
    visualizer.add(epoch, train_loss, train_precision, train_recall, train_f1, val_loss, val_precision, val_recall, val_f1)
    # Keep the checkpoint with the best validation F1 so far.
    if val_f1 > max_val_f1_score:
        max_val_f1_score = val_f1
        torch.save(tagger, save_path)
        print('save model')

print('max val f1_score: %.4f' % max_val_f1_score)

visualizer.plot()