Code Example #1
def dev_step() -> float:
    # Evaluate on the dev set. `mymodel`, `optimizer`, `dev_corpus`,
    # `batch_size`, `writer`, `vocab`, and the counters `i` / `num_iter`
    # are globals from the enclosing training script.
    optimizer.zero_grad()
    mymodel.eval()
    new_corpus = []
    ent_span_losses = []
    ent_ids_losses = []
    rel_losses = []
    bin_rel_losses = []
    for k in range(0, len(dev_corpus), batch_size):
        batch = dev_corpus[k: k + batch_size]
        new_batch, outputs = step(batch)
        new_corpus.extend(new_batch)
        ent_span_losses.append(outputs['ent_span_loss'].item())
        ent_ids_losses.append(outputs['ent_ids_loss'].item())
        rel_losses.append(outputs['rel_loss'].item())
        bin_rel_losses.append(outputs['bin_rel_loss'].item())
    avg_ent_span_loss = np.mean(ent_span_losses)
    avg_ent_ids_loss = np.mean(ent_ids_losses)
    avg_rel_loss = np.mean(rel_losses)
    avg_bin_rel_loss = np.mean(bin_rel_losses)
    loss = avg_ent_span_loss + avg_ent_ids_loss + avg_rel_loss + avg_bin_rel_loss

    print("Epoch : %d Avg Loss : %.5f(%.5f, %.5f, %.5f, %.5f)" % (
          i, loss,
          avg_ent_span_loss, 
          avg_ent_ids_loss,
          avg_rel_loss,
          avg_bin_rel_loss))
    writer.add_scalar("Dev/Loss", loss, num_iter)
    writer.add_scalar("Dev/EntSpanLoss", avg_ent_span_loss, num_iter)
    writer.add_scalar("Dev/EntLoss", avg_ent_ids_loss, num_iter)
    writer.add_scalar("Dev/RelLoss", avg_rel_loss, num_iter)
    writer.add_scalar("Dev/BinRelLoss", avg_bin_rel_loss, num_iter)

    eval_path = os.path.join(config.save_dir, "validate.dev.output")
    eval_ent_span_path = os.path.join(config.save_dir, "validate.dev.output.ent_span")
    myutil.print_predictions(new_corpus, eval_path, vocab)
    myutil.print_ent_span_predictions(new_corpus, eval_ent_span_path, vocab)
    entity, relation = eval_file(eval_path)
    eval_file(eval_ent_span_path)

    writer.add_scalar("Dev/EntPrecision", entity.prec, num_iter)
    writer.add_scalar("Dev/EntRecall", entity.rec, num_iter)
    writer.add_scalar("Dev/EntFscore", entity.fscore, num_iter)
    writer.add_scalar("Dev/RelPrecision", relation.prec, num_iter)
    writer.add_scalar("Dev/RelRecall", relation.rec, num_iter)
    writer.add_scalar("Dev/RelFscore", relation.fscore, num_iter)
    return relation.fscore
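The pattern above (collect per-batch losses, average with `np.mean`, then log each average to TensorBoard) can be factored into a small standalone helper. A minimal sketch, assuming the standard `torch.utils.tensorboard.SummaryWriter` API; the `log_dev_losses` name and the `runs/dev` directory are illustrative, not from the original project:

import numpy as np
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/dev")  # hypothetical log directory

def log_dev_losses(loss_lists, num_iter):
    # Average each per-batch loss list, log it under Dev/<name>,
    # and return the summed total (mirroring the code above).
    total = 0.0
    for name, values in loss_lists.items():
        avg = float(np.mean(values))
        writer.add_scalar("Dev/%s" % name, avg, num_iter)
        total += avg
    writer.add_scalar("Dev/Loss", total, num_iter)
    return total

# Usage with dummy per-batch losses:
log_dev_losses({"EntSpanLoss": [0.5, 0.3], "RelLoss": [0.2, 0.4]}, num_iter=10)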
Code Example #2
def dev_step(dev_tensors, batch_size):
    optimizer.zero_grad()
    mymodel.eval()
    predictions = []
    entity_losses = []
    relation_losses = []
    new_tensors = []
    all_ent_num = 0
    all_rel_num = 0
    for k in range(0, len(dev_tensors), batch_size):
        batch = dev_tensors[k:k + batch_size]
        entity_loss, relation_loss, pred_entity_tags, pred_rel_tags, X_len, candi_rel_num, batch = step(
            batch)
        all_rel_num += candi_rel_num
        all_ent_num += sum(X_len)
        predictions.extend(list(zip(pred_entity_tags, pred_rel_tags)))
        entity_losses.append(entity_loss.item())
        relation_losses.append(relation_loss.item())
        new_tensors.extend(batch)
    entity_loss = sum(entity_losses) / all_ent_num
    if all_rel_num == 0:
        relation_loss = 0
    else:
        relation_loss = sum(relation_losses) / all_rel_num
    loss = entity_loss + relation_loss

    print('Epoch : %d Minibatch : %d Loss : %.5f\t(%.5f, %.5f)' %
          (i, j, loss, entity_loss, relation_loss))

    eval_path = os.path.join(config.save_dir, "validate.dev.output")
    utils.print_predictions(new_tensors, predictions, eval_path, word_vocab,
                            chunk_vocab, rel_vocab)
    entity_score, relation_score = eval_file(eval_path)
    return relation_score
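Unlike Example #1, which averages per-batch losses, Example #2 micro-averages: it divides the summed losses by the number of scored items (tokens for entities, candidate pairs for relations) and guards against an empty candidate set. A standalone sketch of that normalization; the function and argument names are illustrative:

def normalized_losses(entity_losses, relation_losses, n_tokens, n_candidates):
    # Micro-average: divide summed batch losses by the total number of
    # scored items rather than by the number of batches.
    entity_loss = sum(entity_losses) / n_tokens
    relation_loss = 0.0 if n_candidates == 0 else sum(relation_losses) / n_candidates
    return entity_loss, relation_loss, entity_loss + relation_loss

print(normalized_losses([3.0, 1.0], [0.5], n_tokens=40, n_candidates=5))
# (0.1, 0.1, 0.2)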
Code Example #3
File: train.py Project: ystone1025/AntNRE
def dev_step() -> float:
    optimizer.zero_grad()
    mymodel.eval()
    new_corpus = []
    ent_losses = []
    rel_losses = []
    for k in range(0, len(dev_corpus), batch_size):
        batch = dev_corpus[k: k + batch_size]
        new_batch, outputs = step(batch)
        new_corpus.extend(new_batch)
        ent_losses.append(outputs['ent_loss'].item())
        rel_losses.append(outputs['rel_loss'].item())
    avg_ent_loss = np.mean(ent_losses)
    avg_rel_loss = np.mean(rel_losses)
    loss = avg_ent_loss + avg_rel_loss

    print("Epoch : %d Minibatch : %d Avg Loss : %.5f\t(%.5f, %.5f)" % (i, j, loss, avg_ent_loss, avg_rel_loss))

    eval_path = os.path.join(config.save_dir, "validate.dev.output")
    myutil.print_predictions(new_corpus, eval_path, vocab)
    entity_score, relation_score = eval_file(eval_path)
    return relation_score
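All three `dev_step` variants call `mymodel.eval()` but still run the forward pass with gradient tracking enabled. Wrapping evaluation in `torch.no_grad()` skips that bookkeeping and saves memory; a common refinement, sketched here as a suggestion rather than something the original projects do:

import torch

def evaluate(model, batches, step_fn):
    # eval() switches dropout / batch-norm layers to inference behaviour;
    # no_grad() additionally disables autograd for the forward passes.
    model.eval()
    outputs = []
    with torch.no_grad():
        for batch in batches:
            outputs.append(step_fn(batch))
    model.train()  # restore training mode for the caller
    return outputs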
Code Example #4
    # Snippet begins mid-loop; `dom_id` indexes the current domain's checkpoint.
    state_dict = torch.load(
            open(config.load_model_path_list[dom_id], 'rb'),
            map_location=lambda storage, loc: storage)
    cur_state_dict = mymodel.state_dict()
    for k in state_dict.keys():
        if k in cur_state_dict:
            cur_state_dict[k] = state_dict[k]
    mymodel.load_state_dict(cur_state_dict)
    print("loading previous model successful [%s]" % config.load_model_path_list[dom_id])

    #  print(ent_span_vocab.item2idx)
    #  print(dom2corpus[dom_id]['chunk_vocab'].item2idx)
    #  print(mymodel.ent2span[1].weight)
    #  print(mymodel.ent2span[1].bias)

    for title, tensors in zip(["train", "dev", "test"],
                              [dom2corpus[dom_id]['train_tensors'],
                               dom2corpus[dom_id]['dev_tensors'],
                               dom2corpus[dom_id]['test_tensors']]):
        if title == "train": continue
        print("\nEvaluating %s" % title)
        predictions, new_tensors = predict_all(tensors, config.batch_size, dom_id)
        eval_path = os.path.join(config.save_dir, "final.%s.output.Domain_%d" % (title, dom_id))
        utils.print_predictions(new_tensors,
                                predictions,
                                eval_path,
                                word_vocab,
                                dom2corpus[dom_id]['chunk_vocab'],
                                dom2corpus[dom_id]['rel_vocab'])
        eval_file(eval_path)
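Example #4 loads a checkpoint partially: only parameter names present in the current model are copied, so a checkpoint with extra or missing heads still loads cleanly. A self-contained sketch of the same idiom, with a shape check added as a safety net (the two toy modules are illustrative):

import torch.nn as nn

src = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))  # "checkpoint" model
dst = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 3))  # current model, new head

state_dict = src.state_dict()
cur_state_dict = dst.state_dict()
for k, v in state_dict.items():
    # Copy only parameters that exist in the target and match in shape;
    # here "1.weight" / "1.bias" are skipped because the head sizes differ.
    if k in cur_state_dict and cur_state_dict[k].shape == v.shape:
        cur_state_dict[k] = v
dst.load_state_dict(cur_state_dict)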
Code Example #5
File: test.py Project: yanqiuxia/joint_entrel_gcn
def step(batch):  # opening line inferred; the snippet begins mid-file
    sort_batch_tensor = myutil.get_minibatch(batch, vocab, config.use_cuda)
    outputs = mymodel(sort_batch_tensor)
    new_batch = create_batch_list(sort_batch_tensor, outputs)
    return new_batch


def predict_all(corpus) -> list:
    mymodel.eval()
    new_corpus = []
    for k in range(0, len(corpus), batch_size):
        print("[ %d / %d ]" % (len(corpus), min(len(corpus), k + batch_size)))
        batch = corpus[k:k + batch_size]
        new_batch = step(batch)
        new_corpus.extend(new_batch)
    return new_corpus


batch_size = config.batch_size
for title, corpus in zip(["train", "dev", "test"],
                         [train_corpus, dev_corpus, test_corpus]):
    if title == "train": continue
    print("\nEvaluating %s" % title)
    new_corpus = predict_all(corpus)
    eval_path = os.path.join(config.save_dir, "final.%s.output" % title)
    eval_ent_span_path = os.path.join(config.save_dir,
                                      "final.%s.output.ent_span" % title)
    myutil.print_ent_span_predictions(new_corpus, eval_ent_span_path, vocab)
    myutil.print_predictions(new_corpus, eval_path, vocab)
    eval_file(eval_ent_span_path)
    eval_file(eval_path)
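The `range(0, len(corpus), batch_size)` slicing loop recurs in every example on this page; it can be factored into a small generator. A sketch, not part of the original projects:

def iter_batches(items, batch_size):
    # Yield consecutive slices of at most batch_size items each.
    for k in range(0, len(items), batch_size):
        yield items[k:k + batch_size]

for batch in iter_batches(list(range(7)), batch_size=3):
    print(batch)  # [0, 1, 2] then [3, 4, 5] then [6]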
Code Example #6
def dev_step(dev_tensors, batch_size, dom_id, i_epoch):
    optimizer.zero_grad()
    mymodel.eval()
    predictions = []
    ent_losses = []
    rel_losses = []
    if config.add_share_loss:
        ent_span_losses = []
        rel_bin_losses = []
        share_predictions = []
        if config.add_trans_loss:
            trans_losses = []
    new_tensors = []
    all_ent_num = 0
    all_rel_num = 0
    all_rel_bin_num = 0
    for k in range(0, len(dev_tensors), batch_size):
        batch = dev_tensors[k: k + batch_size]
        if config.add_share_loss:
            if config.add_trans_loss:
                (ent_loss, ent_span_loss, rel_loss, rel_bin_loss, trans_loss,
                 pred_ent_tags, pred_ent_span_tags, pred_rel_tags, pred_rel_bin_tags,
                 candi_rel_num, candi_rel_bin_num, X_len, batch) = step(batch, dom_id, i_epoch)
                trans_losses.append(trans_loss.item())
            else:
                (ent_loss, ent_span_loss, rel_loss, rel_bin_loss,
                 pred_ent_tags, pred_ent_span_tags, pred_rel_tags, pred_rel_bin_tags,
                 candi_rel_num, candi_rel_bin_num, X_len, batch) = step(batch, dom_id, i_epoch)
            all_rel_bin_num += candi_rel_bin_num
            ent_span_losses.append(ent_span_loss.item())
            rel_bin_losses.append(rel_bin_loss.item())
            share_predictions.extend(list(zip(pred_ent_span_tags, pred_rel_bin_tags)))
        else:
            (ent_loss, rel_loss, pred_ent_tags, pred_rel_tags,
             candi_rel_num, X_len, batch) = step(batch, dom_id, i_epoch)
        all_rel_num += candi_rel_num
        all_ent_num += sum(X_len)
        predictions.extend(list(zip(pred_ent_tags, pred_rel_tags)))
        ent_losses.append(ent_loss.item())
        rel_losses.append(rel_loss.item())
        new_tensors.extend(batch)
    ent_loss = sum(ent_losses) / all_ent_num
    if all_rel_num == 0:
        rel_loss = 0
    else:
        rel_loss = sum(rel_losses) / all_rel_num
    if config.add_share_loss:
        ent_span_loss = sum(ent_span_losses) / all_ent_num
        if all_rel_bin_num == 0:
            rel_bin_loss = 0
        else:
            rel_bin_loss = sum(rel_bin_losses) / all_rel_bin_num
        if config.add_trans_loss:
            trans_loss = sum(trans_losses) / len(trans_losses)
            loss = ent_loss + rel_loss + ent_span_loss + rel_bin_loss + trans_loss
            print('Domain : %d Epoch : %d Minibatch : %d Loss : %.5f\t(%.5f, %.5f, %.5f, %.5f, %.5f)' % (
                dom_id, i_epoch, i,
                loss,
                ent_loss, rel_loss,
                ent_span_loss, rel_bin_loss,
                trans_loss))
        else:
            loss = ent_loss + rel_loss + ent_span_loss + rel_bin_loss
            print('Domain : %d Epoch : %d Minibatch : %d Loss : %.5f\t(%.5f, %.5f, %.5f, %.5f)' % (
                dom_id, i_epoch, i,
                loss,
                ent_loss, rel_loss,
                ent_span_loss, rel_bin_loss))
    else:
        loss = ent_loss + rel_loss
        print('Domain : %d Epoch : %d Minibatch : %d Loss : %.5f\t(%.5f, %.5f)' % (
            dom_id, i_epoch, i,
            loss,
            ent_loss, rel_loss))

    eval_path = os.path.join(config.save_dir, "validate.dev.output.Domain_%d" % dom_id)
    share_eval_path = os.path.join(config.save_dir, "validate.dev.output.share.Domain_%d" % dom_id)
    utils.print_predictions(new_tensors,
                            predictions,
                            eval_path,
                            word_vocab,
                            dom2corpus[dom_id]['chunk_vocab'],
                            dom2corpus[dom_id]['rel_vocab'])
    entity_score, relation_score = eval_file(eval_path)
    if config.add_share_loss:
        print("Share Task Evaluation (Dev)...")
        utils.print_share_predictions(new_tensors,
                                      share_predictions,
                                      share_eval_path,
                                      word_vocab,
                                      dom2corpus[dom_id]['ent_span_vocab'])
        eval_file(share_eval_path)
    return relation_score
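Example #6 selects among three loss compositions via `config.add_share_loss` and `config.add_trans_loss`; the branching amounts to summing whichever normalized terms are enabled. A compact sketch of that composition (the function name is illustrative, the term names mirror the original):

def total_dev_loss(ent_loss, rel_loss, ent_span_loss=None, rel_bin_loss=None,
                   trans_loss=None):
    # Main task losses plus any enabled auxiliary (shared / transfer) terms.
    loss = ent_loss + rel_loss
    for aux in (ent_span_loss, rel_bin_loss, trans_loss):
        if aux is not None:
            loss += aux
    return loss

print(total_dev_loss(1.0, 0.5))                         # main task only -> 1.5
print(total_dev_loss(1.0, 0.5, 0.25, 0.125, 0.125))     # all terms -> 2.0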