Example #1
def evaluate(model, test_data, step, vocab_srcs, vocab_tgts, config):
    model.eval()
    start_time = time.time()
    corrects, size = 0, 0

    # Initialize per-class counters for macro-averaging
    macro_averaging = {}
    for i in vocab_tgts.i2w:
        macro_averaging[i] = {'tp': 0, 'fn': 0, 'fp': 0}

    for idx, data in enumerate(test_data):
        if len(data.sentences) == 0:
            continue
        feature_words, feature_labels, target, feature_lengths = pair_data_variable(
            data, vocab_srcs, vocab_tgts, config)
        logit = model(feature_words, feature_labels, feature_lengths)
        correct = (torch.max(logit, 1)[1].view(
            target.size()).data == target.data).sum()
        corrects += correct
        size += 1

        # Accumulate per-class tp / fn / fp counts for macro-averaging
        gold = target.data
        predict = torch.max(logit, 1)[1].view(target.size()).data
        for j, i in enumerate(gold):
            if predict[j] == i:
                macro_averaging[vocab_tgts.id2word(i)]['tp'] += 1
            else:
                macro_averaging[vocab_tgts.id2word(i)]['fn'] += 1
                macro_averaging[vocab_tgts.id2word(predict[j])]['fp'] += 1
    # Compute macro-averaged precision, recall and F1
    macro_precision = 0
    macro_recall = 0
    count = 0
    for k in macro_averaging.keys():
        tp_fp = macro_averaging[k]['tp'] + macro_averaging[k]['fp']
        tp_fn = macro_averaging[k]['tp'] + macro_averaging[k]['fn']
        if tp_fp == 0:
            # This class was never predicted, so it contributes nothing to the
            # sums above, but it is still included in the class count
            count += 1
        else:
            macro_precision += macro_averaging[k]['tp'] / tp_fp
            if tp_fn > 0:  # guard against a class with false positives but no gold instances
                macro_recall += macro_averaging[k]['tp'] / tp_fn
            count += 1
    macro_precision = macro_precision / count
    macro_recall = macro_recall / count
    macro_averaging_result = 2 * macro_precision * macro_recall / (
        macro_precision + macro_recall)

    accuracy = 100.0 * corrects / size
    during_time = float(time.time() - start_time)
    print("\nevaluate result: ")
    print(
        "Step:{}, accuracy:{:.4f}({}/{}), macro_averaging:{:.4f}, time:{:.2f}".
        format(step, accuracy, corrects, size, macro_averaging_result,
               during_time))
    model.train()
    return accuracy, macro_averaging_result
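
The per-class bookkeeping above amounts to the standard macro-averaged F1. As a quick cross-check, here is a minimal, self-contained sketch of the same computation on hypothetical gold/predicted label lists (the function name macro_f1 and the toy labels are illustrative, not taken from the code above):

def macro_f1(gold_labels, pred_labels):
    # Collect tp / fp / fn per class, then average precision and recall over classes
    classes = set(gold_labels) | set(pred_labels)
    counts = {c: {'tp': 0, 'fp': 0, 'fn': 0} for c in classes}
    for g, p in zip(gold_labels, pred_labels):
        if g == p:
            counts[g]['tp'] += 1
        else:
            counts[g]['fn'] += 1
            counts[p]['fp'] += 1
    precision = recall = 0.0
    for c in classes:
        tp, fp, fn = counts[c]['tp'], counts[c]['fp'], counts[c]['fn']
        precision += tp / (tp + fp) if tp + fp > 0 else 0.0
        recall += tp / (tp + fn) if tp + fn > 0 else 0.0
    precision /= len(classes)
    recall /= len(classes)
    return 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0

# e.g. macro_f1(['A', 'A', 'B', 'C'], ['A', 'B', 'B', 'C']) returns 5/6 ≈ 0.83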
Example #2
def train(model, train_data, dev_data, test_data, vocab_srcs, vocab_tgts, config):
    # optimizer
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    if config.learning_algorithm == 'sgd':
        optimizer = optim.SGD(parameters, lr=config.lr, weight_decay=config.weight_decay)
    elif config.learning_algorithm == 'adam':
        optimizer = optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
    else:
        raise RuntimeError('Invalid optimizer method: ' + config.learning_algorithm)

    # train
    global_step = 0
    best_acc = 0
    print('\nstart training...')
    for iter in range(config.epochs):
        iter_start_time = time.time()
        print('Iteration: ' + str(iter))

        batch_num = int(np.ceil(len(train_data) / float(config.batch_size)))
        batch_iter = 0
        for batch in create_batch_iter(train_data, config.batch_size, shuffle=True):
            start_time = time.time()
            feature, target, starts, ends, feature_lengths = pair_data_variable(batch, vocab_srcs, vocab_tgts, config)
            model.train()
            optimizer.zero_grad()
            logit = model(feature, feature_lengths, starts, ends)
            loss = F.cross_entropy(logit, target)
            loss_value = loss.data.cpu().numpy()
            loss.backward()
            torch.nn.utils.clip_grad_norm(model.parameters(), config.clip_norm)
            optimizer.step()

            correct = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
            accuracy = 100.0 * correct / len(batch)

            during_time = float(time.time() - start_time)
            print("Step:{}, Iter:{}, batch:{}, accuracy:{:.4f}({}/{}), time:{:.2f}, loss:{:.6f}"
                  .format(global_step, iter, batch_iter, accuracy, correct, len(batch), during_time, loss_value[0]))

            batch_iter += 1
            global_step += 1

            if batch_iter % config.test_interval == 0 or batch_iter == batch_num:
                dev_acc = evaluate(model, dev_data, global_step, vocab_srcs, vocab_tgts, config)
                test_acc = evaluate(model, test_data, global_step, vocab_srcs, vocab_tgts, config)

                if dev_acc > best_acc:
                    print("Exceed best acc: history = %.2f, current = %.2f" % (best_acc, dev_acc))
                    best_acc = dev_acc
                    if not os.path.exists(config.save_model_path):
                        os.makedirs(config.save_model_path)
                    if -1 < config.save_after <= iter:
                        torch.save(model.state_dict(), os.path.join(config.save_model_path,
                                                                    'model.' + str(global_step)))
        during_time = float(time.time() - iter_start_time)
        print('one iter using time: time:{:.2f}'.format(during_time))
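
create_batch_iter is not defined in these snippets; purely as an assumption about its behaviour inferred from how it is called above (shuffle flag, fixed batch_size), a minimal sketch might look like this:

import random

def create_batch_iter(data, batch_size, shuffle=False):
    # Optionally shuffle the indices, then yield consecutive fixed-size batches
    indices = list(range(len(data)))
    if shuffle:
        random.shuffle(indices)
    for start in range(0, len(indices), batch_size):
        yield [data[i] for i in indices[start:start + batch_size]]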
Example #3
def evaluate(model, data, step, vocab_srcs, vocab_tgts, config):
    model.eval()
    start_time = time.time()
    corrects, size = 0, 0

    for batch in create_batch_iter(data, config.batch_size):
        feature, target, starts, ends, feature_lengths = pair_data_variable(
            batch, vocab_srcs, vocab_tgts, config)
        logit = model(feature, feature_lengths, starts, ends)
        correct = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
        corrects += correct
        size += len(batch)
    accuracy = 100.0 * corrects / size
    during_time = float(time.time() - start_time)
    print("\nevaluate result: ")
    print("Step:{}, accuracy:{:.4f}({}/{}), time:{:.2f}"
          .format(step, accuracy, corrects, size, during_time))
    model.train()
    return accuracy
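
Note that in recent PyTorch versions (pred == target).sum() returns a zero-dimensional tensor rather than a Python int, so the running corrects counter is best accumulated with .item(). A small self-contained sketch of the same accuracy bookkeeping (batch_accuracy is an illustrative helper, not part of the code above):

import torch

def batch_accuracy(logit, target):
    # Count rows whose argmax over the class dimension matches the gold label
    pred = torch.max(logit, 1)[1].view(target.size())
    correct = (pred == target).sum().item()  # .item() converts the 0-dim tensor to an int
    return correct, target.size(0)

# e.g. batch_accuracy(torch.tensor([[0.1, 0.9], [0.8, 0.2]]), torch.tensor([1, 1])) -> (1, 2)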
Example #4
def evaluate(model, data, step, vocab_srcs, vocab_tgts, config):
    model.eval()
    start_time = time.time()
    predict_number, gold_number, correct_number = 0, 0, 0

    for batch in create_batch_iter(data, config.batch_size):
        feature, label, feature_lengths = pair_data_variable(
            batch, vocab_srcs, vocab_tgts, config)

        h = model(feature, feature_lengths)
        predict = model._viterbi_decode(h, feature_lengths)
        label = label.view(len(predict))
        for idx, value in enumerate(predict):
            # Every non-'O' gold tag counts towards the recall denominator
            if label.data[idx] != vocab_tgts.word2id('O'):
                gold_number += 1
            # Predicted 'O' tags are not counted as predictions
            if value == vocab_tgts.word2id('O'):
                continue
            # Non-'O' predictions count towards the precision denominator,
            # and towards the numerator only when they match the gold tag
            elif value == label.data[idx]:
                predict_number += 1
                correct_number += 1
            else:
                predict_number += 1
    if predict_number == 0:
        p = 0
    else:
        p = correct_number / predict_number
    if gold_number == 0:
        r = 0
    else:
        r = correct_number / gold_number
    if (p + r) == 0:
        f_score = 0
    else:
        f_score = 2 * p * r / (p + r)
    during_time = float(time.time() - start_time)
    print("\nevaluate result: ")
    print("Step:{}, f1:{:.4f}, time:{:.2f}".format(step, f_score, during_time))
    model.train()
    return f_score
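
The scoring above ignores the 'O' tag on the prediction side and counts every non-'O' gold tag towards recall; a minimal stand-alone sketch of the same token-level precision/recall/F1 on hypothetical tag lists (token_prf and the example tags are illustrative):

def token_prf(gold_tags, pred_tags, outside='O'):
    # A prediction is counted only when it is not 'O'; it is correct only when it
    # also matches the gold tag exactly
    predict_number = sum(1 for p in pred_tags if p != outside)
    gold_number = sum(1 for g in gold_tags if g != outside)
    correct_number = sum(1 for g, p in zip(gold_tags, pred_tags)
                         if p != outside and p == g)
    p = correct_number / predict_number if predict_number else 0
    r = correct_number / gold_number if gold_number else 0
    f = 2 * p * r / (p + r) if p + r else 0
    return p, r, f

# e.g. token_prf(['O', 'B-LOC', 'I-LOC', 'O'], ['O', 'B-LOC', 'O', 'B-PER']) -> (0.5, 0.5, 0.5)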
Example #5
def train(model, train_data, dev_data, test_data, vocab_srcs, vocab_tgts,
          config):
    model.train()
    # optimizer
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    if config.learning_algorithm == 'sgd':
        optimizer = optim.SGD(parameters,
                              lr=config.lr,
                              weight_decay=config.weight_decay)
    elif config.learning_algorithm == 'adagrad':
        optimizer = optim.Adagrad(parameters,
                                  lr=config.lr,
                                  weight_decay=config.weight_decay)
    elif config.learning_algorithm == 'adadelta':
        optimizer = optim.Adadelta(parameters,
                                   lr=config.lr,
                                   weight_decay=config.weight_decay)
    elif config.learning_algorithm == 'adam':
        optimizer = optim.Adam(parameters,
                               lr=config.lr,
                               weight_decay=config.weight_decay)
    else:
        raise RuntimeError("Invalid optim method: " +
                           config.learning_algorithm)

    # train
    global_step = 0
    best_f1 = 0
    print('\nstart training...')
    for iter in range(config.epochs):
        iter_start_time = time.time()
        print('Iteration: ' + str(iter))

        batch_num = int(numpy.ceil(len(train_data) / float(config.batch_size)))
        batch_iter = 0
        for batch in create_batch_iter(train_data,
                                       config.batch_size,
                                       shuffle=True):
            start_time = time.time()
            feature, target, feature_lengths = pair_data_variable(
                batch, vocab_srcs, vocab_tgts, config)

            optimizer.zero_grad()
            h_output = model(feature, feature_lengths)
            loss = model.get_loss(h_output, feature_lengths, target)
            loss_value = loss.data.cpu().numpy()
            loss.backward()
            optimizer.step()

            during_time = float(time.time() - start_time)
            print(
                "Step:{}, Iter:{}, batch:{}, time:{:.2f}, loss:{:.6f}".format(
                    global_step, iter, batch_iter, during_time, loss_value[0]))

            batch_iter += 1
            global_step += 1

            if batch_iter % config.test_interval == 0 or batch_iter == batch_num:
                if config.dev_file:
                    dev_f1 = evaluate(model, dev_data, global_step, vocab_srcs,
                                      vocab_tgts, config)
                if config.test_file:
                    test_f1 = evaluate(model, test_data, global_step,
                                       vocab_srcs, vocab_tgts, config)
                if config.dev_file:
                    if dev_f1 > best_f1:
                        print(
                            "Exceed best f1: history = %.2f, current = %.2f" %
                            (best_f1, dev_f1))
                        best_f1 = dev_f1
                        if not os.path.exists(config.save_model_path):
                            os.makedirs(config.save_model_path)
                        if -1 < config.save_after <= iter:
                            torch.save(
                                model.state_dict(),
                                os.path.join(config.save_model_path,
                                             'model.' + str(global_step)))
                else:
                    if test_f1 > best_f1:
                        print(
                            "Exceed best f1: history = %.2f, current = %.2f" %
                            (best_f1, test_f1))
                        best_f1 = test_f1
                        if not os.path.exists(config.save_model_path):
                            os.makedirs(config.save_model_path)
                        if -1 < config.save_after <= iter:
                            torch.save(
                                model.state_dict(),
                                os.path.join(config.save_model_path,
                                             'model.' + str(global_step)))
        during_time = float(time.time() - iter_start_time)
        print('one iter using time: time:{:.2f}'.format(during_time))
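
The optimizer dispatch at the top of train() can be factored into a small helper so both training scripts share it; a sketch using the same torch.optim classes (the name build_optimizer is illustrative):

import torch.optim as optim

def build_optimizer(name, parameters, lr, weight_decay):
    # Map the config string onto the corresponding torch.optim constructor
    optimizers = {
        'sgd': optim.SGD,
        'adagrad': optim.Adagrad,
        'adadelta': optim.Adadelta,
        'adam': optim.Adam,
    }
    if name not in optimizers:
        raise RuntimeError('Invalid optim method: ' + name)
    return optimizers[name](parameters, lr=lr, weight_decay=weight_decay)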