Example 1
def eval_att(insts, insts_index, model, config, params):
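    # Evaluate the attention model on a single bucket that holds every instance;
    # the micro/macro F-scores are computed by calc_fscore over the resulting logits.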
    model.eval()
    # model_e.eval()
    insts, insts_index = utils.random_data(insts, insts_index)
    buckets, labels_raw, categorys_raw, target_start, target_end = params.generate_batch_buckets(
        len(insts), insts_index, char=params.add_char)

    size = len(insts)
    batch_length = np.array([np.sum(mask) for mask in buckets[0][-1]])
    fea_v, label_v, mask_v, length_v, target_v, start_v, end_v = utils.patch_var(
        buckets[0], batch_length.tolist(), categorys_raw, target_start,
        target_end, params)
    target_v = target_v.squeeze(0)
    start_v = start_v.squeeze(0)
    end_v = end_v.squeeze(0)

    if mask_v.size(0) != config.test_batch_size:
        model.hidden = model.init_hidden(mask_v.size(0), config.lstm_layers)
    else:
        model.hidden = model.init_hidden(config.test_batch_size,
                                         config.lstm_layers)

    logit = model.forward(fea_v, batch_length.tolist(), start_v, end_v,
                          label_v)
    micro_fscore, macro_fscore = calc_fscore(logit, target_v, size, params)

    return micro_fscore, macro_fscore
def eval(insts, encode, decode, config, params):
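    # Run the encoder-decoder pair over one bucket of instances with is_train=False
    # and score the predicted actions against the gold indices in list_b[1].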
    encode.eval()
    decode.eval()
    insts = utils.random_instances(insts)
    buckets = params.generate_batch_buckets(len(insts), insts)

    batch_length = np.array([np.sum(mask) for mask in buckets[0][-1]])
    var_b, list_b, mask_v, length_v, gold_v = utils.patch_var(
        buckets[0], batch_length.tolist(), params)
    encode.zero_grad()
    decode.zero_grad()
    if mask_v.size(0) != config.test_batch_size:
        encode.hidden = encode.init_hidden(mask_v.size(0))
    else:
        encode.hidden = encode.init_hidden(config.test_batch_size)
    lstm_out = encode.forward(var_b, list_b, mask_v, batch_length.tolist())
    output, action = decode.forward(lstm_out,
                                    var_b,
                                    list_b,
                                    mask_v,
                                    batch_length.tolist(),
                                    is_train=False)
    ##### output: variable (batch_size, max_length, segpos_num)

    gold_index = list_b[1]

    f_score = evaluation_joint.eval_entity(gold_index, action, params)
    return f_score
def eval_batch(insts, encode, decode, config, params, eval_seg, eval_pos):
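    # Batched evaluation for joint segmentation/POS tagging: decode every bucket,
    # collect gold and predicted words/labels, then accumulate P/R/F through the
    # Eval objects passed in as eval_seg and eval_pos.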
    encode.eval()
    decode.eval()
    insts = utils.random_instances(insts)
    buckets = params.generate_batch_buckets(config.test_batch_size, insts)

    gold_total = []
    pos_total = []
    word_total = []
    action_total = []
    eval_gold = []
    eval_poslabels = []
    for index in range(len(buckets)):
        batch_length = np.array([np.sum(mask) for mask in buckets[index][-1]])
        var_b, list_b, mask_v, length_v, gold_v = utils.patch_var(
            buckets[index], batch_length.tolist(), params)
        encode.zero_grad()
        decode.zero_grad()
        if mask_v.size(0) != config.test_batch_size:
            encode.hidden = encode.init_hidden(mask_v.size(0))
        else:
            encode.hidden = encode.init_hidden(config.test_batch_size)
        lstm_out = encode.forward(var_b, list_b, mask_v, batch_length.tolist())
        output, state = decode.forward(lstm_out,
                                       var_b,
                                       list_b,
                                       mask_v,
                                       batch_length.tolist(),
                                       is_train=False)

        gold_index = list_b[1]
        pos_index = list_b[2]
        word_index = list_b[-1]

        gold_total.extend(gold_index)
        pos_total.extend(pos_index)
        word_total.extend(word_index)

        for id in range(mask_v.size(0)):
            eval_gold.append(state[id].words)
            eval_poslabels.append(state[id].pos_labels)

    for idx in range(len(gold_total)):
        eval_seg, eval_pos = jointPRF(word_total[idx], pos_total[idx],
                                      eval_gold[idx], eval_poslabels[idx],
                                      eval_seg, eval_pos)
    p, r, f = eval_seg.getFscore()
    fscore_seg = f
    print('seg eval: precision = {}%, recall = {}%, f-score = {}%'.format(
        p, r, f))
    p, r, f = eval_pos.getFscore()
    fscore_pos = f
    print('pos eval: precision = {}%, recall = {}%, f-score = {}%'.format(
        p, r, f))

    # fscore_seg, fscore_pos = evaluation_joint.eval_entity(gold_total, action_total, params)
    return fscore_seg, fscore_pos
Example 4
def train_ner(train_insts, train_insts_index, dev_insts, dev_insts_index,
              test_insts, test_insts_index, model, crf_layer, config, params):
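    # Train the tagging model together with the CRF layer using SGD with momentum;
    # evaluate on dev after every epoch and re-run the test set whenever dev F1 improves.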
    print('training...')
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.SGD(params=parameters,
                                lr=config.learning_rate,
                                momentum=0.9,
                                weight_decay=config.decay)
    best_f1 = float('-inf')

    for epoch in range(config.maxIters):
        start_time = time.time()
        model.train()
        train_insts, train_insts_index = utils.random_data(
            train_insts, train_insts_index)
        epoch_loss = 0
        train_buckets, train_labels_raw = params.generate_batch_buckets(
            config.train_batch_size, train_insts_index, char=params.add_char)

        for index in range(len(train_buckets)):
            batch_length = np.array(
                [np.sum(mask) for mask in train_buckets[index][-1]])

            fea_v, label_v, mask_v, length_v = utils.patch_var(
                train_buckets[index], batch_length.tolist(), params)
            model.zero_grad()
            if mask_v.size(0) != config.train_batch_size:
                model.hidden = model.init_hidden(mask_v.size(0))
            else:
                model.hidden = model.init_hidden(config.train_batch_size)
            emit_scores = model.forward(fea_v, batch_length.tolist())

            loss = crf_layer.forward(emit_scores, label_v, mask_v)
            loss.backward()
            # nn.utils.clip_grad_norm(model.parameters(), config.clip_grad)
            optimizer.step()
            epoch_loss += to_scalar(loss)
        print('\nepoch is {}, average loss is {} '.format(
            epoch,
            (epoch_loss / (config.train_batch_size * len(train_buckets)))))
        # update lr
        # adjust_learning_rate(optimizer, config.learning_rate / (1 + (epoch + 1) * config.decay))
        print('Dev...')
        dev_f1 = eval(dev_insts, dev_insts_index, model, crf_layer, config,
                      params)
        if dev_f1 > best_f1:
            best_f1 = dev_f1
            print('\nTest...')
            test_f1 = eval(test_insts, test_insts_index, model, crf_layer,
                           config, params)
        print('now, best fscore is: ', best_f1)
Example 5
def eval_batch(insts, encode, decode, config, params):
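    # Variant of eval_batch that reads the decoder's recorded actions from the
    # returned state and scores them with evaluation_joint.eval_entity.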
    encode.eval()
    decode.eval()
    insts = utils.random_instances(insts)
    buckets = params.generate_batch_buckets(config.test_batch_size, insts)

    gold_total = []
    pos_total = []
    word_total = []
    action_total = []
    eval_gold = []
    eval_poslabels = []
    for index in range(len(buckets)):
        batch_length = np.array([np.sum(mask) for mask in buckets[index][-1]])
        var_b, list_b, mask_v, length_v, gold_v = utils.patch_var(buckets[index], batch_length.tolist(), params)
        encode.zero_grad()
        decode.zero_grad()
        if mask_v.size(0) != config.test_batch_size:
            encode.hidden = encode.init_hidden(mask_v.size(0))
        else:
            encode.hidden = encode.init_hidden(config.test_batch_size)
        lstm_out = encode.forward(var_b, list_b, mask_v, batch_length.tolist())
        output, state = decode.forward(lstm_out, var_b, list_b, mask_v, batch_length.tolist())

        gold_index = list_b[1]
        pos_index = list_b[2]
        word_index = list_b[-1]

        gold_total.extend(gold_index)
        pos_total.extend(pos_index)
        word_total.extend(word_index)

        action_total.extend(state.action)
        eval_gold.extend(state.words_record)
        eval_poslabels.extend(state.pos_record)

    fscore_seg, fscore_pos = evaluation_joint.eval_entity(gold_total, action_total, params)
    return fscore_seg, fscore_pos
Example 6
def eval(insts, insts_index, model, crf_layer, config, params):
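    # CRF evaluation: compute emission scores for a single bucket, Viterbi-decode the
    # best label path, and score it against the raw gold labels.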
    model.eval()
    insts, insts_index = utils.random_data(insts, insts_index)
    buckets, labels_raw = params.generate_batch_buckets(len(insts),
                                                        insts_index,
                                                        char=params.add_char)
    batch_length = np.array([np.sum(mask) for mask in buckets[0][-1]])
    fea_v, label_v, mask_v, length_v = utils.patch_var(buckets[0],
                                                       batch_length.tolist(),
                                                       params)
    model.zero_grad()
    if mask_v.size(0) != config.test_batch_size:
        model.hidden = model.init_hidden(mask_v.size(0))
    else:
        model.hidden = model.init_hidden(config.test_batch_size)
    emit_scores = model.forward(fea_v, batch_length.tolist())
    predict_path = crf_layer.viterbi_decode(emit_scores, mask_v)
    ##### predict_path: variable (seq_length, batch_size)

    # f_score = evaluation.eval_entity(label_v.data.tolist(), predict_path.transpose(0, 1).data.tolist(), params)
    f_score = evaluation.eval_entity(
        labels_raw[0],
        predict_path.transpose(0, 1).data.tolist(), params)
    return f_score
def train_segpos(train_insts, dev_insts, test_insts, encode, decode, config,
                 params):
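    # Joint segmentation/POS training loop: separate Adam optimizers for encoder and
    # decoder, cross-entropy loss on the decoder output with gradient clipping, and
    # dev/test evaluation with best-score tracking and checkpointing after each epoch.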
    print('training...')
    # materialize the filter so the parameters can be reused for gradient clipping below
    parameters_en = list(filter(lambda p: p.requires_grad, encode.parameters()))
    # optimizer_en = torch.optim.SGD(params=parameters_en, lr=config.learning_rate, momentum=0.9, weight_decay=config.decay)
    optimizer_en = torch.optim.Adam(params=parameters_en,
                                    lr=config.learning_rate,
                                    weight_decay=config.decay)
    parameters_de = list(filter(lambda p: p.requires_grad, decode.parameters()))
    # optimizer_de = torch.optim.SGD(params= parameters_de, lr=config.learning_rate, momentum=0.9, weight_decay=config.decay)
    optimizer_de = torch.optim.Adam(params=parameters_de,
                                    lr=config.learning_rate,
                                    weight_decay=config.decay)

    best_dev_f1_seg = float('-inf')
    best_dev_f1_pos = float('-inf')
    best_test_f1_seg = float('-inf')
    best_test_f1_pos = float('-inf')

    dev_eval_seg = Eval()
    dev_eval_pos = Eval()
    test_eval_seg = Eval()
    test_eval_pos = Eval()
    for epoch in range(config.maxIters):
        start_time = time.time()
        encode.train()
        decode.train()
        train_insts = utils.random_instances(train_insts)
        epoch_loss = 0
        train_buckets = params.generate_batch_buckets(config.train_batch_size,
                                                      train_insts)

        for index in range(len(train_buckets)):
            batch_length = np.array(
                [np.sum(mask) for mask in train_buckets[index][-1]])

            var_b, list_b, mask_v, length_v, gold_v = utils.patch_var(
                train_buckets[index], batch_length.tolist(), params)
            encode.zero_grad()
            decode.zero_grad()
            if mask_v.size(0) != config.train_batch_size:
                encode.hidden = encode.init_hidden(mask_v.size(0))
            else:
                encode.hidden = encode.init_hidden(config.train_batch_size)
            lstm_out = encode.forward(var_b, list_b, mask_v,
                                      batch_length.tolist())
            output, state = decode.forward(lstm_out,
                                           var_b,
                                           list_b,
                                           mask_v,
                                           batch_length.tolist(),
                                           is_train=True)
            #### output: variable (batch_size, max_length, segpos_num)

            # num_total = output.size(0)*output.size(1)
            # output = output.contiguous().view(num_total, output.size(2))
            # print(output)
            # gold_v = gold_v.view(num_total)
            # print(output)
            gold_v = gold_v.view(output.size(0))
            # print(gold_v)
            loss = F.cross_entropy(output, gold_v)
            loss.backward()

            nn.utils.clip_grad_norm(parameters_en, max_norm=config.clip_grad)
            nn.utils.clip_grad_norm(parameters_de, max_norm=config.clip_grad)

            optimizer_en.step()
            optimizer_de.step()
            epoch_loss += utils.to_scalar(loss)
        print('\nepoch is {}, average loss is {} '.format(
            epoch,
            (epoch_loss / (config.train_batch_size * len(train_buckets)))))
        # update lr
        # adjust_learning_rate(optimizer, config.learning_rate / (1 + (epoch + 1) * config.decay))
        # acc = float(correct_num) / float(gold_num)
        # print('\nepoch is {}, accuracy is {}'.format(epoch, acc))
        print('the {} epoch training costs time: {} s '.format(
            epoch,
            time.time() - start_time))

        print('\nDev...')
        dev_eval_seg.clear()
        dev_eval_pos.clear()
        test_eval_seg.clear()
        test_eval_pos.clear()

        start_time = time.time()
        dev_f1_seg, dev_f1_pos = eval_batch(dev_insts, encode, decode, config,
                                            params, dev_eval_seg, dev_eval_pos)
        print('the {} epoch dev costs time: {} s'.format(
            epoch,
            time.time() - start_time))

        if dev_f1_seg > best_dev_f1_seg:
            best_dev_f1_seg = dev_f1_seg
            if dev_f1_pos > best_dev_f1_pos: best_dev_f1_pos = dev_f1_pos
            print('\nTest...')
            start_time = time.time()
            test_f1_seg, test_f1_pos = eval_batch(test_insts, encode, decode,
                                                  config, params,
                                                  test_eval_seg, test_eval_pos)
            print('the {} epoch testing costs time: {} s'.format(
                epoch,
                time.time() - start_time))

            if test_f1_seg > best_test_f1_seg:
                best_test_f1_seg = test_f1_seg
            if test_f1_pos > best_test_f1_pos:
                best_test_f1_pos = test_f1_pos
            print(
                'now, test fscore of seg is {}, test fscore of pos is {}, best test fscore of seg is {}, best test fscore of pos is {} '
                .format(test_f1_seg, test_f1_pos, best_test_f1_seg,
                        best_test_f1_pos))
            torch.save(encode.state_dict(), config.save_encode_path)
            torch.save(decode.state_dict(), config.save_decode_path)
        else:
            if dev_f1_pos > best_dev_f1_pos:
                best_dev_f1_pos = dev_f1_pos
                print('\nTest...')
                start_time = time.time()
                test_f1_seg, test_f1_pos = eval_batch(test_insts, encode,
                                                      decode, config, params,
                                                      test_eval_seg,
                                                      test_eval_pos)
                print('the {} epoch testing costs time: {} s'.format(
                    epoch,
                    time.time() - start_time))

                if test_f1_seg > best_test_f1_seg:
                    best_test_f1_seg = test_f1_seg
                if test_f1_pos > best_test_f1_pos:
                    best_test_f1_pos = test_f1_pos
                print(
                    'now, test fscore of seg is {}, test fscore of pos is {}, best test fscore of seg is {}, best test fscore of pos is {} '
                    .format(test_f1_seg, test_f1_pos, best_test_f1_seg,
                            best_test_f1_pos))
                torch.save(encode.state_dict(), config.save_encode_path)
                torch.save(decode.state_dict(), config.save_decode_path)
        print(
            'now, dev fscore of seg is {}, dev fscore of pos is {}, best dev fscore of seg is {}, best dev fscore of pos is {}, best test fscore of seg is {}, best test fscore of pos is {}'
            .format(dev_f1_seg, dev_f1_pos, best_dev_f1_seg, best_dev_f1_pos,
                    best_test_f1_seg, best_test_f1_pos))
Example 8
def train_att(train_insts, train_insts_index, dev_insts, dev_insts_index,
              test_insts, test_insts_index, model_att, config, params):
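    # Train the attention classifier with SGD (momentum 0.9) and a per-epoch learning
    # rate decay; track the best micro/macro dev F-scores after every epoch.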
    print('training...')
    # parameters = filter(lambda p: p.requires_grad, model.parameters())
    parameters_att = filter(lambda p: p.requires_grad, model_att.parameters())
    # optimizer = torch.optim.Adam(params=parameters, lr=config.learning_rate, weight_decay=config.decay)
    # optimizer_att = torch.optim.Adam(params=parameters_att, lr=config.learning_rate, weight_decay=config.decay)
    # optimizer_att = torch.optim.SGD(params=parameters_att, lr=config.learning_rate, momentum=0.9)
    optimizer_att = torch.optim.SGD(params=parameters_att,
                                    lr=config.learning_rate,
                                    momentum=0.9,
                                    weight_decay=config.decay)
    #optimizer_att = torch.optim.Adagrad(params=parameters_att, lr=config.learning_rate, weight_decay=config.decay)

    best_micro_f1 = float('-inf')
    best_macro_f1 = float('-inf')

    for epoch in range(config.maxIters):
        start_time = time.time()
        model_att.train()
        train_insts, train_insts_index = utils.random_data(
            train_insts, train_insts_index)
        epoch_loss_e = 0
        train_buckets, train_labels_raw, train_category_raw, train_target_start, train_target_end = params.generate_batch_buckets(
            config.train_batch_size, train_insts_index, char=params.add_char)

        for index in range(len(train_buckets)):
            batch_length = np.array(
                [np.sum(mask) for mask in train_buckets[index][-1]])
            fea_v, label_v, mask_v, length_v, target_v, start_v, end_v = utils.patch_var(
                train_buckets[index], batch_length.tolist(),
                train_category_raw[index], train_target_start[index],
                train_target_end[index], params)
            model_att.zero_grad()

            if mask_v.size(0) != config.train_batch_size:
                model_att.hidden = model_att.init_hidden(
                    mask_v.size(0), config.lstm_layers)
            else:
                model_att.hidden = model_att.init_hidden(
                    config.train_batch_size, config.lstm_layers)

            logit = model_att.forward(fea_v, batch_length.tolist(), start_v,
                                      end_v, label_v)
            # print(target_v)
            loss_e = F.cross_entropy(logit, target_v)
            loss_e.backward()
            optimizer_att.step()
            epoch_loss_e += to_scalar(loss_e)

            # # nn.utils.clip_grad_norm(model.parameters(), config.clip_grad)
        # print('\nepoch is {}, average loss_c is {} '.format(epoch, (epoch_loss_c / config.train_batch_size)))
        print('\nepoch is {}, average loss_e is {} '.format(
            epoch,
            (epoch_loss_e / (config.train_batch_size * len(train_buckets)))))
        # update lr
        adjust_learning_rate(
            optimizer_att,
            config.learning_rate / (1 + (epoch + 1) * config.decay))
        print('Dev...')
        # dev_acc, dev_f1 = eval_att_single(dev_insts, dev_insts_index, model_att, config, params)
        # if dev_f1 > best_f1 or dev_acc > best_acc:
        #     if dev_f1 > best_f1: best_f1 = dev_f1
        #     if dev_acc > best_acc: best_acc = dev_acc
        #     print('\nTest...')
        #     test_acc, test_f1 = eval_att_single(test_insts, test_insts_index, model_att, config, params)
        # print('now, best fscore is {}, best accuracy is {}'.format(best_f1, best_acc))
        dev_micro_fscore, dev_macro_fscore = eval_att(dev_insts,
                                                      dev_insts_index,
                                                      model_att, config,
                                                      params)
        if dev_micro_fscore > best_micro_f1:
            best_micro_f1 = dev_micro_fscore
            # print('\nTest...')
            # test_acc = eval_att(test_insts, test_insts_index, model_att, config, params)
        if dev_macro_fscore > best_macro_f1:
            best_macro_f1 = dev_macro_fscore
        print('now, best micro fscore is {}%, best macro fscore is {}%'.format(
            best_micro_f1, best_macro_f1))
def eval_att_nobatch(insts, insts_index, model, config, params):
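    # Instance-by-instance evaluation (the "nobatch" name suggests one instance per
    # bucket): collect logits and targets for each instance, then compute micro/macro
    # F-scores over the whole set.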
    model.eval()
    insts, insts_index = utils.random_data(insts, insts_index)
    buckets, labels_raw, categorys_raw, target_start, target_end = params.generate_batch_buckets(
        config.train_batch_size, insts_index, char=params.add_char)

    size = len(insts)
    logit_total = []
    target_total = []
    for index in range(size):
        batch_length = np.array([np.sum(mask) for mask in buckets[index][-1]])
        fea_v, label_v, pos_v, parse_v, rel_v, mask_v, length_v, target_v, start_v, end_v = utils.patch_var(
            buckets[index], batch_length.tolist(), categorys_raw[index],
            target_start[index], target_end[index], params)

        target_v = target_v.squeeze(0)
        start_v = start_v.squeeze(0)
        end_v = end_v.squeeze(0)

        logit, _ = model.forward(fea_v, parse_v, rel_v, label_v, pos_v,
                                 batch_length.tolist(), start_v, end_v, mask_v)
        logit_total.append(logit)
        target_total.append(target_v)
    logit_total = torch.cat(logit_total, 0)
    target_total = torch.cat(target_total, 0)
    ##### logit_total: variable(size, category_num)
    ##### target_total: variable(size)

    micro_fscore, macro_fscore = calc_fscore(logit_total, target_total, size,
                                             params)
    return micro_fscore, macro_fscore
def train_att(train_insts, train_insts_index, dev_insts, dev_insts_index,
              test_insts, test_insts_index, model_att, config, params):
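    # Variant of train_att for a model whose forward pass returns the loss directly,
    # optimized with Adam; the accuracy counters below are commented out, so the
    # reported train accuracy falls back to zero.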
    print('training...')
    # parameters = filter(lambda p: p.requires_grad, model.parameters())
    parameters_att = filter(lambda p: p.requires_grad, model_att.parameters())
    # optimizer = torch.optim.Adam(params=parameters, lr=config.learning_rate, weight_decay=config.decay)
    optimizer_att = torch.optim.Adam(params=parameters_att,
                                     lr=config.learning_rate,
                                     weight_decay=config.decay)
    best_micro_f1 = float('-inf')
    best_macro_f1 = float('-inf')
    # fl = FocalLoss(class_num=params.label_num)

    for epoch in range(config.maxIters):
        correct = total = 0
        model_att.train()
        train_insts, train_insts_index = utils.random_data(
            train_insts, train_insts_index)

        epoch_loss_e = 0
        train_buckets, train_labels_raw, train_category_raw, train_target_start, train_target_end = params.generate_batch_buckets(
            config.train_batch_size, train_insts_index, char=params.add_char)

        for index in range(len(train_buckets)):
            batch_length = np.array(
                [np.sum(mask) for mask in train_buckets[index][-1]])
            fea_v, label_v, pos_v, parse_v, rel_v, mask_v, length_v, target_v, start_v, end_v = utils.patch_var(
                train_buckets[index], batch_length.tolist(),
                train_category_raw[index], train_target_start[index],
                train_target_end[index], params)
            model_att.zero_grad()

            out, loss_e = model_att.forward(fea_v, parse_v, rel_v,
                                            label_v, pos_v,
                                            batch_length.tolist(), start_v,
                                            end_v, mask_v)

            loss_e.backward()
            optimizer_att.step()
            epoch_loss_e += to_scalar(loss_e)

            # pre = to_scalar(torch.max(out, dim=1)[1])
            # tar = to_scalar(target_v)
            #
            # if pre == tar:
            #     correct += 1
            # total += 1
            # print('sentence is {}, loss is {}'.format(index, to_scalar(loss_e)))

        # print('\nepoch is {}, average loss_c is {} '.format(epoch, (epoch_loss_c / config.train_batch_size)))
        print('\nepoch is {}, average loss_e is {}, train_accuracy is {} '.
              format(epoch, (epoch_loss_e /
                             (config.train_batch_size * len(train_buckets))),
                     (correct / total if total else 0.0)))

        print('Dev...')
        dev_micro, dev_macro = eval_att_nobatch(dev_insts, dev_insts_index,
                                                model_att, config, params)
        if dev_micro > best_micro_f1:
            best_micro_f1 = dev_micro
            # print('\nTest...')
            # test_acc = eval_att(test_insts, test_insts_index, model_att, config, params)
        if dev_macro > best_macro_f1:
            best_macro_f1 = dev_macro
        print('now, best micro fscore is {}, best macro fscore is {}'.format(
            best_micro_f1, best_macro_f1))
def eval_att(insts, insts_index, model, config, params):
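    # Single-bucket evaluation for the attention model that also consumes POS, parse
    # and relation features; returns micro/macro F-scores from calc_fscore.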
    model.eval()
    insts, insts_index = utils.random_data(insts, insts_index)
    buckets, labels_raw, categorys_raw, target_start, target_end = params.generate_batch_buckets(
        len(insts), insts_index, char=params.add_char)

    size = len(insts)
    batch_length = np.array([np.sum(mask) for mask in buckets[0][-1]])
    fea_v, label_v, pos_v, parse_v, rel_v, mask_v, length_v, target_v, start_v, end_v = utils.patch_var(
        buckets[0], batch_length.tolist(), categorys_raw, target_start,
        target_end, params)

    target_v = target_v.squeeze(0)
    start_v = start_v.squeeze(0)
    end_v = end_v.squeeze(0)

    # if mask_v.size(0) != config.test_batch_size:
    #     model.hidden = model.init_hidden(mask_v.size(0), config.lstm_layers)
    # else:
    #     model.hidden = model.init_hidden(config.test_batch_size, config.lstm_layers)
    # if mask_v.size(0) != config.test_batch_size:
    #     model_e.hidden = model_e.init_hidden(mask_v.size(0), config.lstm_layers)
    # else:
    #     model_e.hidden = model_e.init_hidden(config.test_batch_size, config.lstm_layers)
    # fea_v, parse_v, rel_v, label_v, pos_v, batch_length.tolist(), start_v, end_v, mask_v
    logit = model.forward(fea_v, parse_v, rel_v, label_v, pos_v,
                          batch_length.tolist(), start_v, end_v, mask_v)
    ##### lstm_out: (seq_length, batch_size, label_num)
    ##### label_v: (batch_size, seq_length)
    # lstm_out_e, lstm_out_h = model_e.forward(fea_v, batch_length.tolist())

    # max_index = torch.max(logit, dim=1)[1].view(target_v.size())
    # rel_list = [[], [], [], []]
    # pre_list = [[], [], [], []]
    # corrects_list = [[], [], [], []]
    #
    # corrects = 0
    # for x in range(max_index.size(0)):
    #     y = int(params.category_alphabet.id2word[to_scalar(target_v[x])]) - 1
    #     rel_list[y].append(1)
    #     # print(to_scalar(max_index[x]) == to_scalar(target_v[x]))
    #     # print(type(to_scalar(max_index[x]) == to_scalar(target_v[x])))
    #     if to_scalar(max_index[x]) == to_scalar(target_v[x]):
    #         corrects += 1
    #         y = int(params.category_alphabet.id2word[to_scalar(target_v[x])]) - 1
    #         corrects_list[y].append(1)
    #     r = int(params.category_alphabet.id2word[to_scalar(max_index[x])]) - 1
    #     pre_list[r].append(1)
    # c_list = [len(ele) for ele in corrects_list]
    # r_list = [len(ele) for ele in rel_list]
    # p_list = [len(ele) for ele in pre_list]
    # # assert (torch.max(logit, 1)[1].view(target_v.size()).data == target_v.data).sum() == corrects
    #
    # recall = [float(x) / r_list[id] * 100.0 for id, x in enumerate(c_list)]
    # precision = [float(x) / p_list[id] * 100.0 for id, x in enumerate(c_list)]
    # f_score = []
    # for idx, p in enumerate(precision):
    #     if p + recall[idx] == 0:
    #         f_score.append(0.0)
    #     else:
    #         f_score.append(2 * p * recall[idx] / (p + recall[idx]))
    # for i in range(len(c_list)):
    #     print('category {}: precision: {:.4f}, recall: {}, fscore: {}% ({}/{}/{})'.format(i + 1, precision[i], recall[i], f_score[i], c_list[i], p_list[i], r_list[i]))
    #
    # micro_fscore = float(corrects) / size * 100.0
    # print('\nEvaluation - acc: {:.4f}%({}/{}) \n'.format(micro_fscore, corrects, size))
    # macro_fscore = (f_score[0]+f_score[1]+f_score[2]+f_score[3])/4
    # return micro_fscore, macro_fscore
    micro_fscore, macro_fscore = calc_fscore(logit, target_v, size, params)
    return micro_fscore, macro_fscore