Example #1
def evaluate_model(config: Config, model: TransformersCRF, data_loader: DataLoader, name: str, insts: List, print_each_type_metric: bool = False):
    ## evaluation
    p_dict, total_predict_dict, total_entity_dict = Counter(), Counter(), Counter()
    batch_size = data_loader.batch_size
    with torch.no_grad():
        for batch_id, batch in tqdm(enumerate(data_loader, 0), desc="--evaluating batch", total=len(data_loader)):
            one_batch_insts = insts[batch_id * batch_size:(batch_id + 1) * batch_size]
            batch_max_scores, batch_max_ids = model.decode(words=batch.input_ids.to(config.device),
                    word_seq_lens=batch.word_seq_len.to(config.device),
                    orig_to_tok_index=batch.orig_to_tok_index.to(config.device),
                    input_mask=batch.attention_mask.to(config.device))
            batch_p, batch_predict, batch_total = evaluate_batch_insts(one_batch_insts, batch_max_ids, batch.label_ids, batch.word_seq_len, config.idx2labels)
            p_dict += batch_p
            total_predict_dict += batch_predict
            total_entity_dict += batch_total
    f1Scores = []
    if print_each_type_metric or config.print_detail_f1 or (config.earlystop_atr == "macro"):
        for key in total_entity_dict:
            precision_key, recall_key, fscore_key = get_metric(p_dict[key], total_entity_dict[key], total_predict_dict[key])
            print(f"[{key}] Prec.: {precision_key:.2f}, Rec.: {recall_key:.2f}, F1: {fscore_key:.2f}")
            f1Scores.append(fscore_key)
        if len(f1Scores) > 0:
            print(f"[{name} set Total] Macro F1: {sum(f1Scores) / len(f1Scores):.2f}")

    total_p = sum(p_dict.values())
    total_predict = sum(total_predict_dict.values())
    total_entity = sum(total_entity_dict.values())
    precision, recall, fscore = get_metric(total_p, total_entity, total_predict)
    print(colored(f"[{name} set Total] Prec.: {precision:.2f}, Rec.: {recall:.2f}, Micro F1: {fscore:.2f}", 'blue'), flush=True)

    if config.earlystop_atr == "macro" and len(f1Scores) > 0:
        fscore = sum(f1Scores) / len(f1Scores)

    return [precision, recall, fscore]
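
A minimal usage sketch for evaluate_model. The dev_dataset, config and model objects below are assumptions, not part of the original snippet; the DataLoader must be built with shuffle=False and the dataset's collate_fn so that insts stays aligned with the batches.

# Hypothetical call site (dev_dataset, config and model are assumed to exist):
dev_loader = DataLoader(dev_dataset, batch_size=config.batch_size,
                        shuffle=False, collate_fn=dev_dataset.collate_fn)
dev_precision, dev_recall, dev_f1 = evaluate_model(config, model, dev_loader,
                                                   "dev", dev_dataset.insts)
print(f"dev F1: {dev_f1:.2f}")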
Example #2
class TransformersNERPredictor:

    def __init__(self, model_archived_file: str,
                 cuda_device: str = "cpu"):
        """
        model_archived_file: either a model archive ending in "tar.gz"
        or the path to the model folder directly
        """
        device = torch.device(cuda_device)
        if model_archived_file.endswith("tar.gz"):
            tar = tarfile.open(model_archived_file)
            self.conf = pickle.load(tar.extractfile(tar.getnames()[1])) ## config file
            self.model = TransformersCRF(self.conf)
            self.model.load_state_dict(torch.load(tar.extractfile(tar.getnames()[2]), map_location=device)) ## model file
        else:
            folder_name = model_archived_file
            assert os.path.isdir(folder_name)
            with open(folder_name + "/config.conf", 'rb') as f:
                self.conf = pickle.load(f)
            self.model = TransformersCRF(self.conf)
            self.model.load_state_dict(torch.load(f"{folder_name}/lstm_crf.m", map_location=device))
        self.conf.device = device
        self.model.to(device)
        self.model.eval()

        print(colored(f"[Data Info] Tokenizing the instances using '{self.conf.embedder_type}' tokenizer", "blue"))
        self.tokenizer = context_models[self.conf.embedder_type]["tokenizer"].from_pretrained(self.conf.embedder_type)

    def predict(self, sents: List[List[str]], batch_size = -1):
        batch_size = len(sents) if batch_size == -1 else batch_size

        dataset = TransformersNERDataset(file=None, sents=sents, tokenizer=self.tokenizer, label2idx=self.conf.label2idx, is_train=False)
        loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn)

        all_predictions = []
        for batch_id, batch in tqdm(enumerate(loader, 0), desc="--evaluating batch", total=len(loader)):
            one_batch_insts = dataset.insts[batch_id * batch_size:(batch_id + 1) * batch_size]
            batch_max_scores, batch_max_ids = self.model.decode(words=batch.input_ids.to(self.conf.device),
                    word_seq_lens=batch.word_seq_len.to(self.conf.device),
                    orig_to_tok_index=batch.orig_to_tok_index.to(self.conf.device),
                    input_mask=batch.attention_mask.to(self.conf.device))

            for idx in range(len(batch_max_ids)):
                length = batch.word_seq_len[idx]
                prediction = batch_max_ids[idx][:length].tolist()
                prediction = prediction[::-1]  # the decoder emits labels in reverse order; restore token order
                prediction = [self.conf.idx2labels[l] for l in prediction]
                one_batch_insts[idx].prediction = prediction
                all_predictions.append(prediction)
        return all_predictions
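
A hedged usage sketch for the predictor. The archive name below is a placeholder, and the sentences must already be tokenized into lists of words.

# Hypothetical usage: load a trained archive and tag two pre-tokenized sentences.
predictor = TransformersNERPredictor("english_model.tar.gz", cuda_device="cpu")
sents = [["Obama", "visited", "Berlin", "."],
         ["Apple", "opened", "an", "office", "in", "Paris", "."]]
predictions = predictor.predict(sents, batch_size=2)
for tokens, labels in zip(sents, predictions):
    print(list(zip(tokens, labels)))  # one label per input token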
Example #3
def evaluate_model(config: Config,
                   model: TransformersCRF,
                   data_loader: DataLoader,
                   name: str,
                   insts: List,
                   print_each_type_metric: bool = False):
    ## evaluation
    f1_metrics = F1Measure()
    batch_size = data_loader.batch_size
    with torch.no_grad():
        with tqdm(enumerate(data_loader, 0),
                  desc="--evaluating batch",
                  total=len(data_loader)) as teval:
            for batch_id, batch in teval:
                one_batch_insts = insts[batch_id * batch_size:(batch_id + 1) * batch_size]
                batch_max_scores, batch_max_ids = model.decode(
                    words=batch.input_ids.to(config.device),
                    word_seq_lens=batch.word_seq_len.to(config.device),
                    orig_to_tok_index=batch.orig_to_tok_index.to(config.device),
                    input_mask=batch.attention_mask.to(config.device))
                batch_p, batch_predict, batch_total = evaluate_batch_insts(
                    one_batch_insts, batch_max_ids, batch.label_ids,
                    batch.word_seq_len, config.idx2labels)
                f1_metrics.update(batch_p, batch_predict, batch_total)
                teval.set_postfix(**f1_metrics.get_metric(
                    print_each_type_metric=False)[0])
    final_metrics, final_metrics_key = f1_metrics.get_metric(
        print_each_type_metric)
    if final_metrics_key is not None:
        for key in final_metrics_key:
            precision_key = final_metrics_key[key]["Prec."]
            recall_key = final_metrics_key[key]["Recl."]
            fscore_key = final_metrics_key[key]["F1"]
            print(f"[{key}] Prec.: {precision_key:.2f}, Rec.: {recall_key:.2f}, F1: {fscore_key:.2f}")

    precision, recall, fscore = final_metrics["Prec"], final_metrics["Recl"], final_metrics["F1"]
    print(colored(f"[{name} set Total] Prec.: {precision:.2f}, Rec.: {recall:.2f}, F1: {fscore:.2f}", 'blue'),
          flush=True)

    return [precision, recall, fscore]
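
The F1Measure accumulator used above is project-specific and not shown. Below is a minimal sketch of the interface the snippet relies on: update() takes the per-label Counters returned by evaluate_batch_insts, and get_metric() returns an overall metrics dict plus an optional per-label dict. Only the method names and dictionary keys mirror the snippet; the implementation itself is an assumption.

from collections import Counter

class F1Measure:
    """Sketch of the accumulator interface assumed above (not the project's actual code)."""

    def __init__(self):
        self.p, self.pred, self.gold = Counter(), Counter(), Counter()

    def update(self, batch_p, batch_predict, batch_total):
        # accumulate per-label counts of correct, predicted and gold entities
        self.p += batch_p
        self.pred += batch_predict
        self.gold += batch_total

    @staticmethod
    def _prf(p, num_pred, num_gold):
        prec = p * 100.0 / num_pred if num_pred > 0 else 0.0
        rec = p * 100.0 / num_gold if num_gold > 0 else 0.0
        f1 = 2 * prec * rec / (prec + rec) if prec + rec > 0 else 0.0
        return prec, rec, f1

    def get_metric(self, print_each_type_metric=False):
        prec, rec, f1 = self._prf(sum(self.p.values()),
                                  sum(self.pred.values()),
                                  sum(self.gold.values()))
        overall = {"Prec": prec, "Recl": rec, "F1": f1}
        per_label = None
        if print_each_type_metric:
            per_label = {key: dict(zip(("Prec.", "Recl.", "F1"),
                                       self._prf(self.p[key], self.pred[key], self.gold[key])))
                         for key in self.gold}
        return overall, per_label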