def benchmark_bert_mdl():
    bert = load_bert_ner_model()

    start = time.time()

    predictions = []
    for i, sentence in enumerate(sentences_tokens):
        _, pred_ents = bert.predict(sentence)
        predictions.append(pred_ents)
    print('BERT:')
    print_speed_performance(start, num_sentences, num_tokens)

    assert len(predictions) == num_sentences

    print(f1_report(sentences_entities, remove_miscs(predictions), bio=True))
def benchmark_bert_mdl():
    bert = load_bert_ner_model()

    start = time.time()

    predictions = []
    for i, sentence in enumerate(sentences_tokens):
        _, pred_ents = bert.predict(sentence)
        predictions.append(pred_ents)
    print('bert:')
    print("Made predictions on {} sentences and {} tokens in {}s".format(
        num_sentences, num_tokens, time.time() - start))

    assert len(predictions) == num_sentences

    print(classification_report(sentences_entities, remove_miscs(predictions),
                                digits=4))
def setup(self):
    self.model = dm.load_bert_ner_model()
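# A hedged sketch of how the setup() method above might be used: in asv-style
# timing suites, setup() runs before each timed method on the same class, so
# model loading stays outside the measured region. The class name, the `dm`
# alias (assumed to be `import danlp.models as dm`) and the Danish example
# sentence are illustrative assumptions, not part of the original code.
import danlp.models as dm


class TimeBertNer:
    def setup(self):
        # load the model once per benchmark round, outside the timed region
        self.model = dm.load_bert_ner_model()

    def time_predict(self):
        # predict() takes a list of tokens and returns (tokens, entity_tags),
        # matching its use in benchmark_bert_mdl() above
        self.model.predict(["Jens", "Hansen", "bor", "i", "København"])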
# general
import numpy as np
import time

import torch

# models
from danlp.models import load_bert_ner_model  # , load_flair_ner_model

# dataset
from danlp.datasets import DDT

# utils
# from flair.data import Sentence, Token

# load models
bert = load_bert_ner_model()
# flair = load_flair_ner_model()

# CUDA for PyTorch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
torch.backends.cudnn.benchmark = True

# get data (split into a training set, a validation set, and a test set)
ddt = DDT()
train, valid, test = ddt.load_as_simple_ner(True)

# divide the observations and the targets of the test set into new variables
sentences, categories = test

batch_size = 64
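# The rest of the original script is not shown above; what follows is a
# minimal sketch (an assumption, not the original code) of how the test split
# loaded above could be scored with the BERT tagger, reusing the per-sentence
# predict() call seen in benchmark_bert_mdl(). The seqeval import is an
# assumption for computing the span-level report, and batch_size is not used
# here because predict() is called one tokenized sentence at a time.
from seqeval.metrics import classification_report

start = time.time()
predictions = []
for sentence in sentences:
    # sentence is a list of tokens; predict() returns (tokens, entity_tags)
    _, pred_ents = bert.predict(sentence)
    predictions.append(pred_ents)
print("Predicted {} sentences in {:.1f}s".format(len(sentences), time.time() - start))

# compare predicted BIO tags against the gold annotations
print(classification_report(categories, predictions, digits=4))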