Code Example #1
 def create_and_check_bert_for_question_answering(
     self,
     config,
     input_ids,
     token_type_ids,
     input_mask,
     sequence_labels,
     token_labels,
     choice_labels,
 ):
     model = BertForQuestionAnswering(config=config)
     model.eval()
     # Positional call in the legacy pytorch_pretrained_bert-style signature:
     # (input_ids, token_type_ids, attention_mask, start_positions, end_positions)
     loss, start_logits, end_logits = model(input_ids, token_type_ids,
                                            input_mask, sequence_labels,
                                            sequence_labels)
     result = {
         "loss": loss,
         "start_logits": start_logits,
         "end_logits": end_logits
     }
     self.parent.assertListEqual(list(result["start_logits"].size()),
                                 [self.batch_size, self.seq_length])
     self.parent.assertListEqual(list(result["end_logits"].size()),
                                 [self.batch_size, self.seq_length])
     self.check_loss_output(result)
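
For orientation: the positional call above follows the old pytorch_pretrained_bert-style signature. With recent versions of transformers, the same forward pass is usually written with keyword arguments. A minimal sketch, reusing the tensors from the example above (the named-field access assumes a version that returns a QuestionAnsweringModelOutput):

outputs = model(
    input_ids=input_ids,
    attention_mask=input_mask,
    token_type_ids=token_type_ids,
    start_positions=sequence_labels,
    end_positions=sequence_labels,
)
# With labels supplied, the output carries the loss alongside the span logits.
loss, start_logits, end_logits = outputs.loss, outputs.start_logits, outputs.end_logits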
Code Example #2
File: predict.py  Project: martian-ai/Dialogue-Robot
 def __init__(self, model_state_dict) -> None:
     no_cuda = True
     self.device = torch.device(
         "cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
     self.tokenizer = BertTokenizer.from_pretrained('bert-base-chinese',
                                                    do_lower_case=False)
     config = BertConfig.from_pretrained('bert-base-chinese')
     self.model = BertForQuestionAnswering(config)
     self.model.load_state_dict(
         torch.load(model_state_dict, map_location='cpu'))
     self.model.to(self.device)
     self.model.eval()  # TODO
Code Example #3
 def load_model(self, model_path: str, do_lower_case=False):
     config = BertConfig.from_pretrained(model_path + "/config.json")
     tokenizer = BertTokenizer.from_pretrained(model_path,
                                               do_lower_case=do_lower_case)
     model = BertForQuestionAnswering.from_pretrained(model_path,
                                                      from_tf=False,
                                                      config=config)
     return model, tokenizer
Code Example #4
 def load_model(model_name: str, do_lower_case=False):
     config = BertConfig.from_pretrained(model_name)
     tokenizer = BertTokenizer.from_pretrained(model_name,
                                               do_lower_case=do_lower_case)
     model = BertForQuestionAnswering.from_pretrained(model_name,
                                                      from_tf=False,
                                                      config=config)
     return model, tokenizer
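
As a usage sketch for the two loaders above: once the model and tokenizer are in hand, extractive QA reduces to encoding a question/context pair and picking the highest-scoring start and end tokens. The checkpoint name and texts below are placeholders, the tuple-style indexing assumes a transformers version that returns (start_logits, end_logits) when no labels are passed, and this naive argmax does none of the span-validity checks that write_predictions performs:

import torch

# Placeholder checkpoint; any BERT checkpoint fine-tuned for SQuAD-style QA works.
model, tokenizer = load_model("bert-large-cased-whole-word-masking-finetuned-squad")
model.eval()

question = "Who introduced BERT?"
context = "BERT was introduced in 2018 by researchers at Google AI Language."

inputs = tokenizer.encode_plus(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# outputs[0] / outputs[1] are the start / end logits over the input tokens.
start = torch.argmax(outputs[0], dim=1).item()
end = torch.argmax(outputs[1], dim=1).item()
answer_ids = inputs["input_ids"][0][start:end + 1]
print(tokenizer.decode(answer_ids.tolist()))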
Code Example #5
File: bert.py  Project: zzozzolev/claf
    def __init__(self, token_makers, lang_code="en", pretrained_model_name=None, answer_maxlen=30):
        super(BertForQA, self).__init__(token_makers)

        self.lang_code = lang_code
        self.use_pytorch_transformers = True  # for optimizer's model parameters
        self.answer_maxlen = answer_maxlen

        self.model = BertForQuestionAnswering.from_pretrained(
            pretrained_model_name, cache_dir=str(CachePath.ROOT)
        )
        self.criterion = nn.CrossEntropyLoss()
Code Example #6
    def load_pretrained_model(model_path: str, lower_case=True):
        """
        Imports pretrained BERT model from the official format as seen on:
        https://github.com/google-research/bert

        :param model_path: Path to the model checkpoint file
        :param lower_case: select False if loading cased model
        :return: pretrained model and its tokenizer
        """
        config = BertConfig.from_pretrained(model_path + "/bert_config.json")
        tokenizer = BertTokenizer.from_pretrained(model_path, do_lower_case=lower_case)
        model = BertForQuestionAnswering.from_pretrained(model_path, from_tf=False, config=config)
        return model, tokenizer
Code Example #7
def load_model(model_file, model_type, cache_dir):
    start_time = current_milli_time()

    # Load a pretrained model that has been fine-tuned
    config = BertConfig.from_pretrained(model_type, output_hidden_states=True, cache_dir=cache_dir)

    pretrained_weights = torch.load(model_file, map_location=torch.device('cpu'))
    model = BertForQuestionAnswering.from_pretrained(model_type,
                                                     state_dict=pretrained_weights,
                                                     config=config,
                                                     cache_dir=cache_dir)

    end_time = current_milli_time()
    logger.info("Model Loading Time: {} ms".format(end_time - start_time))

    return model
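
The snippet above assumes a module-level logger and a current_milli_time helper that are not shown; a minimal version consistent with how they are used might be:

import logging
import time

logger = logging.getLogger(__name__)


def current_milli_time():
    # Wall-clock time in milliseconds, used only to report the model loading time.
    return int(round(time.time() * 1000))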
Code Example #8
File: config_loader.py  Project: yumoxu/querysum
def load_bert_passage():
    if meta_model_name != 'bert_passage':
        raise ValueError('Invalid meta_model_name: {}'.format(meta_model_name))

    tokenizer_dir = path_parser.bert_passage_tokenizer
    checkpoint_dir = path_parser.bert_passage_checkpoint.format(
        bert_passage_iter)

    print('Load PyTorch model from {}, vocab: {}'.format(
        checkpoint_dir, tokenizer_dir))

    model_params = {
        'pretrained_model_name_or_path': checkpoint_dir,
    }
    bert_model = BertForQuestionAnswering.from_pretrained(**model_params)

    tokenizer = BertTokenizer.from_pretrained(tokenizer_dir,
                                              do_lower_case=True,
                                              do_basic_tokenize=True)

    return bert_model, tokenizer
Code Example #9
                    type=str,
                    required=True,
                    help="model para after pretrained")

args = parser.parse_args()
args.n_gpu = torch.cuda.device_count()
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
device = torch.device(
    "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.device = device
tokenizer = BertTokenizer.from_pretrained(
    args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
    do_lower_case=False)
config = BertConfig.from_pretrained(
    args.tokenizer_name if args.tokenizer_name else args.model_name_or_path)
model = BertForQuestionAnswering(config)
model_state_dict = args.state_dict
model.load_state_dict(torch.load(model_state_dict))
model.to(args.device)
model.eval()
input_file = args.predict_file


def handle_file(input_file, context, question):
    with open(input_file, "r") as reader:
        orig_data = json.load(reader)
        orig_data["data"][0]['paragraphs'][0]['context'] = context
        for i in range(len(question)):
            orig_data["data"][0]['paragraphs'][0]['qas'][i][
                'question'] = question[i]
    with open(input_file, "w") as writer:
Code Example #10
File: predict.py  Project: martian-ai/Dialogue-Robot
class QA_BERT_SingleSpan():
    def __init__(self, model_state_dict) -> None:
        no_cuda = True
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-chinese',
                                                       do_lower_case=False)
        config = BertConfig.from_pretrained('bert-base-chinese')
        self.model = BertForQuestionAnswering(config)
        self.model.load_state_dict(
            torch.load(model_state_dict, map_location='cpu'))
        self.model.to(self.device)
        self.model.eval()  # TODO

    def predict_old(self,
                    query,
                    search_size,
                    max_query_length,
                    max_answer_length,
                    max_seq_length,
                    n_best_size,
                    doc_stride,
                    verbose_logging,
                    es_index,
                    null_score_diff_threshold,
                    prefix=""):
        dataset, examples, features, recall_scores = build_dataset_example_feature(
            query, self.tokenizer, max_query_length, max_seq_length,
            doc_stride, search_size, es_index)
        eval_dataloader = DataLoader(dataset, sampler=None, batch_size=16)
        all_results = []
        for batch in eval_dataloader:
            batch = tuple(t.to(self.device) for t in batch)
            with torch.no_grad():
                inputs = {
                    'input_ids': batch[0],
                    'attention_mask': batch[1],
                    'token_type_ids': batch[2]
                }
                example_indices = batch[3]
                outputs = self.model(**inputs)
            for i, example_index in enumerate(example_indices):
                eval_feature = features[example_index.item()]
                unique_id = int(eval_feature.unique_id)
                result = RawResult(unique_id=unique_id,
                                   start_logits=to_list(outputs[0][i]),
                                   end_logits=to_list(outputs[1][i]))
                all_results.append(result)
        all_predictions = write_predictions(examples, features, all_results,
                                            n_best_size, max_answer_length,
                                            True, None, None, None,
                                            verbose_logging, False,
                                            null_score_diff_threshold)
        return all_predictions, recall_scores

    def predict(self, doc=None, query=None):
        """
        function

        params
            doc : 输入文档
            query : 输入查询话术
        returns

        """

        if doc is not None:
            pass
            # TODO: insert the document
        else:
            dataset, examples, features, _ = build_dataset_example_feature_by_context_query(
                query, self.tokenizer, 16, 384, 128)

        eval_dataloader = DataLoader(dataset, sampler=None, batch_size=16)
        all_results = []
        for batch in eval_dataloader:
            self.model.eval()
            batch = tuple(t.to(self.device) for t in batch)
            with torch.no_grad():
                inputs = {
                    'input_ids': batch[0],
                    'attention_mask': batch[1],
                    'token_type_ids': batch[2]
                }
                example_indices = batch[3]
                outputs = self.model(**inputs)
            for i, example_index in enumerate(example_indices):
                eval_feature = features[example_index.item()]
                unique_id = int(eval_feature.unique_id)
                result = RawResult(unique_id=unique_id,
                                   start_logits=to_list(outputs[0][i]),
                                   end_logits=to_list(outputs[1][i]))
                all_results.append(result)
        return write_predictions(examples, features, all_results, 3, 24, True,
                                 None, None, None, False, False, 0.0)
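
A hypothetical call pattern for the class above; the checkpoint path and the query string are placeholders:

qa = QA_BERT_SingleSpan("path/to/pytorch_model.bin")  # fine-tuned QA state dict
predictions = qa.predict(query="your question here")
print(predictions)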
Code Example #11
def load_model(model_path, device):
    model = BertForQuestionAnswering.from_pretrained(model_path).to(device)
    tokenizer_dir = '../data/my-bert-large-cased-squad/vocab.txt'
    tokenizer = BertTokenizer.from_pretrained(tokenizer_dir, do_lower_case=False)
    return tokenizer, model
Code Example #12
File: qaptnet.py  Project: rohanky/qaptnet
 def _build_model(self):
     print('Building model from:', self.source)
     self.model = BertForQuestionAnswering.from_pretrained(self.source)
Code Example #13
File: model_evaluate.py  Project: jianliu-ml/EEasMRC
def load_model(model_path, device):
    model = BertForQuestionAnswering.from_pretrained(model_path).to(device)
    tokenizer_dir = '/home/jliu/data/BertModel/bert-large-cased-squad/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt'
    tokenizer = BertTokenizer.from_pretrained(tokenizer_dir,
                                              do_lower_case=False)
    return tokenizer, model