Code Example #1
# bert_path is assumed to point at a local Chinese BERT checkpoint directory.
import torch
from transformers import BertTokenizer, BertConfig, BertForQuestionAnswering


def task_3():
    # Task 3: question answering (predict the answer span)
    question, text = "里昂是谁", "里昂是一个杀手"
    sample = (question, text)

    tokenizer = BertTokenizer.from_pretrained(bert_path)
    sen_code = tokenizer.batch_encode_plus(
        [sample])  # question/context pairs can be passed like this: List[Tuple[str, str]]
    tokens_tensor = torch.tensor(sen_code["input_ids"])
    segments_tensor = torch.tensor(sen_code["token_type_ids"])

    model_config = BertConfig.from_pretrained(bert_path)
    # model_config.num_labels = 2  # two outputs in the end: start position and end position
    # model = BertForQuestionAnswering.from_pretrained(bert_path)  # one way to load (pretrained weights)
    model = BertForQuestionAnswering(model_config)  # another way (randomly initialized from the config)

    model.eval()
    # Pass token_type_ids by keyword: the second positional argument is attention_mask.
    outputs = model(tokens_tensor, token_type_ids=segments_tensor)
    start_pos, end_pos = outputs.start_logits, outputs.end_logits

    for idx, (start, end) in enumerate(
            zip(start_pos.argmax(axis=1), end_pos.argmax(axis=1))):
        all_tokens = tokenizer.convert_ids_to_tokens(
            sen_code["input_ids"][idx])  # decode the ids back to the original tokens
        print(
            all_tokens
        )  # ['[CLS]', '里', '昂', '是', '谁', '[SEP]', '里', '昂', '是', '一', '个', '杀', '手', '[SEP]']
        if start <= end:
            answer = " ".join(all_tokens[start:end + 1])  # 对输出的答案进行解码的过程
            # 每次执行的结果不一致,这里因为没有经过微调,所以效果不是很好,输出结果不佳,下面的输出是其中的一种。
            print(answer)  # 一 个 杀 手 [SEP]
        else:
            print("预测的有问题哦!")
Code Example #2
import re

import numpy as np
import tensorflow as tf
import torch
from transformers import BertConfig, BertForQuestionAnswering


def load_from_tf(config, tf_path):
    model = BertForQuestionAnswering(config)
    model.classifier = model.qa_outputs

    # This part is copied from HuggingFace Transformers with a fix to bypass an error
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        # print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used by AdamWeightDecayOptimizer to calculate m and v,
        # which are not required when using the pretrained model
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(
                    pointer, "classifier")  # This line is causing the issue
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    print("Skipping {}".format("/".join(name)))
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)

    model.qa_outputs = model.classifier
    del model.classifier
    return model
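
A hedged usage sketch for the loader above, with placeholder paths and using the
BertConfig imported from transformers:

    # Sketch: convert a TF BERT SQuAD checkpoint using load_from_tf.
    config = BertConfig.from_json_file("bert_config.json")
    model = load_from_tf(config, "model.ckpt")
    torch.save(model.state_dict(), "pytorch_model.bin")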
Code Example #3
    def test_patch_module_ampere(self):
        config = BertConfig.from_pretrained("bert-base-uncased")
        model = BertForQuestionAnswering(config)

        parameters = LinearPruningArgs(
            method="topK",
            submethod="default",
            ampere_method="annealing",
            block_rows=32,
            block_cols=32,
            min_elements=0.005,
        )

        context = PatcherContext()

        p = LinearPruningModulePatcher(context, parameters, self.MODEL_STRUCTURE)

        module_patchers = dict(query=p, key=p, value=p, att_dense=p, interm_dense=p, output_dense=p)

        patcher = LinearModelPatcher(module_patchers, self.MODEL_STRUCTURE)
        patcher.patch(model)

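        # Expect 72 patched modules: 12 transformer layers × 6 linears per layer
        # (query, key, value, att_dense, interm_dense, output_dense).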
        self.assertEqual(patcher.stats["patched"], 72)
        key_sizes = {k: len(v) for k, v in context.context_modules.items()}

        self.assertEqual(key_sizes, {"ampere_mask": 72, "mask": 72})
Code Example #4
File: test_patch.py Project: sz128/nn_pruning
    def test_patch_module_tied_attention(self):
        config = BertConfig.from_pretrained("bert-base-uncased")
        model = BertForQuestionAnswering(config)

        parameters = LinearPruningParameters(
            method="topK",
            submethod="default",
            ampere_method="annealing",
            block_rows=32,
            block_cols=32,
        )

        context = PatcherContext()

        p_attention = JointPruningModulePatcher(context, parameters, "attention")
        p_dense = LinearPruningModulePatcher(context, parameters)

        module_patchers = dict(
            query=p_attention,
            key=p_attention,
            value=p_attention,
            att_dense=p_dense,
            interm_dense=p_dense,
            output_dense=p_dense,
        )

        patcher = BertLinearModelPatcher(module_patchers)
        patcher.patch(model)

        self.assertEqual(patcher.stats["patched"], 72)
        key_sizes = {k: len(v) for k, v in context.context_modules.items()}

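        # ampere_mask stays per-module (72), while "mask" drops to 48 because
        # query/key/value share one joint mask per layer: 12 × (1 shared + 3 dense).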
        self.assertEqual(key_sizes, {"ampere_mask": 72, "mask": 48})
Code Example #5
    def __init__(self, args):
        print("Loading BERT configs...")
        with open("bert_config.json") as f:
            config_json = json.load(f)

        config = BertConfig(
            attention_probs_dropout_prob=config_json[
                "attention_probs_dropout_prob"],
            hidden_act=config_json["hidden_act"],
            hidden_dropout_prob=config_json["hidden_dropout_prob"],
            hidden_size=config_json["hidden_size"],
            initializer_range=config_json["initializer_range"],
            intermediate_size=config_json["intermediate_size"],
            max_position_embeddings=config_json["max_position_embeddings"],
            num_attention_heads=config_json["num_attention_heads"],
            num_hidden_layers=config_json["num_hidden_layers"],
            type_vocab_size=config_json["type_vocab_size"],
            vocab_size=config_json["vocab_size"])

        print("Loading PyTorch model...")
        self.model = BertForQuestionAnswering(config)
        self.model.eval()
        self.model.cuda()
        self.model.load_state_dict(
            torch.load(
                "build/data/bert_tf_v1_1_large_fp32_384_v2/model.pytorch"))

        print("Constructing SUT...")
        self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries,
                                   self.process_latencies)
        print("Finished constructing SUT.")

        self.qsl = get_squad_QSL(args.max_examples)
Code Example #6
    def load(self, fname=None):
        if fname is not None:
            self.load_path = fname

        # If pretrained_bert is not a local file, treat it as a model id on the hub.
        if self.pretrained_bert and not Path(self.pretrained_bert).is_file():
            self.model = BertForQuestionAnswering.from_pretrained(
                self.pretrained_bert,
                output_attentions=False,
                output_hidden_states=False)
        elif self.bert_config_file and Path(self.bert_config_file).is_file():
            self.bert_config = BertConfig.from_json_file(
                str(expand_path(self.bert_config_file)))

            if self.attention_probs_keep_prob is not None:
                self.bert_config.attention_probs_dropout_prob = 1.0 - self.attention_probs_keep_prob
            if self.hidden_keep_prob is not None:
                self.bert_config.hidden_dropout_prob = 1.0 - self.hidden_keep_prob
            self.model = BertForQuestionAnswering(config=self.bert_config)
        else:
            raise ConfigError("No pre-trained BERT model is given.")

        self.model.to(self.device)
        self.optimizer = getattr(torch.optim, self.optimizer_name)(
            self.model.parameters(), **self.optimizer_parameters)
        if self.lr_scheduler_name is not None:
            self.lr_scheduler = getattr(torch.optim.lr_scheduler,
                                        self.lr_scheduler_name)(
                                            self.optimizer,
                                            **self.lr_scheduler_parameters)

        if self.load_path:
            logger.info(f"Load path {self.load_path} is given.")
            if isinstance(self.load_path,
                          Path) and not self.load_path.parent.is_dir():
                raise ConfigError("Provided load path is incorrect!")

            weights_path = Path(self.load_path.resolve())
            weights_path = weights_path.with_suffix(f".pth.tar")
            if weights_path.exists():
                logger.info(f"Load path {weights_path} exists.")
                logger.info(
                    f"Initializing `{self.__class__.__name__}` from saved.")

                # now load the weights, optimizer from saved
                logger.info(f"Loading weights from {weights_path}.")
                checkpoint = torch.load(weights_path, map_location=self.device)
                self.model.load_state_dict(checkpoint["model_state_dict"])
                self.optimizer.load_state_dict(
                    checkpoint["optimizer_state_dict"])
                self.epochs_done = checkpoint.get("epochs_done", 0)
            else:
                logger.info(
                    f"Init from scratch. Load path {weights_path} does not exist."
                )
Code Example #7
File: model.py Project: mengshiY/RCSF
 def __init__(self, bert_dir, args):
     super(BERTPretrainedMRC, self).__init__()
     if args.load_pretrainedBERT:
         self.bert = BertForQuestionAnswering.from_pretrained(bert_dir)
     else:
         self.bert_config = BertQueryNerConfig.from_pretrained(
             bert_dir,
             hidden_dropout_prob=args.bert_dropout,
             attention_probs_dropout_prob=args.bert_dropout,
             mrc_dropout=args.mrc_dropout)
         self.bert = BertForQuestionAnswering(self.bert_config)
Code Example #8
def model_fn(model_dir):
    config_path = model_dir + '/config_file.json'
    model_path = model_dir + '/pytorch_model.bin'

    config = BertConfig.from_json_file(config_path)
    model = BertForQuestionAnswering(config)

    # Map the saved weights onto GPU if available, otherwise CPU
    model.load_state_dict(
        torch.load(model_path,
                   map_location=torch.device(
                       'cuda' if torch.cuda.is_available() else 'cpu')))
    return model
Code Example #9
def construct_qa_transformer(options: KaggleEvaluationOptions) -> Reranker:
    # We load a sequence classification model first -- again, as a workaround. Refactor.
    try:
        model = AutoModelForSequenceClassification.from_pretrained(options.model_name)
    except OSError:
        model = AutoModelForSequenceClassification.from_pretrained(options.model_name, from_tf=True)
    fixed_model = BertForQuestionAnswering(model.config)
    fixed_model.qa_outputs = model.classifier
    fixed_model.bert = model.bert
    device = torch.device(options.device)
    model = fixed_model.to(device).eval()
    tokenizer = AutoTokenizer.from_pretrained(options.tokenizer_name, do_lower_case=options.do_lower_case)
    return QuestionAnsweringTransformerReranker(model, tokenizer)
コード例 #10
0
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path:str, bert_config_file:str, pytorch_dump_path:str)->None:
    """
    Updated function to convert a Tensorflow checkpoint to compatible model.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    
    model = BertForQuestionAnswering(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
Code Example #11
 def create_and_check_for_question_answering(
     self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
 ):
     model = BertForQuestionAnswering(config=config)
     model.to(torch_device)
     model.eval()
     result = model(
         input_ids,
         attention_mask=input_mask,
         token_type_ids=token_type_ids,
         start_positions=sequence_labels,
         end_positions=sequence_labels,
     )
     self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
     self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
Code Example #12
 def create_and_check_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
     model = BertForQuestionAnswering(config=config)
     model.eval()
     loss, start_logits, end_logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
                                            start_positions=sequence_labels, end_positions=sequence_labels)
     result = {
         "loss": loss,
         "start_logits": start_logits,
         "end_logits": end_logits,
     }
     self.parent.assertListEqual(
         list(result["start_logits"].size()),
         [self.batch_size, self.seq_length])
     self.parent.assertListEqual(
         list(result["end_logits"].size()),
         [self.batch_size, self.seq_length])
     self.check_loss_output(result)
Code Example #13
    def test_patch_tiedattention_line_pruning(self):
        config = BertConfig.from_pretrained("bert-base-uncased")
        model = BertForQuestionAnswering(config)

        parameters_attention = LinearPruningArgs(
            method="topK",
            submethod="default",
            ampere_method="annealing",
            block_rows=32,
            block_cols=32,
            min_elements=0.005,
        )

        parameters_dense = LinearPruningArgs(
            method="topK", submethod="1d", ampere_method="annealing", block_rows=32, block_cols=32, min_elements=0.005
        )

        context = PatcherContext()

        p_attention = JointPruningModulePatcher(context, parameters_attention, self.MODEL_STRUCTURE, suffix=".attention")
        p_dense = ChannelPruningModulePatcher(context, parameters_dense, self.MODEL_STRUCTURE, suffix="dense")

        module_patchers = dict(
            query=p_attention,
            key=p_attention,
            value=p_attention,
            att_dense=p_dense,
            interm_dense=p_dense,
            output_dense=p_dense,
        )

        patcher = LinearModelPatcher(module_patchers, self.MODEL_STRUCTURE)
        patcher.patch(model)

        self.assertEqual(patcher.stats["patched"], 72)
        key_sizes = {k: len(v) for k, v in context.context_modules.items()}

        for k, v in key_sizes.items():
            print(k, v)

        for k, v in context.context_modules.items():
            print(k, v)
        self.assertEqual(key_sizes, {"ampere_mask": 72, "mask": 12, "mask_1d": 48})
Code Example #14
def demo4():
    import torch
    import transformers
    from transformers import BertTokenizer, BertForQuestionAnswering

    MODEL_PATH = r"D:\transformr_files\bert-base-uncased/"
    # Instantiate the tokenizer
    tokenizer = BertTokenizer.from_pretrained(
        r"D:\transformr_files\bert-base-uncased\bert-base-uncased-vocab.txt")
    # Load the BERT model config
    model_config = transformers.BertConfig.from_pretrained(MODEL_PATH)
    # First build the base BertModel
    bert_model = transformers.BertModel.from_pretrained(MODEL_PATH,
                                                        config=model_config)
    # Two outputs in the end: the start position and the end position (see below)
    model_config.num_labels = 2
    # Build BertForQuestionAnswering from the same model_config
    model = BertForQuestionAnswering(model_config)
    model.bert = bert_model

    # Set evaluation mode
    model.eval()
    question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
    # Encode the question/context pair to input_ids
    input_ids = tokenizer.encode(question, text)
    # Build token_type_ids manually; encode_plus can do this instead (see the sketch below)
    # input_ids = tokenizer.encode_plus("i like you", "but not him")
    token_type_ids = [
        0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))
    ]
    # Get the start/end scores
    start_scores, end_scores = model(torch.tensor([input_ids]),
                                     token_type_ids=torch.tensor(
                                         [token_type_ids]))
    # Decode the ids back to the original tokens
    all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
    # ['[CLS]', 'who', 'was', 'jim', 'henson', '?', '[SEP]', 'jim', 'henson', 'was', 'a', 'nice', 'puppet', '[SEP]']
    # Decode the predicted answer span
    answer = ' '.join(
        all_tokens[torch.argmax(start_scores):torch.argmax(end_scores) + 1])
    # assert answer == "a nice puppet"
    # The model has not been fine-tuned here, so the result quality is poor.
    print(answer)
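
As noted in the comments, the manual token_type_ids construction can be replaced by
encode_plus. A minimal sketch, reusing tokenizer, model, question and text from
demo4 (tuple outputs, matching the transformers version used above):

    # Sketch: let the tokenizer build input_ids and token_type_ids for the pair.
    encoded = tokenizer.encode_plus(question, text, return_tensors="pt")
    start_scores, end_scores = model(encoded["input_ids"],
                                     token_type_ids=encoded["token_type_ids"])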
Code Example #15
File: __main__.py Project: n60512/BERT-QA
# Assumes module-level imports of json, torch, BertConfig and BertForQuestionAnswering,
# plus a tokenizer and BertQATrainer defined elsewhere in the project.
def interaction(args):

    while True:
        qa_text = input("------------------\nPlease Enter :\n")

        if qa_text == 'exit':
            break

        # qa_text = '{ "sentence":"在下薩克森邦留下歷史印記的主要建築風格是文藝復興主義的一個分支[UNK][UNK]「威悉河文藝復興風格」。此外,漢諾瓦著名的海恩豪森王宮花園是歐洲巴洛克風格的典型代表。在歐斯納布魯克,人們可以找到很多古典主義和洛可可風格的建築物。這座城市的著名景點包括大教堂、威斯伐倫和約的簽署地市政廳、許多石雕和木桁架建築。下薩克森邦最大的巴洛克城堡[UNK][UNK]歐斯納布魯克城堡和最高的中世紀後哥德式建築[UNK][UNK]聖凱薩琳教堂也坐落在歐斯納布魯克。巴特伊堡的伊堡城堡和本篤會修道院在建築學和藝術史學上具有重要意義。19世紀以來,下薩克森邦造就了多位享有國際聲譽的藝術家,其中的代表性人物是畫家威廉•布施。", "question":"歐斯納布魯克有哪一座中世紀後哥德式建築是這類建築中最高的?"}'
        # qa_text = '{ "sentence":"蔡英文從小備受父母親、兄姐寵愛[26]。早期就讀臺北市私立雙連幼稚園[47],啟蒙教育完成後,便接受國民教育[29]。1963年,就讀臺北市中山區長安國民小學[48]。1966年,四年級的她轉學到新成立的臺北市中山區吉林國民小學[48]。1971年,她以臺北市立北安國民中學第一屆畢業生畢業[48]。高級中學時,就讀臺北市立中山女子高級中學[49],前立法院副院長、中國國民黨主席洪秀柱是大她八屆的學姐[50]。 ", "question":"誰是蔡英文總統的學姊?"}'
        # qa_test = '{ "sentence":"辛普森家庭是馬特·格朗寧為美國福斯廣播公司創作的一部成人動畫情景喜劇。該劇透過展現荷馬、美枝、霸子、花枝和奶嘴一家五口的日常生活,諷刺性地描繪了美國中產階級的生活方式。空間設定於虛構小鎮內糊的辛普森家庭,幽默地嘲諷了美國文化、社會、電視節目和人生百態。為了給製片人詹姆斯·L·布魯克斯製作一出動畫短劇,馬特·格朗寧構思出了辛普森一家人的形象。格朗寧用自己家族成員的名字逐一地給他們命名,而自己的名字則用「霸子」替代。1987年4月19日短劇成為了《特蕾西·厄爾曼秀》的一部分。在播映三季後,《辛普森家庭》得以轉正進入半小時的黃金時段,並成為了福克斯在早期達成的成功之一。", "question":"辛普森家庭是哪家公司的創作?"}'
        # qa_test = '{ "sentence":"海賊王的世界觀舞台是由世界各地的加盟國與所組成的國際組織「世界政府」所共同管理。然而,由於「海賊王」哥爾·D·羅傑被執行死刑後迎來了「大海賊時代」,結果海賊們於世界各地擴展權力,並直接與直屬世界政府的海軍作戰。本作是以島上的國家為單位,也有的島嶼只有村子、城鎮存在,大部分主要國家加入世界政府聯盟,並支持海軍討伐海賊。至於生活方式和科學技術,基本上是以現實世界海賊的「黃金時代」(17世紀到18世紀)為藍本,但是與現實世界而言還是擁有很大的差別,以作品中世界固有的獨特設定。惡魔果實服用後會依不同的果實而對應獲得不可思議的特殊能力,許多角色因其能力都擁有了超人般的戰鬥力。", "question":"在海賊王中如何得到超人般的戰鬥力?"}'

        qa_text = json.loads(qa_text)

        config = BertConfig.from_pretrained('bert-base-chinese')
        model = BertForQuestionAnswering(config)
        model.load_state_dict(
            torch.load('{}/model/best_model.bin'.format(args.load_model_path)))

        BertQA = BertQATrainer(args, model, None, None, None)
        BertQA.interaction(tokenizer, qa_text)

Code Example #16
def bert_model():
    config = BertConfig()
    model = BertForQuestionAnswering(config=config)
    return model
Code Example #17
 def __init__(self, config):
     super(QAmodel, self).__init__(config)
     # from_pretrained is a classmethod; call it on the class instead of on a
     # freshly constructed (and immediately discarded) instance.
     self.bert = BertForQuestionAnswering.from_pretrained(
         'bert-base-chinese', config=config)
Code Example #18
File: test_patch.py Project: sz128/nn_pruning
    def test_base(self):
        config = BertConfig.from_pretrained("bert-base-uncased")
        model = BertForQuestionAnswering(config)

        patcher = BertLinearModelPatcher({})
        layers = patcher.get_patchable_layers(model)
Code Example #19
parser.add_argument("--state_dict",
                    default=None,
                    type=str,
                    required=True,
                    help="model para after pretrained")

args = parser.parse_args()
args.n_gpu = torch.cuda.device_count()
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
device = torch.device(
    "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.device = device
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese',
                                          do_lower_case=False)
config = BertConfig.from_pretrained('bert-base-chinese')
model = BertForQuestionAnswering(config)
model_state_dict = args.state_dict
model.load_state_dict(torch.load(model_state_dict))
model.to(args.device)
model.eval()
input_file = args.predict_file


def handle_file(input_file, context, question):
    orig_data = {"data": [{"paragraphs": [{"context": context, "qas": []}]}]}
    for i in range(len(question)):
        orig_data["data"][0]['paragraphs'][0]['qas'].append({
            'question':
            question[i],
            'id':
            str(i)
Code Example #20
print("Torch execution device: " + TORCH_DEVICE)

if BERT_MODEL_HUB_NAME:
    print("Loading BERT model {} from the hub ...".format(BERT_MODEL_HUB_NAME))
    model = BertForQuestionAnswering.from_pretrained(BERT_MODEL_HUB_NAME)
    bert_config_obj = model.config
    model.eval()
    model.to(TORCH_DEVICE)
else:
    print("Loading BERT config from {} ...".format(BERT_MODEL_CONFIG_PATH))
    with open(BERT_MODEL_CONFIG_PATH) as bert_config_file:
        bert_config_dict = json.load(bert_config_file)
        bert_config_obj = BertConfig(**bert_config_dict)

    model = BertForQuestionAnswering(bert_config_obj)
    model.eval()
    model.to(TORCH_DEVICE)
    print("Loading BERT model weights from {} ...".format(
        BERT_MODEL_WEIGHTS_PATH))
    model.load_state_dict(torch.load(BERT_MODEL_WEIGHTS_PATH))

print("Vocabulary size: {}".format(bert_config_obj.vocab_size))

print("Loading tokenized SQuAD dataset as features from {} ...".format(
    SQUAD_DATASET_TOKENIZED_PATH))
with open(SQUAD_DATASET_TOKENIZED_PATH, 'rb') as tokenized_features_file:
    eval_features = pickle.load(tokenized_features_file)

print("Example width: {}".format(len(eval_features[0].input_ids)))