Example #1
def build_vocab_bert(data, threshold):
    counter = collections.Counter()
    sys.stderr.write('building vocab...\n')
    word2idx = {}
    idx2word = {}
    add_word(word2idx, idx2word, '[PAD]')  # padding
    add_word(word2idx, idx2word, '[CLS]')  # start of poem
    add_word(word2idx, idx2word, '[SEP]')  # end of sentence (end of poem)
    # add_word(word2idx, idx2word, '<EOL>')  # end of line
    add_word(word2idx, idx2word, '[UNK]')  # unknown

    basic_tokenizer = BasicTokenizer()

    sys.stderr.write('Parsing data...\n')
    for entry in tqdm(data):
        # tokens = process_one_poem(entry['poem'])
        tokens = basic_tokenizer.tokenize(entry['poem'].replace('\n', ' ; '))
        # tokens = entry['poem'].replace('\n', ' ; ').split()
        counter.update(tokens)
        # [add_word(word2idx, idx2word, word) for word in tokens]

    words = [word for word, cnt in counter.items() if cnt >= threshold]

    sys.stderr.write('Adding words...\n')
    for word in tqdm(words):
        add_word(word2idx, idx2word, word)

    return word2idx, idx2word
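
# The add_word helper is not shown in this example; a minimal sketch consistent
# with how it is called above (an assumption, not the original implementation):
def add_word(word2idx, idx2word, word):
    # Assign the next unused index to any word not already in the vocabulary.
    if word not in word2idx:
        idx = len(word2idx)
        word2idx[word] = idx
        idx2word[idx] = word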
Example #2
def clean_text(text):
    cleaner = BasicTokenizer()
    ret_text = cleaner._run_strip_accents(text)
    ret_text = cleaner._tokenize_chinese_chars(ret_text)

    ret_text = ret_text.replace("[UNK]", " ")
    ret_text = ret_text.replace("[SEP]", " ")
    ret_text = ret_text.replace("[PAD]", " ")
    ret_text = ret_text.replace("[CLS]", " ")
    ret_text = ret_text.replace("[MASK]", " ")

    if ret_text.startswith("##"):
        ret_text = ret_text[2:]

    return ret_text.lower()
Example #3
def process(cased_file,
            output_file,
            bert_model_type='bert-base-cased',
            total=180378072,
            chunk_size=1000000,
            workers=16):  # workers 40
    results = {k: None for k in range(workers)}
    tokenizer = BertTokenizer.from_pretrained(bert_model_type)
    basic_tokenizer = BasicTokenizer(do_lower_case=False)
    fout = open(output_file, 'w')
    offset = 0

    def merge_fn(result):
        worker_id, tokenized, batch_offset = result
        results[worker_id] = tokenized, batch_offset

    for cased_lines in tqdm(get_chunks(cased_file, chunk_size),
                            total=total // chunk_size):
        pool = multiprocessing.Pool()
        size = (len(cased_lines) + workers - 1) // workers
        for i in range(workers):
            start = i * size
            pool.apply_async(tokenize,
                             args=(cased_lines[start:start + size], tokenizer,
                                   basic_tokenizer, i, start),
                             callback=merge_fn)
        pool.close()
        pool.join()
        for lines, batch_offset in results.values():
            for line in lines:
                fout.write(' '.join(line) + '\n')
        offset += len(cased_lines)
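
# The get_chunks helper is not shown here; a minimal sketch consistent with the
# call above, assuming cased_file is a path to a plain-text file (an assumption):
def get_chunks(path, chunk_size):
    # Stream the file and yield lists of at most chunk_size lines.
    chunk = []
    with open(path) as fin:
        for line in fin:
            chunk.append(line.rstrip('\n'))
            if len(chunk) == chunk_size:
                yield chunk
                chunk = []
    if chunk:
        yield chunk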
Example #4
class GloveEncoder(BasicEncoder):
    EMB_FILE = 'tool/GloVe/glove.42B.300d.txt'
    GLOVE_DICT = None

    def __init__(self):
        if GloveEncoder.GLOVE_DICT is None:

            def get_coefs(word, *arr):
                return word, np.asarray(arr, dtype='float32')

            print('loading GloVe file...')
            GloveEncoder.GLOVE_DICT = dict(
                get_coefs(*o.strip().split())
                for o in open(GloveEncoder.EMB_FILE))
            print('GloVe data has been loaded')
        self.tokenizer = BasicTokenizer()

    def encode(self, newslist):
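        # Represent each news string as the mean of the GloVe vectors of its
        # tokens; fall back to a zero vector when no token is in the vocabulary.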
        sen_vec_list = []
        for news in newslist:
            words = self.tokenizer.tokenize(news)
            vec_list = []
            for word in words:
                vec = self.GLOVE_DICT.get(word)
                if vec is not None:
                    vec_list.append(vec)
            if len(vec_list) > 0:
                sen_vec = np.mean(np.array(vec_list), axis=0)
            else:
                sen_vec = np.zeros(GloveEncoder.EMBEDDING_SIZE)
            sen_vec_list.append(sen_vec.tolist())
        return sen_vec_list
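
# Hypothetical usage sketch, assuming the GloVe file exists at EMB_FILE and that
# EMBEDDING_SIZE is defined on the BasicEncoder base class (neither is shown above):
encoder = GloveEncoder()
sentence_vectors = encoder.encode(['markets rallied after the announcement'])
print(len(sentence_vectors[0]))  # 300 dimensions for glove.42B.300d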
Example #5
def main(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    with open('data/multim_poem.json') as f, open(
            'data/unim_poem.json') as unif:
        multim = json.load(f)
        unim = json.load(unif)

    with open('data/poem_features.pkl', 'rb') as f:
        poem_features = pickle.load(f)

    with open('data/img_features.pkl', 'rb') as f:
        img_features = pickle.load(f)

    word2idx, idx2word = util.read_vocab_pickle(args.vocab_path)

    print('vocab size:', len(word2idx))
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    basic_tokenizer = BasicTokenizer()

    model = BertGenerator(len(word2idx))
    model = DataParallel(model)
    model.load_state_dict(torch.load(args.load))
    model.to(device)
    model.eval()

    encoder = PoemImageEmbedModel(device)
    encoder = DataParallel(encoder)
    encoder.load_state_dict(torch.load('saved_model/embedder.pth'))
    encoder = encoder.module.img_embedder.to(device)
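
    # Generate poems from a few pre-computed image and poem features as a sanity check.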

    examples = [
        img_features[0], img_features[1], img_features[2], img_features[8],
        poem_features[0]
    ]

    for feature in examples:
        feature = torch.tensor(feature)
        feature = feature.unsqueeze(0).to(device)
        pred_words = model.module.generate(feature, 70, basic_tokenizer,
                                           tokenizer, word2idx, idx2word, 200,
                                           device, args.temp)
        print(' '.join(pred_words).replace(';', '\n'))
        print()
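
    # Generate poems directly from test images on disk using the image embedder.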

    test_transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor()])
    test_images = glob2.glob('data/test_image_random/*.jpg')
    test_images.sort()
    for test_image in test_images:
        print('img', test_image)
        pred_words = util.generate_from_one_img_bert(test_image, device,
                                                     encoder, model,
                                                     basic_tokenizer,
                                                     tokenizer, word2idx,
                                                     idx2word, args.temp)
        print(' '.join(pred_words).replace(';', ';\n'))
        print()
Example #6
def clean_sentences():
    train_df = pd.read_csv('../../comments_sentiment/data/train.tsv',
                           sep='\t',
                           index_col=0)
    dev_df = pd.read_csv('../../comments_sentiment/data/dev.tsv',
                         sep='\t',
                         index_col=0)
    test_df = pd.read_csv('../../comments_sentiment/data/test.tsv',
                          sep='\t',
                          index_col=0)

    basic_tokenizer = BasicTokenizer(do_lower_case=False)

    train_df = modify_df(train_df, basic_tokenizer)
    dev_df = modify_df(dev_df, basic_tokenizer)
    test_df = modify_df(test_df, basic_tokenizer)

    train_df.to_csv('../../comments_sentiment/data/train_cleaned.tsv',
                    sep='\t')
    dev_df.to_csv('../../comments_sentiment/data/dev_cleaned.tsv', sep='\t')
    test_df.to_csv('../../comments_sentiment/data/test_cleaned.tsv', sep='\t')
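
# The modify_df helper is not shown in this example; a hypothetical sketch,
# assuming the text lives in a 'sentence' column (the column name is a guess):
def modify_df(df, basic_tokenizer):
    # Re-tokenize each sentence with BasicTokenizer and rejoin with spaces.
    df = df.copy()
    df['sentence'] = df['sentence'].astype(str).apply(
        lambda s: ' '.join(basic_tokenizer.tokenize(s)))
    return df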
Example #7
text2 = "The coronavirus disease 2019 (COVID-19) outbreak is an ongoing global health emergence, but the pathogenesis remains unclear. We revealed blood cell immune response profiles using 5' mRNA, TCR and BCR V(D)J transcriptome analysis with single-cell resolution. Data from 134,620 PBMCs and 83,387 TCR and 12,601 BCR clones was obtained, and 56 blood cell subtypes and 23 new cell marker genes were identified from 16 participants. The number of specific subtypes of immune cells changed significantly when compared patients with controls. Activation of the interferon-MAPK pathway is the major defense mechanism, but MAPK transcription signaling is inhibited in cured patients. TCR and BCR V(D)J recombination is highly diverse in generating different antibodies against SARS-CoV-2. Therefore, the interferon-MAPK pathway and TCR- and BCR-produced antibodies play important roles in the COVID-19 immune response. Immune deficiency or immune over-response may result"

text3 = "GEO (P=3.60E-02) and Cordoba ( P =8.02E-03) datasets and confirmed by qPCR ( P =0.001). The most significant SNP, rs3741869 ( P =3.2E-05) in OASIS locus 12p11.21,"

# Load the pre-trained model tokenizer vocabulary files
EN_VOCAB = './bert-large-cased-vocab.txt'

CN_VOCAB = './vocab.txt'

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenized_text = tokenizer.tokenize(text2)
print(tokenized_text)

# tokenizer = BasicTokenizer.from_pretrained(VOCAB)
basicTokenizer = BasicTokenizer()
tokenized_text = basicTokenizer.tokenize(text2)
print(tokenized_text)

# tokenizer = WordpieceTokenizer.from_pretrained(VOCAB)
wordpieceTokenizer = WordpieceTokenizer(CN_VOCAB)
tokenized_text = wordpieceTokenizer.tokenize(text2)
print(tokenized_text)

# Mask a token that we will try to predict back with `BertForMaskedLM`
masked_index = 8
tokenized_text[masked_index] = '[MASK]'
# assert tokenized_text == ['[CLS]', 'who', 'was', 'jim', 'henson', '?', '[SEP]', 'jim', '[MASK]', 'was', 'a', 'puppet',
#                           '##eer', '[SEP]']

# Convert tokens to vocabulary indices
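# A likely next step (an assumption, not part of the original snippet):
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)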
Example #8
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text."""

    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)

    tok_text = " ".join(tokenizer.tokenize(orig_text))

    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            logger.info("Unable to find text: '%s' in '%s'" %
                        (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1

    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)

    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info(
                "Length not equal after stripping spaces: '%s' vs '%s'",
                orig_ns_text, tok_ns_text)
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i

    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]

    if orig_start_position is None:
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text

    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]

    if orig_end_position is None:
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text

    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
Example #9
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Author: Xiaoy LI
# Description:
# mrc_utils.py

import json
import numpy as np

# from data_loader.bert_tokenizer import whitespace_tokenize
from pytorch_pretrained_bert import BasicTokenizer

basicTokenizer = BasicTokenizer()


class InputExample(object):
    def __init__(self,
                 qas_id,
                 query_item,
                 context_item,
                 doc_tokens=None,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None,
                 span_position=None,
                 is_impossible=None,
                 ner_cate=None):
        """
        Desc: