Example #1
class VNNewsDataset(Dataset):
    def __init__(self, data_dir, max_length=150, remove_negative_pair=True):
        super(VNNewsDataset, self).__init__()
        self.data_dir = data_dir
        self.max_length = max_length

        self.sentence_1 = open(os.path.join(self.data_dir, 'Sentences_1.txt'),
                               mode='r',
                               encoding='utf-8-sig').read().split('\n')

        self.sentence_2 = open(os.path.join(self.data_dir, 'Sentences_2.txt'),
                               mode='r',
                               encoding='utf-8-sig').read().split('\n')

        self.labels = open(os.path.join(self.data_dir, 'Labels.txt'),
                           mode='r',
                           encoding='utf-8-sig').read().split('\n')

        self.bpe = fastBPE(BPEConfig)
        self.vocab = Dictionary()
        self.vocab.add_from_file(
            os.path.join(os.getcwd(), '../pretrained',
                         'PhoBERT_base_transformers', 'dict.txt'))
        self.rdr_segmenter = VnCoreNLP(os.path.join('../vncorenlp',
                                                    'VnCoreNLP-1.1.1.jar'),
                                       annotators='wseg',
                                       max_heap_size='-Xmx500m')

        if remove_negative_pair is True:
            self.remove_negative_pair()

    def remove_negative_pair(self):
        self.sentence_1 = [
            sent for idx, sent in enumerate(self.sentence_1)
            if self.labels[idx] == '1'
        ]
        self.sentence_2 = [
            sent for idx, sent in enumerate(self.sentence_2)
            if self.labels[idx] == '1'
        ]
        # Keep the labels aligned with the filtered sentence pairs
        self.labels = [lb for lb in self.labels if lb == '1']

    def encode(self, raw_text):
        line = self.rdr_segmenter.tokenize(raw_text)
        line = ' '.join([' '.join(sent) for sent in line])
        line = re.sub(r' _ ', '_', line)
        subwords = '<s> ' + self.bpe.encode(line) + ' </s>'
        input_ids = self.vocab.encode_line(subwords,
                                           append_eos=False,
                                           add_if_not_exist=False)
        return padding(input_ids, self.max_length)

    def __len__(self):
        assert len(self.sentence_1) == len(self.sentence_2)
        return len(self.sentence_1)

    def __getitem__(self, item):
        sent_1 = self.encode(self.sentence_1[item])
        sent_2 = self.encode(self.sentence_2[item])
        lb = self.labels[item]
        return sent_1, sent_2, lb
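A minimal usage sketch (hypothetical paths; it assumes the BPEConfig and padding helpers imported by this dataset module are available) showing the dataset wrapped in a PyTorch DataLoader:

# Hypothetical usage; 'data/vnnews' is assumed to contain the Sentences_1.txt,
# Sentences_2.txt and Labels.txt files read by VNNewsDataset above.
from torch.utils.data import DataLoader

dataset = VNNewsDataset(data_dir='data/vnnews', max_length=150,
                        remove_negative_pair=False)
loader = DataLoader(dataset, batch_size=16, shuffle=True)

for sent_1, sent_2, labels in loader:
    # sent_1 / sent_2 are padded id sequences; labels are the raw '0'/'1' strings
    pass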
Example #2
def simple_usage():
    # Uncomment this line for debugging
    # logging.basicConfig(level=logging.DEBUG)

   
    vncorenlp_file = r'D:\study\PlagismDetector\PlagismDetector/VnCoreNLP/VnCoreNLP-1.1.1.jar'
    
    sentences = 'VTV đồng ý chia sẻ bản quyền World Cup 2018 cho HTV để khai thác. ' \
                'Nhưng cả hai nhà đài đều phải chờ sự đồng ý của FIFA mới thực hiện được điều này.'

    # Use "with ... as" to close the server automatically
    with VnCoreNLP(vncorenlp_file) as vncorenlp:
        print('Tokenizing:', vncorenlp.tokenize(sentences))
        print('POS Tagging:', vncorenlp.pos_tag(sentences))
        print('Named-Entity Recognizing:', vncorenlp.ner(sentences))
        print('Dependency Parsing:', vncorenlp.dep_parse(sentences))
        print('Annotating:', vncorenlp.annotate(sentences))
        print('Language:', vncorenlp.detect_language(sentences))

    # In this way, you have to close the server manually by calling close function
    vncorenlp = VnCoreNLP(vncorenlp_file)

    print('Tokenizing:', vncorenlp.tokenize(sentences))
    print('POS Tagging:', vncorenlp.pos_tag(sentences))
    print('Named-Entity Recognizing:', vncorenlp.ner(sentences))
    print('Dependency Parsing:', vncorenlp.dep_parse(sentences))
    print('Annotating:', vncorenlp.annotate(sentences))
    print('Language:', vncorenlp.detect_language(sentences))

    # Do not forget to close the server
    vncorenlp.close()
Example #3
class VnSegmentNLP:
    def __init__(self, jar_file='./tokenizer/VnCoreNLP-1.1.1.jar'):
        self.annotator = VnCoreNLP(jar_file, annotators="wseg", max_heap_size='-Xmx2g')

    def word_segment(self, inp: str):
        word_segmented_text = self.annotator.tokenize(inp)
        sentences = [' '.join(word) for word in word_segmented_text]
        return ' '.join(sentences)
Example #4
def main(args):

    print(
        "-" * 20,
        "START",
        "-" * 20,
    )
    nlp = args.nlp
    print("Initialize annotator...")
    #Change this to real path of VnCoreNLP file
    annotator = VnCoreNLP(nlp,
                          annotators="wseg,pos,ner,parse",
                          max_heap_size='-Xmx2g')
    DATA_PATH, MODEL_PATH = args.i, args.o

    # Variables
    num_feature = args.nfeature if args.nfeature else 256
    min_word_count = args.mincount if args.mincount else 2
    window_size = args.window if args.window else 2
    num_epochs = args.nepoch if args.nepoch else 50
    num_worker = multiprocessing.cpu_count()

    # Read corpus
    print("Reading data file...")
    raw_data = ut.read(DATA_PATH).split('\n')
    sentences_tokenized = []

    print("Tokenazing...")
    for line in raw_data:
        line = line.lower()
        word_segmented_text = annotator.tokenize(line)
        # f.write("%s\n" % word_segmented_text)
        for tokens in word_segmented_text:
            sentences_tokenized.append(tokens)

    print('Building model...')
    model = w2v.Word2Vec(size=num_feature,
                         min_count=min_word_count,
                         workers=num_worker,
                         window=window_size)

    model.build_vocab(sentences_tokenized)
    print("Vocabularies count is: %d" % len(model.wv.vocab))

    print("Training word2vec...")
    model.train(sentences=sentences_tokenized,
                total_examples=model.corpus_count,
                epochs=num_epochs)

    print('Build model successfully')
    print('Saving model...')
    if not os.path.exists(MODEL_PATH): os.makedirs(MODEL_PATH)
    model.save(os.path.join(MODEL_PATH, 'word2vec.w2v'))

    print('Done')
    return None
Example #5
File: PhoBert.py Project: bhieu79/senalign
class PhoBert(EmbeddingModel):
    def __init__(self, device: torch.device):
        # print(os.getcwd())
        self.__device = device
        # print(device)
        self.__rdrsegmenter = VnCoreNLP(VnCoreNLP_JAR_PATH, annotators="wseg", max_heap_size='-Xmx500m')
        self.__tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")
        self.__model = AutoModel.from_pretrained("vinai/phobert-base", output_hidden_states=True).to(self.__device)

    def embed_text(self, text: str) -> torch.Tensor:
        """
        Tokenize and embed a sentence with PhoBERT
        """

        # print(sentence)
        line = self.__tokenize(text)
        # print(line)
        # mapping words and their ids in vncorenlp dictionary
        # print(self.__tokenizer.encode(line))
        input_ids = torch.tensor([self.__tokenizer.encode(line)])
        # print(input_ids)
        input_ids = input_ids.to(self.__device)
        with torch.no_grad():
            features = self.__model(input_ids)
        embeddings = self.__to_embedding(features)
        # cleanup
        del input_ids
        return embeddings

    def __tokenize(self, text: str):
        """
        To perform word segmentation
        """
        segments = self.__rdrsegmenter.tokenize(text)
        segmentation = None
        if len(segments) > 1:
            segmentation = " ".join(
                [" ".join(segment) for segment in segments]
            )
        elif len(segments) == 1:
            segmentation = " ".join(segments[0])
        return segmentation

    def __to_embedding(self, features):
        """
        Convert features to sentence embedding
        """
        hidden_states = features[2]
        last_four_layers = [hidden_states[i] for i in (-1, -2, -3, -4)]
        return torch.mean(
            torch.cat(
                tuple(last_four_layers),
                dim=-1
            ),
            dim=1
        ).squeeze()
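A hedged usage sketch comparing two sentence embeddings produced by this class with cosine similarity (VnCoreNLP_JAR_PATH and the EmbeddingModel base class come from the project this snippet belongs to):

# Hypothetical usage; requires the project's VnCoreNLP_JAR_PATH constant and
# network access to download vinai/phobert-base on first run.
import torch
import torch.nn.functional as F

model = PhoBert(torch.device('cpu'))
emb_a = model.embed_text("Hôm nay trời đẹp.")
emb_b = model.embed_text("Thời tiết hôm nay rất đẹp.")

# embed_text returns a 1-D sentence vector, so compare along dim=0
print(F.cosine_similarity(emb_a, emb_b, dim=0).item())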
Example #6
def read_pages(start_page, end_page, doc_file):
    VNCORENLP_FILE_PATH = 'VnCoreNLP/VnCoreNLP-1.1.1.jar'
    vncorenlp = VnCoreNLP(VNCORENLP_FILE_PATH)

    words = []
    doc = pdf2txt(doc_file, range(start_page - 1, end_page))
    for para in doc:
        words.extend(vncorenlp.tokenize(para))

    return words
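Note that read_pages never shuts down the Java server that VnCoreNLP starts. A sketch of the same logic using the "with ... as" form from Example #2, which closes the server automatically (pdf2txt is the helper assumed by the snippet above):

def read_pages_closed(start_page, end_page, doc_file):
    words = []
    # The context manager closes the VnCoreNLP server when the block exits
    with VnCoreNLP('VnCoreNLP/VnCoreNLP-1.1.1.jar') as vncorenlp:
        doc = pdf2txt(doc_file, range(start_page - 1, end_page))
        for para in doc:
            words.extend(vncorenlp.tokenize(para))
    return words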
Example #7
def analyse_data(corpus_path):
    '''
    return output data folder path
    '''

    try:

        vncorenlp_file = r'./VnCoreNLP/VnCoreNLP-1.1.1.jar'
        vncorenlp = VnCoreNLP(vncorenlp_file)
        print('Create VNCoreNLP Object.')

        path = corpus_path.split('/')
        corpus_folder_path = '/'.join(path[:-1])
        corpus_filename = path[-1]
        print("corpus folder: %s" % corpus_folder_path)
        print("corpus filename: %s" % corpus_filename)
        output_data_folder_path = corpus_folder_path + '/output-data/'
        if not os.path.exists(output_data_folder_path):
            os.makedirs(output_data_folder_path)
            print("Created %s folder" % output_data_folder_path)

        fi = open(corpus_path, 'r')
        fo_token = open(output_data_folder_path + corpus_filename + '-token',
                        'w')
        print("Open %s" % corpus_path)
        print("Open %s" % fo_token.name)

        line_number = 0
        for line in fi:
            line_number += 1
        fi.close()
        fi = open(corpus_path, 'r')
        print('We have %d lines in our corpus.' % line_number)

        for count in tqdm(range(line_number)):

            sentences = fi.readline()
            fo_token.write(str(vncorenlp.tokenize(sentences)) + '\n')

        print('Finish analysis data.')

    except Exception as e:
        raise
    finally:
        fi.close()
        fo_token.close()

        print("Close %s" % corpus_path)
        print("Close %s" % fo_token.name)

    return output_data_folder_path
Example #8
class VnCoreTokenizer():
    def __init__(
        self,
        path="/home/thanh/DATN/FakeNewDetection/vncorenlp/VnCoreNLP-1.1.1.jar"
    ):
        self.rdrsegmenter = VnCoreNLP(path,
                                      annotators="wseg",
                                      max_heap_size='-Xmx500m')

    def tokenize(self, text: str) -> str:
        sentences = self.rdrsegmenter.tokenize(text)
        output = ""
        for sentence in sentences:
            output += " ".join(sentence)
        return output
Example #9
def Main():
    vncorenlp_file = VNCORENLP_FILE_PATH
    vncorenlp = VnCoreNLP(vncorenlp_file)
    
    f = open(TEXT_FILE_PATH, 'r', encoding='utf-8')
    text = f.read()
    f.close()

    tokenize = vncorenlp.tokenize(text)
    words, length = total_words_and_len(tokenize, punc, stopwords)

    tf = TF(words, length)
    idf = IDF(words, tokenize)
    tfidf = TFIDF(tf, idf)

    N = 20
    print(get_top(tfidf, N))
Example #10
class PhoBertTokenizer:
    def __init__(self, vncore=True):
        """
        Hacky way to run VnCoreNLP tokenizer with PhoBERT
        :param vncore: Set it to `False` if your sentences are already tokenized by VnCoreNLP
        """
        self.dictionary = Dictionary.load(open(DICT_PATH))
        self.annotator = None
        self.vncore = vncore
        self.bpe = fastBPE(args)
        
    def convert_tokens_to_ids(self, text_spans_bpe):
        return self.dictionary.encode_line(
            '<s> ' + text_spans_bpe + ' </s>',
            append_eos=False,
            add_if_not_exist=False)
    
    def tokenize(self, raw_sentence: str):
        if self.vncore:
            if self.annotator is None:
                self.annotator = VnCoreNLP(VNCORENLP_ADDRESS, port=VNCORENLP_PORT)
            word_tokenizes = ' '.join(sum(self.annotator.tokenize(raw_sentence), []))
        else:
            word_tokenizes = raw_sentence
        return self.bpe.encode(word_tokenizes)
    
    def encode(self, raw_sentence: str):
        return self.convert_tokens_to_ids(self.tokenize(raw_sentence)).long()
    
    def decode(self, tokens: torch.LongTensor, remove_underscore=True):
        assert tokens.dim() == 1
        tokens = tokens.numpy()
        if tokens[0] == self.dictionary.bos():
            tokens = tokens[1:]  # remove <s>
        eos_mask = (tokens == self.dictionary.eos())
        doc_mask = eos_mask[1:] & eos_mask[:-1]
        sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
        if remove_underscore:
            sentences = [self.bpe.decode(self.dictionary.string(s)).replace("_", " ") for s in sentences]
        else:
            sentences = [self.bpe.decode(self.dictionary.string(s)) for s in sentences]
            
        if len(sentences) == 1:
            return sentences[0]
        return sentences
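A hypothetical round trip with this tokenizer (DICT_PATH, args, VNCORENLP_ADDRESS and VNCORENLP_PORT are module-level constants defined elsewhere in the project this snippet comes from):

# Hypothetical usage; the constants above must point at a PhoBERT dictionary,
# a fastBPE config and a running VnCoreNLP server.
tokenizer = PhoBertTokenizer(vncore=True)

ids = tokenizer.encode("Hà Nội là thủ đô của Việt Nam.")  # LongTensor of subword ids
text = tokenizer.decode(ids, remove_underscore=True)      # back to a plain string
print(ids.tolist())
print(text)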
Example #11
def vn_format_to_json(args):
    stories_dir = os.path.abspath(args.raw_path)
    tokenized_stories_dir = os.path.abspath(args.save_path)

    print("Preparing to tokenize %s to %s..." % (stories_dir, tokenized_stories_dir))
    stories = glob.glob(pjoin(args.raw_path, '*.txt'))
    annotator = VnCoreNLP("./vncorenlp/VnCoreNLP-1.1.1.jar", annotators="wseg", max_heap_size='-Xmx500m')

    dataset = []
    for s in stories:
        tgt = []
        source = []
        flag = False
        f = open(pjoin(stories_dir, s), encoding='utf-8')
        for line in f:
            if line == '\n':
                continue
            if line == '@highlight\n':
                flag = True
                continue
            tokens = annotator.tokenize(line)
            if flag:
                tgt.extend(tokens)
            else:
                source = tokens
        dataset.append({"src": [clean(' '.join(sent)).split() for sent in source],
                        "tgt": [clean(' '.join(sent)).split() for sent in tgt]})

    print("Tokenizing %i files in %s" % (len(stories), stories_dir))
    print("VNCoreNLP Tokenizer has finished.")

    valid_test_ratio = 0.1
    all_size = len(dataset)
    test_sets = dataset[:int(all_size * valid_test_ratio)]
    valid_sets = dataset[int(all_size * valid_test_ratio):int(all_size * valid_test_ratio * 2)]
    train_sets = dataset[int(all_size * valid_test_ratio * 2):]
    corpora = {'train': train_sets, 'valid': valid_sets, 'test': test_sets}
    for corpus_type in ['train', 'valid', 'test']:
        p_ct = 0
        for split in [corpora[corpus_type][i * args.shard_size:(i + 1) * args.shard_size] for i in range((len(corpora[corpus_type]) + args.shard_size - 1) // args.shard_size)]:
            pt_file = pjoin(args.save_path, corpus_type + '.' + str(p_ct) + '.json')
            with codecs.open(pt_file, 'w', encoding='utf-8') as save:
                json.dump(split, save, ensure_ascii=False)
            p_ct += 1
Example #12
class VnCoreTokenizer:
    def __init__(self, path="vncorenlp/VnCoreNLP-1.1.1.jar"):
        self.rdrsegmenter = VnCoreNLP(path,
                                      annotators="wseg",
                                      max_heap_size="-Xmx500m")

    def tokenize(self, text: str, return_sentences=False) -> str:
        sentences = self.rdrsegmenter.tokenize(text)
        if return_sentences:
            return [" ".join(sentence) for sentence in sentences]
        # print(sentences)
        output = ""
        for sentence in sentences:
            output += " ".join(sentence) + " "

        return self._strip_white_space(output)

    def _strip_white_space(self, text):
        text = re.sub("\n+", "\n", text).strip()
        text = re.sub(" +", " ", text).strip()
        return text
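A hypothetical usage sketch showing the two output modes of this tokenizer (it relies on the default jar path declared above existing locally):

vn_tokenizer = VnCoreTokenizer()
text = "Hà Nội là thủ đô của Việt Nam. Thành phố rất đông đúc."
print(vn_tokenizer.tokenize(text))                          # one flat, word-segmented string
print(vn_tokenizer.tokenize(text, return_sentences=True))   # list of segmented sentences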
Example #13
class PhoBertTokenizer:
    def __init__(self, max_length=512):
        self.bpe = fastBPE(BPEConfig)
        self.vocab = Dictionary()
        self.vocab.add_from_file(os.path.join(os.getcwd(),
                                              'pretrained',
                                              'PhoBERT_base_transformers',
                                              'dict.txt'))
        self.rdr_segmenter = VnCoreNLP(
            os.path.join('vncorenlp', 'VnCoreNLP-1.1.1.jar'),
            annotators='wseg',
            max_heap_size='-Xmx500m'
        )
        self.max_length = max_length

    def __call__(self, x):
        line = self.rdr_segmenter.tokenize(x)
        line = ' '.join([' '.join(sent) for sent in line])
        line = re.sub(r' _ ', '_', line)
        subwords = '<s> ' + self.bpe.encode(line) + ' </s>'
        input_ids = self.vocab.encode_line(subwords, append_eos=False, add_if_not_exist=False)
        return padding(input_ids, self.max_length)
Example #14
def nlp_tokenize(path):
    data = pd.read_excel(path)
    data = data[['ID', 'Content', 'ID người đăng']]
    data = data.dropna()
    data['Content'] = data['Content'].str.strip()
    data['Content'] = data['Content'].str.lower()
    data['status'] = data['Content']

    for i in range(len(data['status'])):
        data['status'].iloc[i] = re.sub(r'\W+', ' ', data['Content'].iloc[i])
        data['Content'].iloc[i] = data['status'].iloc[i]
    vncorenlp_file = r'VnCoreNLP/VnCoreNLP-1.1.1.jar'
    vncorenlp = VnCoreNLP(vncorenlp_file)
    # content = vncorenlp.tokenize(content)
    for i in range(len(data['status'])):
        data['status'].iloc[i] = vncorenlp.tokenize(data['status'].iloc[i])
    key_word = []
    for i in data['status']:
        key_word = key_word + i

    vncorenlp.close()
    return key_word, data[['Content', 'ID', 'ID người đăng']]
Example #15
class VietnameseTokenizer(WordTokenizer):
    """
    Simple and fast white-space tokenizer. Splits sentence based on white spaces.
    Punctuation are stripped from tokens.
    """
    def __init__(self,
                 vocab: Iterable[str] = [],
                 stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
                 do_lower_case: bool = False,
                 vncorenlp_path=None):
        self.stop_words = set(stop_words)
        self.do_lower_case = do_lower_case
        self.set_vocab(vocab)
        self.vncorenlp_path = vncorenlp_path
        self.rdrsegmenter = VnCoreNLP(vncorenlp_path,
                                      annotators="wseg",
                                      max_heap_size='-Xmx1g')

    def get_vocab(self):
        return self.vocab

    def set_vocab(self, vocab: Iterable[str]):
        self.vocab = vocab
        self.word2idx = collections.OrderedDict([
            (word, idx) for idx, word in enumerate(vocab)
        ])

    def segment(self, text: str) -> str:
        ''' Segment words in text and then flat the list '''
        segmented_word = self.rdrsegmenter.tokenize(text)
        return ' '.join(reduce(operator.concat, segmented_word))

    def tokenize(self, text: str) -> List[int]:
        #segment words in text
        text = self.segment(text)

        if self.do_lower_case:
            text = text.lower()

        tokens = text.split()

        tokens_filtered = []
        for token in tokens:
            if token in self.stop_words:
                continue
            elif token in self.word2idx:
                tokens_filtered.append(self.word2idx[token])
                continue

            token = token.strip(string.punctuation)
            if token in self.stop_words:
                continue
            elif len(token) > 0 and token in self.word2idx:
                tokens_filtered.append(self.word2idx[token])
                continue

            token = token.lower()
            if token in self.stop_words:
                continue
            elif token in self.word2idx:
                tokens_filtered.append(self.word2idx[token])
                continue
            tokens_filtered.append(0)
        return tokens_filtered

    def save(self, output_path: str):
        with open(os.path.join(output_path, 'VietnameseTokenizer_config.json'),
                  'w') as fOut:
            json.dump(
                {
                    'vocab': list(self.word2idx.keys()),
                    'stop_words': list(self.stop_words),
                    'do_lower_case': self.do_lower_case,
                    'vncorenlp_path': self.vncorenlp_path
                }, fOut)

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'VietnameseTokenizer_config.json'),
                  'r') as fIn:
            config = json.load(fIn)

        return VietnameseTokenizer(**config)
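A hypothetical usage sketch (the vocabulary, stop-word list and jar path below are placeholders, not values from the original project):

tokenizer = VietnameseTokenizer(
    vocab=['hà_nội', 'thủ_đô', 'việt_nam'],
    stop_words=['là', 'của'],
    do_lower_case=True,
    vncorenlp_path='vncorenlp/VnCoreNLP-1.1.1.jar',
)
# Returns vocabulary indices for the segmented, lower-cased tokens; unknown tokens map to 0
print(tokenizer.tokenize("Hà Nội là thủ đô của Việt Nam."))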
Example #16
os.system("split -l 10000 './corpus.2M.shuf.txt' 'vi_data/vi'")

from os import listdir
from os.path import isfile, join
_path = 'vi_data'
link = [join(_path, f) for f in listdir(_path) if isfile(join(_path, f))]

os.system("pip install vncorenlp")
from vncorenlp import VnCoreNLP
os.system(
    "wget 'https://github.com/vncorenlp/VnCoreNLP/archive/v1.1.1.zip' -O ./models.$$ && unzip -o ./models.$$ && rm -r ./models.$$"
)
from vncorenlp import VnCoreNLP
segmentNLP = VnCoreNLP('./VnCoreNLP-1.1.1/VnCoreNLP-1.1.1.jar',
                       port=9001,
                       annotators="wseg,pos,ner,parse",
                       quiet=False)

f_w = open('corpus.tokened.2M.shuf.txt', 'w', encoding='utf8')
for file in tqdm(link):
    f = open(file, 'r', encoding='utf8')
    a = f.read().strip().split('\n')
    for i in a:
        s = ''
        tmp = segmentNLP.tokenize(i)
        for j in tmp:
            s += ' '.join(j) + ' '
        f_w.write(s + '\n')
    f.close()
f_w.close()
Example #17
list_dir = os.listdir(
    "/Users/ntdat/Tài liệu/Nghiên Cứu Khoa Học/DataCrawl_TuoiTre/Details/")
details_dir = "/Users/ntdat/Tài liệu/Nghiên Cứu Khoa Học/DataCrawl_TuoiTre/Details/"
descriptions_dir = "/Users/ntdat/Tài liệu/Nghiên Cứu Khoa Học/DataCrawl_TuoiTre/Descriptions/"
count = 0
for dir in list_dir:
    try:
        print(dir)
        save_file = open(
            "/Users/ntdat/Tài liệu/Nghiên Cứu Khoa Học/DataCrawl_TuoiTre/RawDataForBertSum/"
            + str(count) + ".story", "w")
        r_file = open(details_dir + dir, "r")
        lines = r_file.readlines()
        content_body = " ".join(lines)
        sentents = rdrsegmenter.tokenize(content_body)
        content_body = ""
        for s in sentents:
            content_body += " ".join(s) + "\n"

        r_file = open(descriptions_dir + dir, "r")
        lines = r_file.readlines()
        content_summ = ""
        for line in lines:
            # Strings are immutable, so build a new string when adding the trailing period
            if len(line) >= 2 and line[-2].isalpha():
                if line[-1] == " ":
                    line = line[:-1] + "."
                else:
                    line += "."
            content_summ += line + " "
        content_summ = content_summ.replace("\n", " ")
Example #18
if torch.cuda.device_count():
    print(f"Training using {torch.cuda.device_count()} gpus")
    model_bert = nn.DataParallel(model_bert)
    tsfm = model_bert.module.roberta
else:
    tsfm = model_bert.roberta

# Load the dictionary  
vocab = Dictionary()
vocab.add_from_file(args.dict_path)

# Load training data
train_df = pd.read_csv(args.train_path,sep='\t').fillna("###")
train_df.text = train_df.text.progress_apply(
    lambda x: ' '.join(
        [' '.join(sent) for sent in rdrsegmenter.tokenize(x)]
    )
)

y = train_df.label.values
X_train = convert_lines(
    train_df,
    vocab,
    bpe,
    args.max_sequence_length
)
print(y)
print(X_train)

# Creating optimizer and lr schedulers
param_optimizer = list(model_bert.named_parameters())
Example #19
    for i, label in enumerate(labels)
}

label2query = {
    'LOC':
    "thực thể địa danh bao gồm tên gọi các hành tinh, thực thể tự nhiên, địa lí lịch sử, vùng quần cư, công trình kiến trúc xây dụng, địa điểm, địa chỉ",
    'PER':
    "thực thể người bao gồm tên, tên đệm và họ của người, tên động vật, nhân vật hư cấu, bí danh",
    'ORG':
    "thực thể tổ chức bao gồm các cơ quan chính phủ, công ty, thương hiệu, tổ chức chính trị, ấn phẩm, tổ chức công cộng",
    'MISC':
    "thực thể bao gồm quốc tịch, ngôn ngữ, môn học, danh hiệu, cuộc thi",
}

for k in label2query.keys():
    label2query[k] = ' '.join(sum(annotator.tokenize(label2query[k]), []))


def process_sentence(context: str, positions: dict, sample_idx):
    res = []
    for label in entity_labels:
        poses = positions.get(label, [])
        new_poses = [(s, e - 1) for s, e in poses]
        label = map2true_labels.get(label, label)
        qas_id = f'{sample_idx}.{label2idx[label]}'
        start_position, end_position = zip(
            *new_poses) if len(new_poses) > 0 else ([], [])
        span_position = [f"{s};{e}" for s, e in new_poses]
        res.append({
            'qas_id': qas_id,
            'context': context,
Example #20
    "/hdd/Zalo_Team_HoaChan/resources/VnCoreNLP/VnCoreNLP-1.1.1.jar",
    annotators="wseg,pos,ner,parse",
    max_heap_size='-Xmx2g')

# To perform word segmentation, POS tagging and then NER
# annotator = VnCoreNLP("<FULL-PATH-to-VnCoreNLP-jar-file>", annotators="wseg,pos,ner", max_heap_size='-Xmx2g')
# To perform word segmentation and then POS tagging
# annotator = VnCoreNLP("<FULL-PATH-to-VnCoreNLP-jar-file>", annotators="wseg,pos", max_heap_size='-Xmx2g')
# To perform word segmentation only
# annotator = VnCoreNLP("<FULL-PATH-to-VnCoreNLP-jar-file>", annotators="wseg", max_heap_size='-Xmx500m')

# Input
text = "Tuy vậy, 1 phút sau, Chelsea đã cụ thể hóa sức ép liên tục bằng bàn thắng. Người lập công cho The Blues là đội trưởng Cahill với cú đánh đầu cận thành từ tình huống phạt góc."

# To perform word segmentation, POS tagging, NER and then dependency parsing
annotated_text = annotator.annotate(text)

# To perform word segmentation only
word_segmented_text = annotator.tokenize(text)

print('annotated_text: ', annotated_text)
print('word_segmented_text: ', word_segmented_text)

# Each token in the annotated output is a dict of the following form:
{
    'index': 7,
    'form': 'Cahill',
    'posTag': 'Np',
    'nerLabel': 'B-PER',
    'head': 6,
    'depLabel': 'nmod'
}
Example #21
File: infer.py Project: tda1312/NLP_zalo
model_bert = RobertaForAIViVN.from_pretrained(args.pretrained_path, config=config)
model_bert.cuda()

# Load the dictionary  
vocab = Dictionary()
vocab.add_from_file(args.dict_path)

if torch.cuda.device_count():
    print(f"Testing using {torch.cuda.device_count()} gpus")
    model_bert = nn.DataParallel(model_bert)
    tsfm = model_bert.module.roberta
else:
    tsfm = model_bert.roberta

test_df = pd.read_csv(args.test_path,sep='\t').fillna("###")
test_df.text = test_df.text.progress_apply(lambda x: ' '.join([' '.join(sent) for sent in rdrsegmenter.tokenize(x)]))
X_test = convert_lines(test_df, vocab, bpe,args.max_sequence_length)

preds_en = []
for fold in range(1):
    print(f"Predicting for fold {fold}")
    preds_fold = []
    #model_bert.load_state_dict(torch.load(os.path.join(args.ckpt_path, f"model_{fold}.bin")))
    model_bert.load_state_dict(torch.load(os.path.join(args.ckpt_path, f"model_0.bin")))
    test_dataset = torch.utils.data.TensorDataset(torch.tensor(X_test,dtype=torch.long))
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
    model_bert.eval()
    pbar = tqdm(enumerate(test_loader),total=len(test_loader),leave=False)
    for i, (x_batch,) in pbar:
        y_pred = model_bert(x_batch.cuda(), attention_mask=(x_batch>0).cuda())
        y_pred = y_pred.detach().cpu().numpy()
Example #22
def dataloader_from_text(text_file=None,
                         tokenizer=None,
                         classes=[],
                         savetodisk=None,
                         loadformdisk=None,
                         segment=False,
                         max_len=256,
                         batch_size=16,
                         infer=False):
    ids_padded, masks, labels = [], [], []
    if loadformdisk == None:
        #segementer
        if segment:
            rdrsegmenter = VnCoreNLP("./vncorenlp/VnCoreNLP-1.1.1.jar",
                                     annotators="wseg",
                                     max_heap_size='-Xmx500m')
        texts = []
        print("LOADDING TEXT FILE")
        with open(text_file, 'r') as f_r:
            for sample in tqdm(f_r):
                if infer:
                    text = sample.strip()
                    if segment:
                        text = rdrsegmenter.tokenize(text)
                        text = ' '.join([' '.join(x) for x in text])
                    texts.append(text)
                else:
                    splits = sample.strip().split(" ", 1)
                    label = classes.index(splits[0])
                    text = splits[1]
                    if segment:
                        text = rdrsegmenter.tokenize(text)
                        text = ' '.join([' '.join(x) for x in text])
                    labels.append(label)
                    texts.append(text)

        print("TEXT TO IDS")
        ids = []
        for text in tqdm(texts):
            encoded_sent = tokenizer.encode(text)
            ids.append(encoded_sent)

        del texts
        # print("PADDING IDS")
        ids_padded = pad_sequences(ids,
                                   maxlen=max_len,
                                   dtype="long",
                                   value=0,
                                   truncating="post",
                                   padding="post")
        del ids
        # print("CREATE MASK")
        # for sent in tqdm(ids_padded):
        #     masks.append(make_mask(sent))

        if savetodisk != None and not infer:
            with open(savetodisk, 'wb') as f:
                pickle.dump(ids_padded, f)
                # pickle.dump(masks, f)
                pickle.dump(labels, f)
            print("SAVED IDS DATA TO DISK")
    else:
        print("LOAD FORM DISK")
        if loadformdisk != None:
            try:
                with open(savetodisk, 'rb') as f:
                    ids_padded = pickle.load(ids_padded, f)
                    # masks = pickle.load(masks, f)
                    labels = pickle.load(labels, f)
                print("LOADED IDS DATA FORM DISK")
            except:
                print("LOAD DATA FORM DISK ERROR!")

    print("CONVERT TO TORCH TENSOR")
    ids_inputs = torch.tensor(ids_padded)
    del ids_padded
    # masks = torch.tensor(masks)
    if not infer:
        labels = torch.tensor(labels)

    print("CREATE DATALOADER")
    if infer:
        # input_data = TensorDataset(ids_inputs, masks)
        input_data = TensorDataset(ids_inputs)
    else:
        input_data = TensorDataset(ids_inputs, labels)
        # input_data = TensorDataset(ids_inputs, masks, labels)
    input_sampler = SequentialSampler(input_data)
    dataloader = DataLoader(input_data,
                            sampler=input_sampler,
                            batch_size=batch_size)

    print("len dataloader:", len(dataloader))
    print("LOAD DATA ALL DONE")
    return dataloader
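A hypothetical call sketch; the label-prefixed line format ("<label> <sentence>") follows what the function itself parses, and the tokenizer only needs an encode(text) method returning a list of ids, like the PhoBERT-style tokenizers shown earlier:

train_loader = dataloader_from_text(text_file='train.txt',
                                    tokenizer=tokenizer,
                                    classes=['negative', 'positive'],
                                    segment=True,
                                    max_len=256,
                                    batch_size=16)
for ids_batch, label_batch in train_loader:
    # ids_batch: [batch_size, max_len] LongTensor, label_batch: [batch_size] LongTensor
    pass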
Example #23
 ]:
     try:
         if sent.replace('.', '') in temp:
             t = list(
                 filter(
                     lambda x:
                     (x['text'].replace('.', '') == sent.replace(
                         '.', '')), dict_))[0]
             t_ = {
                 'event_type':
                 t['event_type'],
                 'event_id':
                 t['event_id'],
                 'text':
                 ' '.join([
                     ' '.join(u) for u in annotator.tokenize(t['text'])
                 ]),
                 'event_label': [0] * 6
             }
             if 'goal_info' in t['event_type']:
                 t_['event_label'][1] = 1
             if 'match_info' in t['event_type']:
                 t_['event_label'][2] = 1
             if 'match_result' in t['event_type']:
                 t_['event_label'][3] = 1
             if 'card_info' in t['event_type']:
                 t_['event_label'][4] = 1
             if 'substitution' in t['event_type']:
                 t_['event_label'][5] = 1
             list_.append(t_)
         else:
Example #24
from vncorenlp import VnCoreNLP
rdrsegmenter = VnCoreNLP("./VnCoreNLP-1.1.1.jar",
                         annotators="wseg", max_heap_size='-Xmx500m')

# Input
text = """Sau thành công ở các phim Giã Từ Dĩ Vãng, Đồng Tiền Xương Máu, Lục Vân Tiên .. và đặc biệt là vai cô Dần trong  phim điện ảnh Áo Lụa Hà Đông, Trương Ngọc Ánh dường như “quên” hẳn chốn phim trường, cô dành thời gian cho bé gái Bảo Tiên và ông xã Trần Bảo Sơn. Vai diễn mới nhất của cô trong bộ phim truyền hình Tình Yêu Và Tham Vọng dài 80 tập, hứa hẹn những bất ngờ và sự thay đổi lớn.
Với "Tình Yêu Và Tham Vọng", Trương Ngọc Ánh đã mất một tháng để đọc đi đọc lại kịch bản, trao đổi với đạo diễn, quyết định nhận vai và trở lại với màn ảnh nhỏ. Bởi nhân vật chính trong phim, ngoài cái tên Ánh như một sự trùng hợp thú vị, thì những trải nghiệm của cô ấy cũng gần như là những gì chị đã nhìn thấy, đã cảm nhận, đã trải qua. Phim được Việt hóa kịch bản từ kịch bản cùng tên của Hàn Quốc.
Cô gái Nguyễn Thị Ánh quê ở miền Trung lên thành phố làm diễn viên, nhanh chóng có được hào quang nhưng kèm theo đó là nhiều thị phi, ganh ghét. Ðây cũng là lần đầu tiên Trương Ngọc Ánh sẽ "sống" hết cuộc đời của nhân vật suốt từ khi cô ấy 18 tuổi cho đến lúc đã thành người đàn bà trên 50.
Tính cách của nhân vật lần này cũng khác, ẩn chứa bên trong vẻ ngoài mạnh mẽ, bất cần sẽ là một tâm hồn cô đơn, một trái tim mềm yếu và cần được che chở.
Phim sẽ được quay trong sáu tháng tại Phan Rang, Bình Dương và TP.HCM. Ðể chuẩn bị cho sự trở lại với "tình yêu sâu đậm" trong sáu tháng tới, hiện Trương Ngọc Ánh đã sắp xếp công việc kinh doanh, đưa mẹ vào Sài Gòn để giúp trông cháu, tranh thủ đưa cả gia đình đi chơi biển để "đền bù" trước. Có một điều chị hơi băn khoăn là nhân vật lần này nghiện rượu nặng, phải diễn thế nào cho đạt sẽ là một thử thách."""
# print(text)

# To perform word (and sentence) segmentation
sentences = rdrsegmenter.tokenize(text)
for sentence in sentences:
    print(" ".join(sentence))
Example #25
class PhoTokenizer(object):
    """ Process input for PhoBERT

        Segment words and then convert them into ids
    """

    model_input_names: List[str] = ["attention_mask"]

    model_max_length: int = 258
    padding_side: str = "right"
    _pad_token_type_id: int = 0
    

    @property
    def pad_token_type_id(self):
        """ Id of the padding token type in the vocabulary."""
        return self._pad_token_type_id

    @property
    def bos_token_id(self):
        """ Id of the beginning of sentence token in the vocabulary."""
        return self.vocab.bos_index

    @property
    def eos_token_id(self):
        """ Id of the end of sentence token in the vocabulary."""
        return self.vocab.eos_index

    @property
    def unk_token_id(self):
        """ Id of the unknown token in the vocabulary."""
        return self.vocab.unk_index

    @property
    def sep_token_id(self):
        """ Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence."""
        return self.vocab.eos_index

    @property
    def pad_token_id(self):
        """ Id of the padding token in the vocabulary."""
        return self.vocab.pad_index

    @property
    def cls_token_id(self):
        """ Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model."""
        return self.vocab.bos_index

    def __init__(self, bpe_path: str, vncorenlp_path: str, do_lower_case: bool = False):
        bpe_codes_path = os.path.join(bpe_path, BPECODE_FILE)
        vocab_file_path = os.path.join(bpe_path, VOCAB_FILE)
        
        if not os.path.isfile(bpe_codes_path):
            raise EnvironmentError(f"{BPECODE_FILE} not found in {bpe_path}")
            
        if not os.path.isfile(vocab_file_path):
            raise EnvironmentError(f"{VOCAB_FILE} not found in {bpe_path}")

        self.do_lower_case = do_lower_case
        
        BPEConfig = namedtuple('BPEConfig', 'vncorenlp bpe_codes vocab')

        self.pho_config = BPEConfig(vncorenlp=vncorenlp_path, bpe_codes=bpe_codes_path, vocab=vocab_file_path)
        self.rdrsegmenter = VnCoreNLP(self.pho_config.vncorenlp, annotators="wseg", max_heap_size='-Xmx1g')
        self.bpe = fastBPE(self.pho_config)
        self.vocab = Dictionary()
        self.vocab.add_from_file(self.pho_config.vocab)

    @staticmethod
    def load(model_path: str, **kwargs):
        config_path = os.path.join(model_path, CONFIG_FILE)
        config = {}
        if os.path.exists(config_path):
            with open(config_path, 'r') as fIn:
                config = json.load(fIn)
        elif len(kwargs) == 0:
            raise EnvironmentError("{CONFIG_FILE} not found. Please initialize model instead of using load method.")
        
        config.update(kwargs)
        
        return PhoTokenizer(**config)

    def save(self, output_path: str):
        with open(os.path.join(output_path, CONFIG_FILE), 'w') as fOut:
            json.dump({'bpe_path': self.pho_config.vocab.replace(VOCAB_FILE, ''), 'vncorenlp_path': self.pho_config.vncorenlp, 'do_lower_case': self.do_lower_case}, fOut)

    def segment(self, text: str) -> str:
        ''' Segment words in text and then flat the list '''
        segmented_word = self.rdrsegmenter.tokenize(text)
        return ' '.join(reduce(operator.concat, segmented_word))
        
    def convert_tokens_to_ids(self, text: str) -> List[int]:
        return self.vocab.encode_line(text, append_eos=False, add_if_not_exist=False).tolist()

    def tokenize(self, text: str) -> str:
        if self.do_lower_case:
            # str.lower returns a new string; assign it back instead of discarding it
            text = text.lower()

        sent = self.segment(text)
        return self.bpe.encode(sent)
    
    # def encode(self, text: str) -> List[int]:
    #     return self.convert_tokens_to_ids(self.tokenize(text))
         

    def prepare_for_model(
        self,
        ids: List[int],
        pair_ids: Optional[List[int]] = None,
        max_length: Optional[int] = None,
        add_special_tokens: bool = True,
        stride: int = 0,
        truncation_strategy: str = "longest_first",
        pad_to_max_length: bool = False,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_lengths: bool = False,
    ) -> BatchEncoding:
        """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model.
        It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens

        Args:
            ids: list of tokenized input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            max_length: maximum length of the returned list. Will truncate by taking into account the special tokens.
            add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            stride: window stride for overflowing tokens. Can be useful to remove edge effect when using sequential
                list of inputs. The overflowing tokens will contain a part of the previous window of tokens.
            truncation_strategy: string selected in the following options:
                - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
                    starting from the longest one at each token (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
            pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and
                padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length.
                The tokenizer padding sides are handled by the following strings:
                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
                Defaults to False: no padding.
            return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
                or PyTorch torch.Tensor instead of a list of python integers.
            return_token_type_ids: (optional) Set to False to avoid returning token_type_ids (default: set to model specifics).
            return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
            return_overflowing_tokens: (optional) Set to True to return overflowing token information (default False).
            return_special_tokens_mask: (optional) Set to True to return special tokens mask information (default False).
            return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If set the resulting dictionary will include the length of each encoded inputs

        Return:
            A Dictionary of shape::

                {
                    input_ids: list[int],
                    token_type_ids: list[int] if return_token_type_ids is True (default)
                    overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
                    num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
                    special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
                    length: int if return_lengths is True
                }

            With the fields:
                - ``input_ids``: list of token ids to be fed to a model
                - ``token_type_ids``: list of token type ids to be fed to a model

                - ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
                - ``num_truncated_tokens``: number of overflowing tokens when a ``max_length`` is specified
                - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added
                    tokens and 1 specifying sequence tokens.
                - ``length``: this is the length of ``input_ids``
        """
        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0

        # Load from model defaults
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        encoded_inputs = {}

        # Truncation: Handle max sequence length
        total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
        if max_length and total_len > max_length:
            ids, pair_ids, overflowing_tokens = self.truncate_sequences(
                ids,
                pair_ids=pair_ids,
                num_tokens_to_remove=total_len - max_length,
                truncation_strategy=truncation_strategy,
                stride=stride,
            )
            if return_overflowing_tokens:
                encoded_inputs["overflowing_tokens"] = overflowing_tokens
                encoded_inputs["num_truncated_tokens"] = total_len - max_length

        # Add special tokens
        if add_special_tokens:
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])

        # Build output dictionary
        encoded_inputs["input_ids"] = sequence
        if return_token_type_ids:
            encoded_inputs["token_type_ids"] = token_type_ids
        if return_special_tokens_mask:
            if add_special_tokens:
                encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
            else:
                encoded_inputs["special_tokens_mask"] = [0] * len(sequence)

        # Check lengths
        assert max_length is None or len(encoded_inputs["input_ids"]) <= max_length
        if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length:
            logger.warning(
                "Token indices sequence length is longer than the specified maximum sequence length "
                "for this model ({} > {}). Running this sequence through the model will result in "
                "indexing errors".format(len(ids), self.model_max_length)
            )

        # Padding
        needs_to_be_padded = pad_to_max_length and (
            max_length
            and len(encoded_inputs["input_ids"]) < max_length
            or max_length is None
            and len(encoded_inputs["input_ids"]) < self.model_max_length
            and self.model_max_length <= LARGE_INTEGER
        )

        if pad_to_max_length and max_length is None and self.model_max_length > LARGE_INTEGER:
            logger.warning(
                "Sequence can't be padded as no maximum length is specified and the model maximum length is too high."
            )

        if needs_to_be_padded:
            difference = (max_length if max_length is not None else self.model_max_length) - len(
                encoded_inputs["input_ids"]
            )
            if self.padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference
                if return_token_type_ids:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if return_special_tokens_mask:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
            elif self.padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"])
                if return_token_type_ids:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if return_special_tokens_mask:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        else:
            if return_attention_mask:
                encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])

        if return_lengths:
            encoded_inputs["length"] = len(encoded_inputs["input_ids"])

        # Prepare model inputs as tensors if asked
        if return_tensors == "pt":
            encoded_inputs["input_ids"] = torch.tensor([encoded_inputs["input_ids"]])

            if "token_type_ids" in encoded_inputs:
                encoded_inputs["token_type_ids"] = torch.tensor([encoded_inputs["token_type_ids"]])

            if "attention_mask" in encoded_inputs:
                encoded_inputs["attention_mask"] = torch.tensor([encoded_inputs["attention_mask"]])
        elif return_tensors is not None:
            logger.warning(
                "Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
                    return_tensors
                )
            )

        return BatchEncoding(encoded_inputs)

    def num_special_tokens_to_add(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.

        Note:
            This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
            inside your training loop.

        Args:
            pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
                number of added tokens in the case of a single sequence if set to False.

        Returns:
            Number of tokens added to sequences
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks
        by concatenating and adding special tokens.
        A RoBERTa sequence has the following format:

        - single sequence: ``<s> X </s>``
        - pair of sequences: ``<s> A </s></s> B </s>``

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added
            token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep


    def truncate_sequences(
        self,
        ids: List[int],
        pair_ids: Optional[List[int]] = None,
        num_tokens_to_remove: int = 0,
        truncation_strategy: str = "longest_first",
        stride: int = 0,
    ) -> Tuple[List[int], List[int], List[int]]:
        """ Truncates a sequence pair in place to the maximum length.

        Args:
            ids: list of tokenized input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            num_tokens_to_remove (:obj:`int`, `optional`, defaults to ``0``):
                number of tokens to remove using the truncation strategy
            truncation_strategy: string selected in the following options:
                - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
                    starting from the longest one at each token (when there is a pair of input sequences).
                    Overflowing tokens only contains overflow from the first sequence.
                - 'only_first': Only truncate the first sequence. Raise an error if the first sequence is shorter than or equal to num_tokens_to_remove.
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
            stride (:obj:`int`, `optional`, defaults to ``0``):
                If set to a number along with max_length, the overflowing tokens returned will contain some tokens
                from the main sequence returned. The value of this argument defines the number of additional tokens.
        """
        if num_tokens_to_remove <= 0:
            return ids, pair_ids, []

        if truncation_strategy == "longest_first":
            overflowing_tokens = []
            for _ in range(num_tokens_to_remove):
                if pair_ids is None or len(ids) > len(pair_ids):
                    overflowing_tokens = [ids[-1]] + overflowing_tokens
                    ids = ids[:-1]
                else:
                    pair_ids = pair_ids[:-1]
            window_len = min(len(ids), stride)
            if window_len > 0:
                overflowing_tokens = ids[-window_len:] + overflowing_tokens
        elif truncation_strategy == "only_first":
            assert len(ids) > num_tokens_to_remove
            window_len = min(len(ids), stride + num_tokens_to_remove)
            overflowing_tokens = ids[-window_len:]
            ids = ids[:-num_tokens_to_remove]
        elif truncation_strategy == "only_second":
            assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
            window_len = min(len(pair_ids), stride + num_tokens_to_remove)
            overflowing_tokens = pair_ids[-window_len:]
            pair_ids = pair_ids[:-num_tokens_to_remove]
        elif truncation_strategy == "do_not_truncate":
            raise ValueError("Input sequence are too long for max_length. Please select a truncation strategy.")
        else:
            raise ValueError(
                "Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']"
            )
        return (ids, pair_ids, overflowing_tokens)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        RoBERTa does not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of ids.
            token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of zeros.

        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of ids.
            token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True if the token list is already formatted with special tokens for the model

        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formated with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
            List[PreTokenizedInputPair],
            List[EncodedInput],
            List[EncodedInputPair],
        ],
        add_special_tokens: bool = True,
        max_length: Optional[int] = None,
        stride: int = 0,
        truncation_strategy: str = "longest_first",
        pad_to_max_length: bool = False,
        is_pretokenized: bool = False,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_masks: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_masks: bool = False,
        return_offsets_mapping: bool = False,
        return_lengths: bool = False,
        **kwargs
    ) -> BatchEncoding:
        """
        Returns a dictionary containing the encoded sequence or sequence pair and additional information:
        the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.

        Args:
            batch_text_or_text_pairs (:obj:`List[str]`,  :obj:`List[Tuple[str, str]]`,
                                      :obj:`List[List[str]]`,  :obj:`List[Tuple[List[str], List[str]]]`,
                                      and for not-fast tokenizers, also:
                                      :obj:`List[List[int]]`,  :obj:`List[Tuple[List[int], List[int]]]`):
                Batch of sequences or pair of sequences to be encoded.
                This can be a list of string/string-sequences/int-sequences or a list of pair of
                string/string-sequences/int-sequence (see details in encode_plus)
            add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
                If set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
                If set to a number, will limit the total sequence returned so that it has a maximum length.
                If there are overflowing tokens, those will be added to the returned dictionary
            stride (:obj:`int`, `optional`, defaults to ``0``):
                If set to a number along with max_length, the overflowing tokens returned will contain some tokens
                from the main sequence returned. The value of this argument defines the number of additional tokens.
            truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
                String selected in the following options:

                - 'longest_first' (default): Iteratively reduce the input sequences, removing one token at a time from
                  the longest sequence, until the total length is under max_length (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
            pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If set to True, the returned sequences will be padded according to the model's padding side and
                padding index, up to their max length. If no max length is specified, the padding is done up to the
                model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
                which can be set to the following strings:

                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
                Defaults to False: no padding.
            is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
                Set to True to indicate the input is already tokenized
            return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
                Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
                or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
            return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
                Whether to return token type IDs. If left to the default, will return the token type IDs according
                to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.

                `What are token type IDs? <../glossary.html#token-type-ids>`_
            return_attention_masks (:obj:`bool`, `optional`, defaults to :obj:`None`):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.

                `What are attention masks? <../glossary.html#attention-mask>`__
            return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return overflowing token information (default False).
            return_special_tokens_masks (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return special tokens mask information (default False).
            return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return (char_start, char_end) for each token (default False).
                If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on
                Rust-based tokenizers inheriting from PreTrainedTokenizerFast.
            return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If set to True, the resulting dictionary will include the length of each encoded input
            **kwargs: passed to the `self.tokenize()` method

        Return:
            A Dictionary of shape::

                {
                    input_ids: List[List[int]],
                    token_type_ids: List[List[int]] if return_token_type_ids is True (default)
                    attention_mask: List[List[int]] if return_attention_mask is True (default)
                    overflowing_tokens: List[List[int]] if a ``max_length`` is specified and return_overflowing_tokens is True
                    num_truncated_tokens: List[int] if a ``max_length`` is specified and return_overflowing_tokens is True
                    special_tokens_mask: List[List[int]] if ``add_special_tokens`` is set to ``True`` and return_special_tokens_mask is True
                }

            With the fields:

            - ``input_ids``: list of token ids to be fed to a model
            - ``token_type_ids``: list of token type ids to be fed to a model
            - ``attention_mask``: list of indices specifying which tokens should be attended to by the model
            - ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
            - ``num_truncated_tokens``: number of overflowing tokens when a ``max_length`` is specified
            - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying added special
              tokens and 0 specifying regular sequence tokens.
        """

        def get_input_ids(text):
            if isinstance(text, str):
                tokens = self.tokenize(text)
                return self.convert_tokens_to_ids(tokens)
            elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
                return self.convert_tokens_to_ids(text)
            elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
                return text
            else:
                raise ValueError(
                    "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
                )

        # Throw an error if padding is requested but there is no padding token to pad with
        if pad_to_max_length and self.pad_token_id is None:
            raise ValueError(
                "Unable to set proper padding strategy as the tokenizer does not have a padding token. In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via the function add_special_tokens if you want to use a padding strategy"
            )

        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers."
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast."
                "More information on available tokenizers at "
                "https://github.com/huggingface/transformers/pull/2674"
            )

        input_ids = []
        for ids_or_pair_ids in batch_text_or_text_pairs:
            if isinstance(ids_or_pair_ids, (list, tuple)) and len(ids_or_pair_ids) == 2 and not is_pretokenized:
                ids, pair_ids = ids_or_pair_ids
            else:
                ids, pair_ids = ids_or_pair_ids, None

            first_ids = get_input_ids(ids)
            second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
            input_ids.append((first_ids, second_ids))

        if max_length is None and pad_to_max_length:

            def total_sequence_length(input_pairs):
                first_ids, second_ids = input_pairs
                return len(first_ids) + (
                    self.num_special_tokens_to_add()
                    if second_ids is None
                    else (len(second_ids) + self.num_special_tokens_to_add(pair=True))
                )

            max_length = max([total_sequence_length(ids) for ids in input_ids])

        batch_outputs = {}
        for first_ids, second_ids in input_ids:
            # Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by
            # the model. It adds special tokens, truncates sequences if overflowing while taking into account
            # the special tokens and manages a window stride for overflowing tokens
            outputs = self.prepare_for_model(
                first_ids,
                pair_ids=second_ids,
                max_length=max_length,
                pad_to_max_length=pad_to_max_length,
                add_special_tokens=add_special_tokens,
                stride=stride,
                truncation_strategy=truncation_strategy,
                return_attention_mask=return_attention_masks,
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_masks,
                return_lengths=return_lengths,
                return_tensors=None,  # We will convert the whole batch to tensors at the end
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        if return_tensors is not None:
            self.convert_to_tensors_(batch_outputs, return_tensors)
        return BatchEncoding(batch_outputs)

    def convert_ids_to_tokens(
        self, ids: Union[int, List[int]]
    ) -> Union[str, List[str]]:
        """ Converts a single index or a sequence of indices (integers) into a token
            (resp. a sequence of tokens, str), using the vocabulary and added tokens.
        """
        if isinstance(ids, int):
            return self.vocab[ids]

        tokens = []
        for index in ids:
            index = int(index)
            tokens.append(self.vocab[index])
        return tokens

    def save_pretrained(self, *args, **kwargs):
        # No-op stub: nothing is written to disk; empty paths are returned only to
        # keep the save_pretrained interface of the base tokenizer class.
        return ('', '', '', '')
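
A minimal usage sketch for the tokenizer class above, assuming `tokenizer` is an already-constructed instance with its vocabulary and BPE codes loaded; the Vietnamese sentence pairs are placeholders and would normally be word-segmented first:

batch = tokenizer.batch_encode_plus(
    [("Hôm nay trời đẹp .", "Thời tiết hôm nay rất đẹp ."),
     ("Tôi đi học .", "Tôi ở nhà .")],
    max_length=64,
    pad_to_max_length=True,
    return_attention_masks=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape)       # e.g. (2, 64) when neither pair exceeds max_length
print(batch["attention_mask"].shape)  # same shape; 1 marks real tokens, 0 marks padding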
예제 #26
# tokenizer object
vn_tokenizer = VnCoreNLP(paths.vncore_jar_path,
                         annotators="wseg",
                         max_heap_size='-Xmx500m')

print("==================== Updating Train Dataset ====================")
train_df = pd.read_excel(paths.train_path)

# label encoder
y_train = train_df["label"].values
y_train = le.transform(y_train)

# word-segment the data
train_df["cau_hoi"] = train_df["cau_hoi"].progress_apply(
    lambda x: ' '.join([' '.join(sent) for sent in vn_tokenizer.tokenize(x)]))

X_train = train_df["cau_hoi"].values

# preprocessing and data augmentation
X_pos = X_train[y_train == 0]
X_neg = X_train[y_train == 1]
X_neu = X_train[y_train == 2]

pos_list = [ViTokenizer.tokenize(w) for w in constant.tu_dien_tich_cuc]
neg_list = [ViTokenizer.tokenize(w) for w in constant.tu_dien_tieu_cuc]

print(X_pos)
print(pos_list)

X_positive = np.concatenate((X_pos, np.asarray(pos_list)), axis=0)
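
The join-the-segmenter-output pattern above appears in several of these examples; a small hedged sketch of factoring it into a helper (assuming `vn_tokenizer` is the VnCoreNLP instance created at the top of this snippet):

def segment(text: str) -> str:
    # VnCoreNLP returns a list of sentences, each a list of word-segmented tokens;
    # join them back into one space-separated string of '_'-joined compounds.
    return ' '.join(' '.join(sent) for sent in vn_tokenizer.tokenize(text))

# the progress_apply above could then be written as:
# train_df["cau_hoi"] = train_df["cau_hoi"].progress_apply(segment)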
예제 #27
from vncorenlp import VnCoreNLP

txt = 'học sinh học sinh học'

# Init & load model
vncore_nlp = VnCoreNLP(annotators="wseg pos ner parse")

# Use tokenize only
print(vncore_nlp.tokenize(txt, str=True))
print()
print(vncore_nlp.tokenize(txt, str=False))
print()
print(vncore_nlp.extract(txt))
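
The `VnCoreNLP(annotators="wseg pos ner parse")` constructor and the `str=True` / `extract` calls above suggest a project-local wrapper rather than the published `vncorenlp` package, whose server is started from the VnCoreNLP jar and whose `tokenize` returns a list of word lists. A minimal sketch under that assumption (the jar path is a placeholder):

from vncorenlp import VnCoreNLP

with VnCoreNLP('./VnCoreNLP-1.1.1.jar', annotators="wseg", max_heap_size='-Xmx500m') as nlp:
    sents = nlp.tokenize('học sinh học sinh học')   # list of word lists, one per sentence
    print(' '.join(' '.join(s) for s in sents))     # single word-segmented string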
예제 #28
parser.add_argument('--bpe-codes', default="./phobert/bpe.codes", type=str, help='path to fastBPE codes')

args = parser.parse_args()
bpe = AutoTokenizer.from_pretrained("vinai/phobert-base", use_fast=False)
rdrsegmenter = VnCoreNLP(args.rdrsegmenter_path, annotators="wseg", max_heap_size='-Xmx500m') 

seed_everything(69)

# Load training data
print('load training data')
train_df = pd.read_csv(args.train_path)

if os.path.isfile('./dataset/X_train.npy') :
    X_train = np.load('./dataset/X_train.npy')
else:
    train_df.text = train_df['text'].progress_apply(lambda x: ' '.join([' '.join(sent) for sent in rdrsegmenter.tokenize(x)]))
    X_train = convert_text_to_input_ids(train_df, bpe, args.max_sequence_length)
    np.save('./dataset/X_train.npy', X_train)

if os.path.isfile('./dataset/y.npy') :
    y = np.load('./dataset/y.npy')
else:
    y = convert_labels(train_df)
    np.save('./dataset/y.npy', y)

# Load model
config = RobertaConfig.from_pretrained(
    args.config_path,
    output_hidden_states=True,
    num_labels=5,
)
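
A hedged sketch of how a `RobertaConfig` like this is typically consumed. The checkpoint path `args.pretrained_path` is an assumption (the snippet above does not show where the weights come from), and the original code may define its own classification head instead of the stock `transformers` class used here:

from transformers import RobertaForSequenceClassification

model_bert = RobertaForSequenceClassification.from_pretrained(
    args.pretrained_path,  # hypothetical argument pointing at the PhoBERT checkpoint
    config=config,
)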
예제 #29
if torch.cuda.device_count():
    print(f"Training using {torch.cuda.device_count()} gpus")
    model_bert = nn.DataParallel(model_bert)
    tsfm = model_bert.module.roberta
else:
    tsfm = model_bert.roberta

# Load the dictionary
vocab = Dictionary()
vocab.add_from_file(args.dict_path)

# Load training data
train_df = pd.read_csv(args.train_path, sep='\t').fillna("###")
train_df.text = train_df.text.progress_apply(
    lambda x: ' '.join([' '.join(sent) for sent in rdrsegmenter.tokenize(x)]))
y = train_df.label.values
X_train = convert_lines(train_df, vocab, bpe, args.max_sequence_length)
criterion = nn.CrossEntropyLoss()
# Creating optimizer and lr schedulers
param_optimizer = list(model_bert.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{
    'params':
    [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
    'weight_decay':
    0.01
}, {
    'params':
    [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
    'weight_decay':
    0.0
}]

# remove special characters
data_post['text'] = data_post['text'].str.replace(
    r"[\^\^\[\]&*:;,?\\~\"\'\"{}\(\)\-$@#]+", "", regex=True)
# strip emoji / pictograph code-point ranges
data_post['text'] = data_post['text'].str.replace(
    r"[(\U0001F600-\U0001F92F|\U0001F300-\U0001F5FF|\U0001F680-\U0001F6FF|\U0001F190-\U0001F1FF|\U00002702-\U000027B0|\U0001F926-\U0001FA9F|\u200d|\u2640-\u2642|\u2600-\u2B55|\u23cf|\u23e9|\u231a|\ufe0f)]+",
    "", regex=True)
#%% search dataframe
data_search = pd.read_csv(
    r'D:\Hahalolo\projects\sd_train\training\ai.hungvy\DataR8.csv')
data_search.drop(['Unnamed: 0'], axis=1, inplace=True)
data_search['keys'] = data_search['keys'].str.replace('+', ' ', regex=False)
#%% use Vietnamese NLP library
vncorenlp_file = r'./VnCoreNLP-1.1.1.jar'
vncorenlp = VnCoreNLP(vncorenlp_file)
# tokenize
data_post['nlp'] = data_post['text'].apply(lambda i: vncorenlp.tokenize(i))
data_search['vnnlp'] = data_search['keys'].apply(
    lambda x: vncorenlp.tokenize(x))
#%% convert to list of words
from nltk import flatten
data_search['vnnlp'] = data_search['vnnlp'].apply(lambda x: flatten(x))
data_post['nlp'] = data_post['nlp'].apply(lambda i: flatten(i))
#%% frequency
list_count = []
for search in data_search['vnnlp']:
    count = 0
    for single in search:
        for lis in data_post['nlp']:
            for ele in lis:
                if single == ele:
                    count += 1