Example #1
 def test_sent_tokenization(self):
     assert len(
         tokenize_sents(
             """Результати цих досліджень опубліковано в таких колективних працях, як «Статистичні параметри 
     стилів», «Морфемна структура слова», «Структурна граматика української мови Проспект», «Частотний словник сучасної української художньої прози», «Закономірності структурної організації науково-реферативного тексту», «Морфологічний аналіз наукового тексту на ЕОМ», «Синтаксичний аналіз наукового тексту на ЕОМ», «Використання ЕОМ у лінгвістичних дослідженнях» та ін. за участю В.І.Перебийніс, 
     М.М.Пещак, М.П.Муравицької, Т.О.Грязнухіної, Н.П.Дарчук, Н.Ф.Клименко, Л.І.Комарової, В.І.Критської, 
     Т.К.Пуздирєвої, Л.В.Орлової, Л.А.Алексієнко, Т.І.Недозим.""")) == 1
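
For contrast, a minimal companion check, written here as an illustrative sketch rather than part of the original suite, confirming that tokenize_sents does split on ordinary sentence boundaries:

 def test_sent_tokenization_splits(self):
     # hypothetical extra case: two plain sentences should yield two items
     assert len(tokenize_sents("Це перше речення. А це вже друге.")) == 2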
Example #2
def tokenize_texts_to_sentences(texts):
    text_sentences = []
    for text in texts:
        text_to_add = []
        text_to_add += tokenize_uk.tokenize_sents(text)
        text_sentences.append(text_to_add)
    return text_sentences
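
A quick illustration of the shape this helper returns; the sample strings below are invented for the sketch, and each input text maps to its own list of sentence strings.

texts = [
    "Перше речення. Друге речення.",
    "Третій текст з одним реченням.",
]
# expected, roughly: [['Перше речення.', 'Друге речення.'],
#                     ['Третій текст з одним реченням.']]
print(tokenize_texts_to_sentences(texts))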
Example #3
def reformat_split_sents():
    with open('../data/scraped.txt', 'r') as inf, \
            open('../data/new_scraped.txt', 'w') as outf:
        new_sents = [
            s for snip in inf.readlines()
            for s in tokenize_uk.tokenize_sents(snip)
        ]
        for sent in new_sents:
            outf.write(sent + "\n")
Example #4
 def tokenize_sentences(raw, is_tokenize_uk=False):
     """
     uses nltk by default
     if 'is_tokenize_uk' is True, then uses tokenize_uk
     """
     if is_tokenize_uk:
         return tokenize_uk.tokenize_sents(raw)
     else:
         return nltk.sent_tokenize(raw)
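
A minimal usage sketch for the helper above, assuming both backends are available (nltk needs its 'punkt' sentence model downloaded; the sample string is invented):

import nltk
import tokenize_uk

# nltk.download('punkt')  # one-time download of the sentence model
raw = "Перше речення. Друге речення."
print(tokenize_sentences(raw))                       # nltk backend (default)
print(tokenize_sentences(raw, is_tokenize_uk=True))  # tokenize_uk backend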
Example #5
def transform_lines_into_rythm(lines, accent_vocab):
    rythm = ''
    for line in lines.split('\n'):
        sent_tokens = tokenize_sents(line)
        for sent in sent_tokens:
            for word in filter(lambda w: w in accent_vocab,
                               tokenize_words(sent)):
                accent_options = accent_vocab[word]

                # use only the first accent option
                word, index = accent_options[0]
                rythm_map = transform_into_rythm_map(word, index)
                rythm += rythm_map
        rythm += '\n'
    return rythm
Example #6
def main():
    # open the citation and abstract data file
    with open('scopus-3.csv', 'r', newline='') as csvfile:
        citation_set = {}
        data_file = csv.reader(csvfile)  # create a csv reader object
        for data in data_file:
            # skip the header row and rows without an abstract
            if data[1] != "Cited by" and data[3] != "[No abstract available]":
                title = data[0]
                if data[1] == "":  # empty citation data means zero citations
                    citations = 0
                else:
                    citations = int(data[1])  # convert the citation count to a number
                abstract = data[3]
                # map each title to its citation count and abstract
                citation_set[title] = (citations, abstract)
    data_set = {}
    for key in citation_set:
        (citation, abstract) = citation_set[key]
        try:
            # prep the abstract for analysis
            text = tokenize_uk.tokenize_sents(abstract)
            # analyze the abstract
            read_scores = readability.getmeasures(text)
            data_dict = read_scores[u'readability grades']
            # keep the chosen metric
            data_set[key] = (citation, data_dict[u'FleschReadingEase'])
        except UnicodeDecodeError:
            # swallow errors raised by the tokenizer and readability tool
            pass
    citation_scores = []
    readability_scores = []
    for key in data_set:
        (cit, read) = data_set[key]
        citation_scores.append(cit)  # list of citation counts
        readability_scores.append(read)  # list of readability scores
    x = numpy.array(readability_scores)  # turn the lists into arrays
    y = numpy.array(citation_scores)
    stat = stats.linregress(x, y)  # basic linear regression
    print(stat)
Example #7
def filter_words_with_pos(tokens, useful_tags):
    filtered_words = []
    for t in tokens:
        tag = morph.parse(t)[0].tag.POS
        if tag_mapping.get(tag, str(tag)) in useful_tags:
            filtered_words.append(t)

    return filtered_words
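
A hedged sketch of how this filter might be wired up; the analyzer, the empty tag_mapping, and the sample sentence are assumptions rather than part of the original module (pymorphy2 needs the pymorphy2-dicts-uk package for lang='uk'):

import pymorphy2
import tokenize_uk

morph = pymorphy2.MorphAnalyzer(lang='uk')  # assumed setup for the globals used above
tag_mapping = {}  # fall back to pymorphy2's own POS names (NOUN, VERB, ADJF, ...)

tokens = tokenize_uk.tokenize_words("Кіт тихо спить на теплому підвіконні.")
print(filter_words_with_pos(tokens, {'NOUN', 'VERB', 'ADJF'}))  # keep nouns, verbs, adjectives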


### Preprocessing

tokenized_data = []
for item in dataset:
    sents_list = []
    sents = tokenize_uk.tokenize_sents(item[1])
    for s in sents:
        sents_list.append(tokenize_uk.tokenize_words(s))
    tokenized_data.append(sents_list)

tokenized_lengths = [len(t) for t in tokenized_data]
print("tokenized")
lemmatized_data = [[lemmatize_tokens(i) for i in t] for t in tokenized_data]
print("lemmatized")
digits_cleared_data = [[[item for item in i if not item.isdigit()] for i in l]
                       for l in lemmatized_data]
print("digits")
punct_cleared_data = [[[
    item for item in i if item not in f'{string.punctuation}”№«»'
] for i in d] for d in digits_cleared_data]
print("punct")
Example #9
sents_folder = data_folder + 'sents/'
sents_per_chunk = int(7e5)  # 700 000
log_interval = 10000
items_processed = 0

files = [raw_data_folder + 'ukr_lit.txt', raw_data_folder + 'td.txt']

log('starting')

for src_file in files:
    with open(src_file, 'rb') as f:
        data = f.read()

    log('processing file ' + src_file)

    text = data.decode('utf-8')
    tokens_text = tokenize_uk.tokenize_sents(text)

    log('tokenization finished')

    sents_number = int(math.ceil(len(tokens_text) / float(sents_per_chunk)))

    for i in range(0, sents_number):
        sentences = []
        chunk = tokens_text[i * sents_per_chunk:(i + 1) * sents_per_chunk]

        for sentence in chunk:
            sentences.append(tokenize_uk.tokenize_words(sentence))
            items_processed += 1

            if items_processed % log_interval == 0:
                log('items processed {}'.format(items_processed))
Example #10
sents_folder = data_folder + 'sents/'
log_interval = 10000
items_processed = 0

log('starting')

start_time = time.time()

for chunk_file in os.listdir(chunks_folder):
    data = pd.read_csv(chunks_folder + chunk_file)
    sentences = []

    for row in data.itertuples():
        title = row[3].decode('utf-8') if isinstance(row[3], basestring) else ''
        text = row[2].decode('utf-8') if isinstance(row[2], basestring) else ''

        tokens_title = tokenize_uk.tokenize_sents(title)
        tokens_text = tokenize_uk.tokenize_sents(text)

        for sentence in tokens_title + tokens_text:
            sentences.append(tokenize_uk.tokenize_words(sentence))

        total_time = time.time() - start_time

        if items_processed % log_interval == 0:
            log('items processed {}, running time {}'.format(items_processed, total_time))

        items_processed += 1

    log('saving chunk ')

    result_file = os.path.splitext(chunk_file)[0] + '.msg'
Example #11
def ner_nlp_extracting(text, model, vesum, word2indx, tag2indx, sess, graph):

    # normalize whitespace by re-joining word tokens, split into sentences,
    # then word-tokenize each sentence
    X = list(
        map(
            lambda sentence: tokenize_uk.tokenize_words(sentence),
            tokenize_uk.tokenize_sents(' '.join(
                tokenize_uk.tokenize_words(text)))))

    X_tokenized = np.array([[word for word in sentence] for sentence in X])
    # map each word (via its vesum main form) to a vocabulary index
    # and pad every sentence to length 70
    X = [[
        word2indx.get(vesum.get_main_form_from_vesum(word),
                      word2indx['UNKNOWN']) for word in sentence
    ] for sentence in X]
    X = pad_sequences(X,
                      maxlen=70,
                      padding='post',
                      truncating='post',
                      value=word2indx['ENDPAD'])

    with graph.as_default():
        set_session(sess)
        # predicted tag index for every token position
        pred = np.argmax(model.predict(X), axis=-1)

    # pair each tokenized sentence with its predicted tag names
    res = [(sent,
            list(
                map(
                    lambda tag: list(
                        filter(lambda key: tag2indx[key] == tag, tag2indx))[0],
                    tags[:len(sent)])))
           for sent, tags in zip(X_tokenized, pred)]

    tokens = list()
    tags = list()

    for tokens_tmp, tags_tmp in res:
        tokens.extend(tokens_tmp)
        tags.extend(tags_tmp)

    find_tags = list()

    start_index = 0
    finish_index = 0

    # walk the flat tag sequence and turn each B-/I- run into a character span
    for ind, tag in enumerate(tags):
        if (ind == 0 or ((ind > 0) and tags[ind - 1] == 'O')) and tag != 'O':
            token = tokens[ind]
            start_index = text.index(token, finish_index)
            finish_index = text.index(token, finish_index) + len(token)
        elif tag != 'O':
            token = tokens[ind]
            finish_index = text.index(token, finish_index) + len(token)
        elif ind > 0 and (tags[ind - 1][0] == 'B'
                          or tags[ind - 1][0] == 'I') and tag == 'O':
            ner = tags[ind - 1][2:]
            ner_dict = dict()
            ner_dict['entity_type'] = ner
            ner_dict['start_index'] = start_index
            ner_dict['finish_index'] = finish_index
            ner_dict['text_entity'] = text[start_index:finish_index]
            find_tags.append(ner_dict)

    return find_tags
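
The span-extraction loop above follows standard BIO logic: a B-*/I-* run opens an entity and the first following 'O' closes it (note that, as written, an entity ending on the very last token is only flushed if an 'O' follows it). A self-contained sketch of the same idea on toy tokens and tags, not real model output:

def bio_spans(text, tokens, tags):
    """Collect (entity_type, start, finish) character spans from BIO tags."""
    spans, start, finish = [], 0, 0
    for ind, tag in enumerate(tags):
        if tag != 'O' and (ind == 0 or tags[ind - 1] == 'O'):
            # a new B-*/I-* run starts: remember where its first token begins
            start = text.index(tokens[ind], finish)
            finish = start + len(tokens[ind])
        elif tag != 'O':
            # the run continues: extend the span to the end of this token
            finish = text.index(tokens[ind], finish) + len(tokens[ind])
        elif ind > 0 and tags[ind - 1] != 'O':
            # the run just ended: record the entity type and its character span
            spans.append((tags[ind - 1][2:], start, finish))
    return spans

print(bio_spans("Тарас Шевченко жив у Києві.",
                ['Тарас', 'Шевченко', 'жив', 'у', 'Києві', '.'],
                ['B-PERS', 'I-PERS', 'O', 'O', 'B-LOC', 'O']))
# [('PERS', 0, 14), ('LOC', 21, 26)]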
Example #12
import pymorphy2
import os
import docx2txt
import sqlite3
import tokenize_uk
from string import Template
import time

start = time.time()
texts = []
with os.scandir('C:/Users/user/Desktop/Навчання/Тексти/') as entries:
    for entry in entries:
        text = docx2txt.process(entry.path)  # entry.path already includes the folder
        sentences = tokenize_uk.tokenize_sents(text)
        texts.append(sentences)

db = sqlite3.connect('dictionary.db')
cursor = db.cursor()

word_forms = {}
count = 1
index = 1
for sents in texts:
    for sentence in sents:
        sentence = sentence.replace("\n", " ")
        # a parameterized query handles quotes and special characters safely,
        # so the SQL string no longer has to be assembled by hand
        cursor.execute('INSERT INTO sentences VALUES (?, ?);', (index, sentence))