Example #1
def nltk_tokenize_words(string, attached_period=False, language=None):
    """Wrap NLTK's tokenizer PunktLanguageVars(), but make final period
    its own token.
    >>> nltk_punkt("Sentence 1. Sentence 2.")
    >>> ['Sentence', 'one', '.', 'Sentence', 'two', '.']
    Optionally keep the NLTK's output:
    >>> nltk_punkt("Sentence 1. Sentence 2.", attached_period=True)
    >>> ['Sentence', 'one.', 'Sentence', 'two.']
    TODO: Run some tests to determine whether there is a large penalty for
    re-calling PunktLanguageVars() for each use of this function. If so, this
    will need to become a class, perhaps inheriting from the PunktLanguageVars
    object. Maybe integrate with WordTokenizer.
    """
    assert isinstance(string, str), "Incoming string must be type str."
    if language == 'sanskrit':
        periods = ['.', '।', '॥']
    else:
        periods = ['.']
    punkt = PunktLanguageVars()
    tokens = punkt.word_tokenize(string)
    if attached_period:
        return tokens
    new_tokens = []
    for word in tokens:
        for char in periods:
            if word.endswith(char):
                new_tokens.append(word[:-1])
                new_tokens.append(char)
                break
        else:
            new_tokens.append(word)
    return new_tokens
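A quick way to see what the wrapper undoes is to call PunktLanguageVars directly. A minimal sketch, assuming only that nltk is installed (word tokenization here needs no Punkt training data):

# Minimal sketch: PunktLanguageVars keeps a final period attached to its word,
# which is exactly what nltk_tokenize_words() splits off above.
from nltk.tokenize.punkt import PunktLanguageVars

plv = PunktLanguageVars()
print(plv.word_tokenize("Sentence 1. Sentence 2."))
# roughly: ['Sentence', '1.', 'Sentence', '2.']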
Example #2
def tag_ner(lang, input_text, output_type=list):
    """Run NER for chosen language.
    """

    _check_latest_data(lang)

    assert lang in NER_DICT.keys(), "Invalid language. Choose from: {}".format(
        ", ".join(NER_DICT.keys()))
    types = [str, list]
    assert type(input_text) in types, "Input must be: {}.".format(
        ", ".join(t.__name__ for t in types))
    assert output_type in types, "Output must be a {}.".format(
        ", ".join(t.__name__ for t in types))

    if type(input_text) == str:
        punkt = PunktLanguageVars()
        tokens = punkt.word_tokenize(input_text)
        new_tokens = []
        for word in tokens:
            if word.endswith("."):
                new_tokens.append(word[:-1])
                new_tokens.append(".")
            else:
                new_tokens.append(word)
        input_text = new_tokens

    ner_file_path = os.path.expanduser(NER_DICT[lang])
    with open(ner_file_path) as file_open:
        ner_str = file_open.read()
    ner_list = ner_str.split("\n")

    ner_tuple_list = []
    for count, word_token in enumerate(input_text):
        match = False
        for ner_word in ner_list:
            # the replacer slows things down, but is necessary
            if word_token == ner_word:
                ner_tuple = (word_token, "Entity")
                ner_tuple_list.append(ner_tuple)
                match = True
                break
        if not match:
            ner_tuple_list.append((word_token, ))

    if output_type is str:
        string = ""
        for tup in ner_tuple_list:
            start_space = " "
            final_space = ""
            # this is some mediocre string reconstitution
            # maybe not worth the effort
            if tup[0] in [",", ".", ";", ":", "?", "!"]:
                start_space = ""
            if len(tup) == 2:
                string += start_space + tup[0] + "/" + tup[1] + final_space
            else:
                string += start_space + tup[0] + final_space
        return string

    return ner_tuple_list
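The output_type=str branch can be exercised on its own; the tuple list below is a hand-written, hypothetical NER result rather than output from an actual CLTK model.

# Standalone sketch of the string reconstitution above, fed with an invented ner_tuple_list.
ner_tuple_list = [('ut',), ('Venus', 'Entity'), (',',), ('ut',), ('Sirius', 'Entity')]
string = ""
for tup in ner_tuple_list:
    start_space = "" if tup[0] in [",", ".", ";", ":", "?", "!"] else " "
    string += start_space + "/".join(tup)
print(string.strip())  # ut Venus/Entity, ut Sirius/Entity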
Example #3
    def rm_stopwords(self, stoplist=[]):
        """Removes words or phrases from the text.

        Given a list of words or phrases, gives new text with those phrases
        removed.

        Args:
            stoplist (:obj:`list`): List of words or phrases to filter from text

        Returns:
            :obj:`self.__class__`: New version of text, with stop words/phrases removed

        Example:
            >>> stopwords = ['ipsum', 'sit']
            >>> text = EnglishText('Lorem ipsum dolor sit amet...')
            >>> modified_text = text.rm_stopwords(stoplist=stopwords)
            >>> print(modified_text)
            'Lorem dolor amet...'
        """ # noqa
        filtered_words = []
        # converts text to list of words with NLTK tokenizer
        tokenizer = PunktLanguageVars()
        tokens = tokenizer.word_tokenize(str(self.data))
        # loop through each word, if not in stoplist, append
        for word in tokens:
            not_found = True
            for stopword in stoplist:
                if str(word).strip().lower() == str(stopword).strip().lower():
                    not_found = False
            if not_found:
                filtered_words.append(word)
        # return rejoined word
        return self.__class__(" ".join(filtered_words), self.options)
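The same filter can be tried without the EnglishText wrapper; only nltk is assumed here, and the stoplist values come from the docstring example.

# Standalone sketch of the stoplist filter above, assuming only nltk is installed.
from nltk.tokenize.punkt import PunktLanguageVars

stoplist = ['ipsum', 'sit']
tokens = PunktLanguageVars().word_tokenize('Lorem ipsum dolor sit amet...')
lowered_stops = {s.strip().lower() for s in stoplist}
filtered = [w for w in tokens if w.strip().lower() not in lowered_stops]
print(" ".join(filtered))  # roughly: Lorem dolor amet ...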
Example #4
    def _tokenize(self, text):
        """
        Use NLTK's standard tokenizer, rm punctuation.
        :param text: pre-processed text
        :return: tokenized text
        :rtype: list
        """
        sentence_tokenizer = TokenizeSentence('latin')
        sentences = sentence_tokenizer.tokenize_sentences(text.lower())

        sent_words = []
        punkt = PunktLanguageVars()
        for sentence in sentences:
            words = punkt.word_tokenize(sentence)

            assert isinstance(words, list)
            words_new = []
            for word in words:
                if word not in self.punctuation and word not in self.abbreviations and word not in self.numbers:  # pylint: disable=line-too-long
                    words_new.append(word)

            # rm all numbers here with: re.compile(r'[0-9]')
            sent_words.append(words_new)

        return sent_words
Example #5
 def __init__(self):
     """Language taken as argument, necessary used when saving word frequencies to
     ``cltk_data/user_data``."""
     self.punkt = PunktLanguageVars()
     self.punctuation = [
         ',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{',
         '}'
     ]
Example #6
def tokenize(desc):
    '''
    INPUT: A cleaned description string
    OUTPUT: Tokenized and stemmed list of words from the description
    '''
    plv = PunktLanguageVars()
    snowball = SnowballStemmer('english')
    return [snowball.stem(word) for word in plv.word_tokenize(desc.lower())]
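A self-contained sketch of the same tokenize-then-stem pipeline, assuming only that nltk (with its 'english' Snowball stemmer) is installed; the description string is invented for illustration.

# Sketch of the stem-after-tokenize pipeline above; the input string is made up.
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize.punkt import PunktLanguageVars

desc = "Running dogs barked loudly"
plv = PunktLanguageVars()
snowball = SnowballStemmer('english')
print([snowball.stem(word) for word in plv.word_tokenize(desc.lower())])
# roughly: ['run', 'dog', 'bark', 'loud']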
Example #7
 def test_latin_stopwords(self):
     """Test filtering Latin stopwords."""
     sentence = 'Quo usque tandem abutere, Catilina, patientia nostra?'
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in LATIN_STOPS]
     target_list = ['usque', 'tandem', 'abutere', ',', 'catilina', ',',
                    'patientia', 'nostra', '?']
     self.assertEqual(no_stops, target_list)
Example #8
 def test_french_stopwords(self):
     """Test filtering French stopwords."""
     sentence = "En pensé ai e en talant que d ’ Yonec vus die avant dunt il fu nez, e de sun pere cum il vint primes a sa mere ."
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in FRENCH_STOPS]
     target_list = ['pensé', 'talant', 'd', '’', 'yonec', 'die', 'avant', 'dunt', 'nez', ',', 'pere', 'cum', 'primes',
                    'mere','.']
     self.assertEqual(no_stops, target_list)
Example #9
    def preprocess(self, text):
        if self.token:
            lemma = self.lemmatizer.lemmatize(text)
        else:
            plv = PunktLanguageVars()
            unigrams = plv.word_tokenize(text)
            lemma = self.lemmatizer.lemmatize(unigrams)

        lemma = [t[0] if t[1] == "punc" else t[1] for t in lemma]

        return " ".join(lemma)
Example #10
 def test_old_norse_stopwords(self):
     """
     Test filtering Old Norse stopwords
     Sentence extracted from Eiríks saga rauða (http://www.heimskringla.no/wiki/Eir%C3%ADks_saga_rau%C3%B0a)
     """
     sentence = 'Þat var einn morgin, er þeir Karlsefni sá fyrir ofan rjóðrit flekk nökkurn, sem glitraði við þeim'
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in OLD_NORSE_STOPS]
     target_list = ['var', 'einn', 'morgin', ',', 'karlsefni', 'rjóðrit', 'flekk', 'nökkurn', ',', 'glitraði']
     self.assertEqual(no_stops, target_list)
Example #11
 def test_greek_stopwords(self):
     """Test filtering Greek stopwords."""
     sentence = 'Ἅρπαγος δὲ καταστρεψάμενος Ἰωνίην ἐποιέετο στρατηίην \
     ἐπὶ Κᾶρας καὶ Καυνίους καὶ Λυκίους, ἅμα ἀγόμενος καὶ Ἴωνας καὶ \
     Αἰολέας.'
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in GREEK_STOPS]
     target_list = ['ἅρπαγος', 'καταστρεψάμενος', 'ἰωνίην', 'ἐποιέετο',
                    'στρατηίην', 'κᾶρας', 'καυνίους', 'λυκίους', ',',
                    'ἅμα', 'ἀγόμενος', 'ἴωνας', 'αἰολέας.']
     self.assertEqual(no_stops, target_list)
Example #12
def setup_tokenizers(terminal_punctuation):
	PunktLanguageVars.sent_end_chars = terminal_punctuation
	PunktLanguageVars.re_boundary_realignment = re.compile(r'[›»》’”\'\")\)\]\}\>]+?(?:\s+|(?=--)|$)', re.MULTILINE)
	global word_tokenizer
	global sentence_tokenizers

	#Accessing private variables of PunktLanguageVars because nltk has a faulty design pattern that necessitates it.
	#Issue reported here: https://github.com/nltk/nltk/issues/2068
	word_tokenizer = PunktLanguageVars()
	word_tokenizer._re_word_tokenizer = re.compile(PunktLanguageVars._word_tokenize_fmt % {
	    'NonWord': r"(?:[\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
	    'MultiChar': PunktLanguageVars._re_multi_char_punct,
	    'WordStart': r"[^\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡]",
	}, re.UNICODE | re.VERBOSE)
	word_tokenizer._re_period_context = re.compile(PunktLanguageVars._period_context_fmt % {
		'NonWord': r"(?:[\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
		'SentEndChars': word_tokenizer._re_sent_end_chars, 
	}, re.UNICODE | re.VERBOSE)

	x = PunktLanguageVars()
	x._re_word_tokenizer = re.compile(PunktLanguageVars._word_tokenize_fmt % {
	    'NonWord': r"(?:[\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
	    'MultiChar': PunktLanguageVars._re_multi_char_punct,
	    'WordStart': r"[^\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡]",
	}, re.UNICODE | re.VERBOSE)
	x._re_period_context = re.compile(PunktLanguageVars._period_context_fmt % {
		'NonWord': r"(?:[\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
		'SentEndChars': x._re_sent_end_chars, 
	}, re.UNICODE | re.VERBOSE)

	#Read tokenizers from pickle files (also include an untrained tokenizer). Mapping from language name to tokenizer
	sentence_tokenizers = dict({None: PunktSentenceTokenizer(lang_vars=PunktLanguageVars())}, **{
		current_file_name[:current_file_name.index('.')]: pickle.load(open(join(current_path, current_file_name), mode='rb'))
		for current_path, current_dir_names, current_file_names in os.walk(sentence_tokenizer_dir) 
		for current_file_name in current_file_names if current_file_name.endswith('.pickle')
	})
	for s in sentence_tokenizers.values():
		s._lang_vars._re_period_context = x._re_period_context
		s._lang_vars._re_word_tokenizer = x._re_word_tokenizer
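The core trick above is assigning class attributes on PunktLanguageVars before any instance or pickled tokenizer is built. A minimal sketch of that override, assuming only nltk and no CLTK pickle files:

# Minimal sketch of the class-attribute override; no pickled sentence tokenizers are loaded.
from nltk.tokenize.punkt import PunktLanguageVars, PunktSentenceTokenizer

PunktLanguageVars.sent_end_chars = ('.', ';')  # also treat ';' as sentence-final
tokenizer = PunktSentenceTokenizer(lang_vars=PunktLanguageVars())
print(tokenizer.tokenize('prima pars; secunda pars. Tertia pars.'))
# exact splits depend on Punkt's untrained heuristics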
Example #13
    def lemmatize(self, input_text, return_raw=False, return_string=False):
        """Take incoming string or list of tokens. Lookup done against a
        key-value list of lemmata-headword. If a string, tokenize with
        ``PunktLanguageVars()``. If a final period appears on a token, remove
        it, then re-add once replacement done.
        TODO: rm check for final period, change PunktLanguageVars()
        """
        assert type(input_text) in [
            list, str
        ], logger.error("Input must be a list or string.")
        if type(input_text) is str:
            punkt = PunktLanguageVars()
            tokens = punkt.word_tokenize(input_text)
        else:
            tokens = input_text

        lemmatized_tokens = []
        for token in tokens:
            # check for final period
            final_period = False
            if token[-1] == ".":
                final_period = True
                token = token[:-1]

            # look for token in lemma dict keys
            if token.lower() in self.lemmata.keys():
                headword = self.lemmata[token.lower()]

                # re-add final period if rm'd
                if final_period:
                    headword += "."

                # append to return list
                if not return_raw:
                    lemmatized_tokens.append(headword)
                else:
                    lemmatized_tokens.append(token + "/" + headword)
            # if token not found in lemma-headword list
            else:
                # re-add final period if rm'd
                if final_period:
                    token += "."

                if not return_raw:
                    lemmatized_tokens.append(token)
                else:
                    lemmatized_tokens.append(token + "/" + token)
        if not return_string:
            return lemmatized_tokens
        elif return_string:
            return " ".join(lemmatized_tokens)
Example #14
 def test_akkadian_stopwords(self):
     """
     Test filtering Akkadian stopwords
     Sentence extracted from the law code of Hammurabi, law 3 (Martha Roth 2nd Edition 1997, Law Collections from
     Mesopotamia and Asia Minor).
     """
     sentence = "šumma awīlum ina dīnim ana šībūt sarrātim ūṣiamma awat iqbû la uktīn šumma dīnum šû dīn napištim awīlum šû iddâk"
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in AKKADIAN_STOPS]
     target_list = ['awīlum', 'dīnim', 'šībūt', 'sarrātim', 'ūṣiamma', 'awat', 'iqbû', 'uktīn', 'dīnum',
                    'dīn', 'napištim', 'awīlum', 'iddâk']
     self.assertEqual(no_stops, target_list)
Example #15
    def preprocess(self, text):
        if self.path:
            with open(self.path, "r") as f:
                sw = set(f.read().split("\n")[:-1])
        else:
            sw = set(stopwords.words(self.lang))

        if self.token:
            unigrams = text
        else:
            plv = PunktLanguageVars()
            unigrams = plv.word_tokenize(text.lower())

        return " ".join([unigram for unigram in unigrams if not unigram in sw])
Example #16
 def rm_stopwords(self, stoplist=[]):
     filtered_words = []
     # converts text to list of words with NLTK tokenizer
     tokenizer = PunktLanguageVars()
     tokens = tokenizer.word_tokenize(str(self.data))
     # loop through each word, if not in stoplist, append
     for word in tokens:
         not_found = True
         for stopword in stoplist:
             if str(word).strip().lower() == str(stopword).strip().lower():
                 not_found = False
         if not_found:
             filtered_words.append(word)
     # return rejoined word
     return self.__class__(" ".join(filtered_words), self.metadata)
Example #17
    def tokenize(self, string):
        """Tokenize incoming string."""
        punkt = PunktLanguageVars()
        generic_tokens = punkt.word_tokenize(string)
        specific_tokens = []
        for generic_token in generic_tokens:
            is_enclitic = False
            if generic_token not in self.exceptions:
                for enclitic in self.enclitics:
                    if generic_token.endswith(enclitic):
                        new_tokens = [generic_token[:-len(enclitic)]
                                      ] + ['-' + enclitic]
                        specific_tokens += new_tokens
                        is_enclitic = True
                        break
            if not is_enclitic:
                specific_tokens.append(generic_token)

        return specific_tokens
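The enclitic loop can be checked without the surrounding class; the enclitics and exceptions values below are illustrative assumptions, not the class's real attributes.

# Standalone sketch of the enclitic split above; 'enclitics' and 'exceptions' are invented values.
enclitics = ['que', 've', 'ne']
exceptions = ['quoque', 'atque']
specific_tokens = []
for tok in ['arma', 'virumque', 'atque']:
    for enclitic in enclitics:
        if tok not in exceptions and tok.endswith(enclitic):
            specific_tokens += [tok[:-len(enclitic)], '-' + enclitic]
            break
    else:
        specific_tokens.append(tok)
print(specific_tokens)  # ['arma', 'virum', '-que', 'atque']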
Example #18
    def _build_concordance(self, text_str):
        """
        Inherit or mimic the logic of ConcordanceIndex() at http://www.nltk.org/_modules/nltk/text.html
        and/or ConcordanceSearchView() & SearchCorpus() at https://github.com/nltk/nltk/blob/develop/nltk/app/concordance_app.py
        :param text_str: Text to be turned into a concordance
        :type text_str: str
        :return: list
        """
        p = PunktLanguageVars()
        orig_tokens = p.word_tokenize(text_str)
        c = ConcordanceIndex(orig_tokens)

        #! rm dupes after index, before loop
        tokens = set(orig_tokens)
        tokens = [
            x for x in tokens
            if x not in [',', '.', ';', ':', '"', "'", '[', ']']
        ]  # this needs to be changed or rm'ed

        return c.return_concordance_all(tokens)
Example #19
def build_concordance(text_str: str) -> List[List[str]]:
    """
    Inherit or mimic the logic of ConcordanceIndex() at:
     http://www.nltk.org/_modules/nltk/text.html
    and/or ConcordanceSearchView() & SearchCorpus() at:
     https://github.com/nltk/nltk/blob/develop/nltk/app/concordance_app.py
    :param text_str: Text to be turned into a concordance
    :type text_str: str
    :return: list
    """
    punkt_vars = PunktLanguageVars()  # type: PunktLanguageVars
    orig_tokens = punkt_vars.word_tokenize(text_str)  # type: List[str]
    concordance_index = ConcordanceIndex(orig_tokens)  # type: Any
    #! rm dupes after index, before loop
    tokens_set = set(orig_tokens)  # type: Set[str]
    punct_list = [',', '.', ';', ':', '"', "'", '[', ']']  # type: List[str]
    # this needs to be changed or rm'ed
    tokens = [x for x in tokens_set if x not in punct_list]  # type: List[str]
    index = concordance_index.return_concordance_all(tokens)  # type: List[List[str]]
    return index
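return_concordance_all() appears to be a CLTK-specific extension; the indexing step itself can be illustrated with NLTK's stock ConcordanceIndex, as in this rough sketch.

# Rough sketch of the indexing step, assuming only nltk is installed;
# CLTK's return_concordance_all() is not used here.
from nltk.text import ConcordanceIndex
from nltk.tokenize.punkt import PunktLanguageVars

tokens = PunktLanguageVars().word_tokenize('arma virumque cano Troiae qui primus arma')
index = ConcordanceIndex(tokens)
print(index.offsets('arma'))  # token positions of 'arma', roughly [0, 6]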
Example #20
 def tokenize(self, string):
     """Tokenize incoming string."""
     punkt = PunktLanguageVars()
     generic_tokens = punkt.word_tokenize(string)
     # Rewrite as an if-else block for exceptions rather than separate list comprehensions
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'nec' else ['c', item[:-1]])] # Handle 'nec' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'sodes' else [item[0]+'i', 'audes'])] # Handle 'sodes' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'sultis' else [item[0]+'i', 'vultis'])] # Handle 'sultis' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'satin' else [item[:-1] + 's', 'ne'])] # Handle 'satin' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'scin' else [item[:-1] + 's', 'ne'])] # Handle 'scin' as a special case.      
     specific_tokens = []
     for generic_token in generic_tokens:
         is_enclitic = False
         if generic_token not in self.exceptions:
             for enclitic in self.enclitics:
                 if generic_token.endswith(enclitic):
                     if enclitic == 'cum':
                         if generic_token.lower() in self.inclusions:
                             specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                         else:
                             specific_tokens += [generic_token]
                     elif enclitic == 'n':
                         specific_tokens += [generic_token[:-len(enclitic)]] + ['ne']
                     elif enclitic == 'st':
                         if generic_token.endswith('ust'):
                             specific_tokens += [generic_token[:-len(enclitic)+1]] + ['est']
                         else:
                             # Does not handle 'similist', 'qualist', etc. correctly
                             specific_tokens += [generic_token[:-len(enclitic)]] + ['est']
                     else:
                         specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                     is_enclitic = True
                     break
         if not is_enclitic:
             specific_tokens.append(generic_token)
     return specific_tokens
Example #21
#!/usr/bin/python3

import concurrent.futures
from PIL import Image
import os
import re
import pickle
import docx
import logging
from pathlib import Path
import pytesseract
from nltk.tokenize.punkt import PunktLanguageVars
from string import punctuation
from multiprocessing import Lock

lemmatizer = PunktLanguageVars()  # note: despite the name, this is NLTK's word-tokenizer helper, not a lemmatizer
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
format_log = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
log_path = Path(Path.cwd() / 'ocr.log')
fileHandler = logging.FileHandler(log_path)
fileHandler.setFormatter(format_log)
logger.addHandler(fileHandler)
stream = logging.StreamHandler()
stream.setFormatter(format_log)
logger.addHandler(stream)

lock = Lock()
new_paragraphs = []

paragraphs = []
Example #22
class LatinLanguageVars(PunktLanguageVars):
    _re_non_word_chars = PunktLanguageVars()._re_non_word_chars.replace(
        "'", "")
Example #23
# ( pip install  beautifulsoup4 )
#
# ----------------------------------------------------------------------------

# Convert beta to utf8
try:
    from cltk.corpus.greek.beta_to_unicode import Replacer
    from lxml import etree
    r = Replacer()
    # and 20150804:
    from cltk.tokenize.sentence import TokenizeSentence
    from cltk.stop.greek.stops import STOPS_LIST
    tokenizer = TokenizeSentence('greek')
    #
    from nltk.tokenize.punkt import PunktLanguageVars
    plv = PunktLanguageVars() #not useful
except:
    print(" No CLTK/NTLK toolkits found." )
    r = None

#tokenizer.tokenize_sentences(sentence)
#tokens = plv.word_tokenize(sentence.lower())
    
# Or this way
try:
    import TrieConvert
    t = TrieConvert.beta2unicodeTrie()
except:
    print( "TrieConvert not found." )
    t = None
Example #24
import re
import nltk
import os
from bs4 import BeautifulSoup
from nltk.tokenize.punkt import PunktLanguageVars
from cltk.stop.middle_english.stops import STOPS_LIST
import json
STOPS_LIST += ['!', ',' ,'.' ,'?', ';', "'", ':', '--', '[', ']']
with open('./stopwords.txt') as f:
    lines = f.readlines()
    for line in lines:
        STOPS_LIST.append(line.strip('\n'))


p = PunktLanguageVars()

root = "./"
plays = os.listdir(root)
for play in plays:
    if play == 'data' or re.match(r'\..*', play):
        continue
    playdir = root + play + '/'
    if not os.path.isdir(playdir):
        continue
    print(play + " ...")
    scenes = os.listdir(playdir)
    role = dict()
    speechbuf = []
    name = ""
    for file in scenes:
        scene = playdir + '/' + file
Example #25
from extract_features import parse_tess
from cltk.utils.file_operations import open_pickle
from nltk.tokenize.punkt import PunktLanguageVars, PunktTrainer, PunktSentenceTokenizer

PunktLanguageVars.sent_end_chars = ('.', ';', ';')
PunktLanguageVars.internal_punctuation = (',', '·', ':')

text = parse_tess('tesserae/texts/grc/xenophon.anabasis.tess')
tokenizer = open_pickle('tokenizers/ancient_greek.pickle')
print('Xenophon tokens: ' + str(len(tokenizer.tokenize(text))))
print()

trainer = PunktTrainer(lang_vars=PunktLanguageVars())
trainer.INCLUDE_ALL_COLLOCS = True
trainer.INCLUDE_ABBREV_COLLOCS = True
trainer.train(text, verbose=True)

new_tokenizer = PunktSentenceTokenizer(trainer.get_params())
print('tokenizers equal? ' + str(tokenizer == new_tokenizer))
print('tokenization equal? ' +
      str(tokenizer.tokenize(text) == new_tokenizer.tokenize(text)))

old_tok_out = open('feature_data/old_tok.txt', mode='w')
old_tok_out.write('\n'.join(tokenizer.tokenize(text)))
new_tok_out = open('feature_data/new_tok.txt', mode='w')
new_tok_out.write('\n'.join(new_tokenizer.tokenize(text)))
'''
There seem to be very few abbreviations in the tesserae corpus. This means training the PunktSentenceTokenizer might not yield any improvement.
From paper abstract: "[Punkt sentence tokenization training] is based on the assumption that a large number of ambiguities in the determination of sentence boundaries can be eliminated once abbreviations have been identified."
'''

Example #26
                else:
                    lemmatized_tokens.append(token + '/' + headword)
            # if token not found in lemma-headword list
            else:
                # re-add final period if rm'd
                if final_period:
                    token += '.'

                if not return_raw:
                    lemmatized_tokens.append(token)
                else:
                    lemmatized_tokens.append(token + '/' + token)
        if not return_string:
            return lemmatized_tokens
        elif return_string:
            return ' '.join(lemmatized_tokens)


if __name__ == '__main__':
    REPLACER = LemmaReplacer('latin')
    PUNKT = PunktLanguageVars()
    #STRING = 'Est interdum praestare mercaturis rem quaerere, nisi tam periculosum sit, et item foenerari, si tam honestum. Maiores nostri sic habuerunt et ita in legibus posiuerunt: furem dupli condemnari, foeneratorem quadrupli. Quanto peiorem ciuem existimarint foeneratorem quam furem, hinc licet existimare. Et uirum bonum quom laudabant, ita laudabant: bonum agricolam bonumque colonum; amplissime laudari existimabatur qui ita laudabatur. Mercatorem autem strenuum studiosumque rei quaerendae existimo, uerum, ut supra dixi, periculosum et calamitosum. At ex agricolis et uiri fortissimi et milites strenuissimi gignuntur, maximeque pius quaestus stabilissimusque consequitur minimeque inuidiosus, minimeque male cogitantes sunt qui in eo studio occupati sunt. Nunc, ut ad rem redeam, quod promisi institutum principium hoc erit.'  # pylint: disable=line-too-long
    STRING = 'hominum divomque voluptas'
    #EX_TOKENS = PUNKT.word_tokenize(UMLEMMATIZED)
    # UMLEMMATIZED = ['τὴν', 'διάγνωσιν', 'αὐτῶν', 'ἔρχεσθαι']
    # LEMMATIZED = REPLACER.lemmatize(UMLEMMATIZED, return_raw=False, return_string=True)
    LEMMATIZED = REPLACER.lemmatize(STRING.split(),
                                    return_raw=False,
                                    return_string=True)
    print(LEMMATIZED)
Example #27
def tokenize_latin_words(string):
    from cltk.tokenize.latin_exceptions import latin_exceptions

    assert isinstance(string, str), "Incoming string must be type str."

    def matchcase(word):
        # From Python Cookbook
        def replace(m):
            text = m.group()
            if text.isupper():
                return word.upper()
            elif text.islower():
                return word.lower()
            elif text[0].isupper():
                return word.capitalize()
            else:
                return word

        return replace

    replacements = [(r'mecum', 'cum me'),
                    (r'tecum', 'cum te'),
                    (r'secum', 'cum se'),
                    (r'nobiscum', 'cum nobis'),
                    (r'vobiscum', 'cum vobis'),
                    (r'quocum', 'cum quo'),
                    (r'quacum', 'cum qua'),
                    (r'quicum', 'cum qui'),
                    (r'quibuscum', 'cum quibus'),
                    (r'sodes', 'si audes'),
                    (r'satin', 'satis ne'),
                    (r'scin', 'scis ne'),
                    (r'sultis', 'si vultis'),
                    (r'similist', 'similis est'),
                    (r'qualist', 'qualis est')
                    ]

    for replacement in replacements:
        string = re.sub(replacement[0], matchcase(replacement[1]), string, flags=re.IGNORECASE)


    punkt_param = PunktParameters()
    abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', "m'", 'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']
    punkt_param.abbrev_types = set(abbreviations)
    sent_tokenizer = PunktSentenceTokenizer(punkt_param)

    word_tokenizer = PunktLanguageVars()
    sents = sent_tokenizer.tokenize(string)

    enclitics = ['que', 'n', 'ue', 've', 'st']
    exceptions = enclitics
    exceptions = list(set(exceptions + latin_exceptions))

    tokens = []

    for sent in sents:
        temp_tokens = word_tokenizer.word_tokenize(sent)
        # Need to check that tokens exist before handling them; needed to make stream.readlines work in PlaintextCorpusReader
        
        if temp_tokens:
            if temp_tokens[0].endswith('ne'):
                if temp_tokens[0].lower() not in exceptions:
                    temp = [temp_tokens[0][:-2], '-ne']
                    temp_tokens = temp + temp_tokens[1:]

            if temp_tokens[-1].endswith('.'):
                final_word = temp_tokens[-1][:-1]
                del temp_tokens[-1]
                temp_tokens += [final_word, '.']

            for token in temp_tokens:
                tokens.append(token)

    # Break enclitic handling into own function?
    specific_tokens = []

    for token in tokens:
        is_enclitic = False
        if token.lower() not in exceptions:
            for enclitic in enclitics:
                if token.endswith(enclitic):
                    if enclitic == 'n':
                        specific_tokens += [token[:-len(enclitic)]] + ['-ne']
                    elif enclitic == 'st':
                        if token.endswith('ust'):
                            specific_tokens += [token[:-len(enclitic) + 1]] + ['est']
                        else:
                            specific_tokens += [token[:-len(enclitic)]] + ['est']
                    else:
                        specific_tokens += [token[:-len(enclitic)]] + ['-' + enclitic]
                    is_enclitic = True
                    break
        if not is_enclitic:
            specific_tokens.append(token)

    return specific_tokens
Example #28
def tokenize_latin_words(string):
    """
    Tokenizer divides the string into a list of substrings

    >>> from cltk.corpus.utils.formatter import remove_non_ascii
    >>> text =  'Dices ἐστιν ἐμός pulchrum esse inimicos ulcisci.'
    >>> tokenize_latin_words(text)
    ['Dices', 'ἐστιν', 'ἐμός', 'pulchrum', 'esse', 'inimicos', 'ulcisci', '.']

    :param string: This accepts the string value that needs to be tokenized
    :returns: A list of substrings extracted from the string
    """
    from cltk.tokenize.latin_exceptions import latin_exceptions

    assert isinstance(string, str), "Incoming string must be type str."

    def matchcase(word):
        # From Python Cookbook
        def replace(m):
            text = m.group()
            if text.isupper():
                return word.upper()
            elif text.islower():
                return word.lower()
            elif text[0].isupper():
                return word.capitalize()
            else:
                return word

        return replace

    replacements = [(r'\bmecum\b', 'cum me'), (r'\btecum\b', 'cum te'),
                    (r'\bsecum\b', 'cum se'), (r'\bnobiscum\b', 'cum nobis'),
                    (r'\bvobiscum\b', 'cum vobis'), (r'\bquocum\b', 'cum quo'),
                    (r'\bquacum\b', 'cum qua'), (r'\bquicum\b', 'cum qui'),
                    (r'\bquibuscum\b', 'cum quibus'),
                    (r'\bsodes\b', 'si audes'), (r'\bsatin\b', 'satis ne'),
                    (r'\bscin\b', 'scis ne'), (r'\bsultis\b', 'si vultis'),
                    (r'\bsimilist\b', 'similis est'),
                    (r'\bqualist\b', 'qualis est')]

    for replacement in replacements:
        string = re.sub(replacement[0],
                        matchcase(replacement[1]),
                        string,
                        flags=re.IGNORECASE)

    punkt_param = PunktParameters()
    abbreviations = [
        'c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', "m'",
        'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul',
        'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop'
    ]
    punkt_param.abbrev_types = set(abbreviations)
    sent_tokenizer = PunktSentenceTokenizer(punkt_param)

    word_tokenizer = PunktLanguageVars()
    sents = sent_tokenizer.tokenize(string)

    enclitics = ['que', 'n', 'ue', 've', 'st']
    exceptions = enclitics
    exceptions = list(set(exceptions + latin_exceptions))

    tokens = []

    for sent in sents:
        temp_tokens = word_tokenizer.word_tokenize(sent)
        # Need to check that tokens exist before handling them;
        # needed to make stream.readlines work in PlaintextCorpusReader

        if temp_tokens:
            if temp_tokens[0].endswith('ne') and len(temp_tokens[0]) > 2:
                if temp_tokens[0].lower() not in exceptions:
                    temp = [temp_tokens[0][:-2], '-ne']
                    temp_tokens = temp + temp_tokens[1:]

            if temp_tokens[-1].endswith('.'):
                final_word = temp_tokens[-1][:-1]
                del temp_tokens[-1]
                temp_tokens += [final_word, '.']

            for token in temp_tokens:
                tokens.append(token)

    # Break enclitic handling into own function?
    specific_tokens = []

    for token in tokens:
        is_enclitic = False
        if token.lower() not in exceptions:
            for enclitic in enclitics:
                if token.endswith(enclitic):
                    if enclitic == 'n':
                        specific_tokens += [token[:-len(enclitic)]] + ['-ne']
                    elif enclitic == 'st':
                        if token.endswith('ust'):
                            specific_tokens += [token[:-len(enclitic) + 1]
                                                ] + ['est']
                        else:
                            specific_tokens += [token[:-len(enclitic)]
                                                ] + ['est']
                    else:
                        specific_tokens += [token[:-len(enclitic)]
                                            ] + ['-' + enclitic]
                    is_enclitic = True
                    break
        if not is_enclitic:
            specific_tokens.append(token)

    return specific_tokens
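The matchcase() helper preserves the casing pattern of whatever it replaces; a standalone check using only the standard library:

# Standalone check of the matchcase() idea used above.
import re

def matchcase(word):
    def replace(m):
        text = m.group()
        if text.isupper():
            return word.upper()
        elif text.islower():
            return word.lower()
        elif text[0].isupper():
            return word.capitalize()
        return word
    return replace

print(re.sub(r'\bmecum\b', matchcase('cum me'), 'Mecum venit.', flags=re.IGNORECASE))
# Cum me venit.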
Example #29
def tag_ner(lang, input_text, output_type=list):
    """Run NER for chosen language.

    Choosing output_type=list, returns a list of tuples:
    >>> tag_ner('latin', input_text=text_str, output_type=list)
    [('ut',), ('Venus', 'Entity'), (',',), ('ut',), ('Sirius', 'Entity'),
    (',',), ('ut',), ('Spica', 'Entity')]
    """

    _check_latest_data(lang)

    assert lang in NER_DICT.keys(), \
        'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))
    types = [str, list]
    assert type(input_text) in types, 'Input must be: {}.'.format(
        ', '.join(t.__name__ for t in types))
    assert output_type in types, 'Output must be a {}.'.format(
        ', '.join(t.__name__ for t in types))

    if type(input_text) == str:
        punkt = PunktLanguageVars()
        tokens = punkt.word_tokenize(input_text)
        new_tokens = []
        for word in tokens:
            if word.endswith('.'):
                new_tokens.append(word[:-1])
                new_tokens.append('.')
            else:
                new_tokens.append(word)
        input_text = new_tokens

    ner_file_path = os.path.expanduser(NER_DICT[lang])
    with open(ner_file_path, encoding='utf-8') as file_open:
        ner_str = file_open.read()
    ner_list = ner_str.split('\n')

    ner_tuple_list = []
    for count, word_token in enumerate(input_text):
        match = False
        for ner_word in ner_list:
            # the replacer slows things down, but is necessary
            if word_token == ner_word:
                ner_tuple = (word_token, 'Entity')
                ner_tuple_list.append(ner_tuple)
                match = True
                break
        if not match:
            ner_tuple_list.append((word_token, ))

    if output_type is str:
        string = ''
        for tup in ner_tuple_list:
            start_space = ' '
            final_space = ''
            # this is some mediocre string reconstitution
            # maybe not worth the effort
            if tup[0] in [',', '.', ';', ':', '?', '!']:
                start_space = ''
            if len(tup) == 2:
                string += start_space + tup[0] + '/' + tup[1] + final_space
            else:
                string += start_space + tup[0] + final_space
        return string

    return ner_tuple_list
Example #30
from textual_feature import sentence_tokenizers
from nltk.tokenize.punkt import PunktLanguageVars, PunktSentenceTokenizer
import re

p = PunktLanguageVars()

p._re_word_tokenizer = re.compile(
    PunktLanguageVars._word_tokenize_fmt % {
        'NonWord': r"(?:[0-9\.?!\-))\"“”‘’`··~,«»;;}\]\*\#:@&\'\(({\[])",
        'MultiChar': PunktLanguageVars._re_multi_char_punct,
        'WordStart': r"[^0-9\.?!\-))\"“”‘’`··~,«»;;}\]\*\#:@&\'\(({\[]",
    }, re.UNICODE | re.VERBOSE)

s = 'Σιδὼν ἐπὶ θαλάττῃ πόλις, ̓Ασσυρίων ἡ θάλασσα. test ""test test? test test. test test! test test. test. test test; test: test; test; test23 test45 89test. test test” test test “test test“. "test test". test test... test‘ test’. 00000test. test.test. .....test. test-test test- test. test) test test( test test. «test test» test.'
print(p.word_tokenize(s))
print()

s = '"ss ss". "ss ss." «s s. abc 123409 abc. 5ff5g s. ~ab cd~ ab~cd s.'
print(p.word_tokenize(s))
print()

# From polybius.histories.tess
s = '1συμμάχοις. ἀποδοῦναι Καρχηδονίους ̔Ρωμαίοις "1χωρὶς λύτρων ἅπαντας τοὺς αἰχμαλώτους. ἀργυ"1ρίου κατενεγκεῖν Καρχηδονίους ̔Ρωμαίοις ἐν ἔτεσιν "1εἴκοσι δισχίλια καὶ διακόσια τάλαντα Εὐβοϊκά."2'
print(p.word_tokenize(s))
print()

# From polybius.histories.tess
s = "διόπερ οὐχ ὁρῶν ποίαν ἄν τις ὀξυτέραν ἢ μείζονα λάβοι μεταβολὴν τῶν καθ' ἡμᾶς τῆς γε ̔Ρωμαίοις συμβάσης, εἰς τοῦτον ἀπεθέμην τὸν καιρὸν τὸν ὑπὲρ τῶν προειρημένων ἀπολογισμόν: γνοίη δ' ἄν τις τὸ μέγεθος τῆς μεταβολῆς ἐκ τούτων. ζήτει ἐν τῷ περὶ στρατηγίας. [εχξ. Vατ. π. 369 μαι. 24, 4 ηεψς.]"
print(p.word_tokenize(s))
print()