def tokenize(self, string):
     """Tokenize incoming string."""
     punkt = PunktLanguageVars()
     generic_tokens = punkt.word_tokenize(string)
     generic_tokens = [x for item in generic_tokens for x in ([item] if item != 'nec' else ['c', 'ne'])] # Handle 'nec' as a special case.
     specific_tokens = []
     for generic_token in generic_tokens:
         is_enclitic = False
         if generic_token not in self.exceptions:
             for enclitic in self.enclitics:
                 if generic_token.endswith(enclitic):
                     if enclitic == 'cum':
                         if generic_token in self.inclusions:
                             specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                         else:
                             specific_tokens += [generic_token]                                                                         
                     elif enclitic == 'st':
                         if generic_token.endswith('ust'):
                             specific_tokens += [generic_token[:-len(enclitic)+1]] + ['est']
                         else:
                             # Does not handle 'similist', 'qualist', etc. correctly
                             specific_tokens += [generic_token[:-len(enclitic)]] + ['est']
                     else:
                         specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                     is_enclitic = True
                     break
         if not is_enclitic:
             specific_tokens.append(generic_token)
     # Yield each token with its start and end character offsets,
     # assuming tokens are separated by a single space.
     start_point = 0
     for token in specific_tokens:
         token_length = len(token)
         yield token, start_point, start_point + token_length
         start_point += token_length + 1
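The offset arithmetic above assumes the yielded tokens are separated by single spaces when mapped back onto a reconstructed string. A minimal standalone sketch of that bookkeeping (the enclitic handling and class context are omitted):

def with_offsets(tokens):
    # Yield (token, start, end), advancing past an assumed single-space separator.
    start = 0
    for token in tokens:
        end = start + len(token)
        yield token, start, end
        start = end + 1

print(list(with_offsets(['arma', 'virum', 'que', 'cano'])))
# [('arma', 0, 4), ('virum', 5, 10), ('que', 11, 14), ('cano', 15, 19)]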
Example #2
    def _tokenize(self, text):
        """
        Use NLTK's standard tokenizer, rm punctuation.
        :param text: pre-processed text
        :return: tokenized text
        :rtype : list
        """
        sentence_tokenizer = TokenizeSentence('latin')
        sentences = sentence_tokenizer.tokenize_sentences(text.lower())

        sent_words = []
        punkt = PunktLanguageVars()
        for sentence in sentences:
            words = punkt.word_tokenize(sentence)

            assert isinstance(words, list)
            words_new = []
            for word in words:
                if (word not in self.punctuation
                        and word not in self.abbreviations
                        and word not in self.numbers):
                    words_new.append(word)

            # rm all numbers here with: re.sub(r'[0-9]', '', word)
            sent_words.append(words_new)

        return sent_words
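The same sentence-then-word flow can be reproduced with NLTK alone; TokenizeSentence('latin') above wraps a language-specific CLTK model that is not reproduced in this sketch.

from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktLanguageVars

sent_tokenizer = PunktSentenceTokenizer()  # untrained, default behaviour
punkt = PunktLanguageVars()
text = "Gallia est omnis divisa in partes tres. Quarum unam incolunt Belgae."
sent_words = [punkt.word_tokenize(sent) for sent in sent_tokenizer.tokenize(text.lower())]
print(sent_words)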
Example #3
    def _tokenize(self, text):
        """
        Use NLTK's standard tokenizer, rm punctuation.
        :param text: pre-processed text
        :return: tokenized text
        :rtype : list
        """
        sentence_tokenizer = TokenizeSentence('latin')
        sentences = sentence_tokenizer.tokenize_sentences(text.lower())

        sent_words = []
        punkt = PunktLanguageVars()
        for sentence in sentences:
            words = punkt.word_tokenize(sentence)

            assert isinstance(words, list)
            words_new = []
            for word in words:
                if (word not in self.punctuation
                        and word not in self.abbreviations
                        and word not in self.numbers):
                    words_new.append(word)

            # rm all numbers here with: re.sub(r'[0-9]', '', word)
            sent_words.append(words_new)

        return sent_words
Example #4
def tag_ner(lang, input_text, output_type=list):
    """Run NER for chosen language.
    """

    _check_latest_data(lang)

    assert lang in NER_DICT.keys(), "Invalid language. Choose from: {}".format(
        ", ".join(NER_DICT.keys()))
    types = [str, list]
    type_names = ", ".join([t.__name__ for t in types])
    assert type(input_text) in types, "Input must be: {}.".format(type_names)
    assert output_type in types, "Output must be a {}.".format(type_names)

    if type(input_text) == str:
        punkt = PunktLanguageVars()
        tokens = punkt.word_tokenize(input_text)
        new_tokens = []
        for word in tokens:
            if word.endswith("."):
                new_tokens.append(word[:-1])
                new_tokens.append(".")
            else:
                new_tokens.append(word)
        input_text = new_tokens

    ner_file_path = os.path.expanduser(NER_DICT[lang])
    with open(ner_file_path) as file_open:
        ner_str = file_open.read()
    ner_list = ner_str.split("\n")

    ner_tuple_list = []
    for count, word_token in enumerate(input_text):
        match = False
        for ner_word in ner_list:
            # the replacer slows things down, but is necessary
            if word_token == ner_word:
                ner_tuple = (word_token, "Entity")
                ner_tuple_list.append(ner_tuple)
                match = True
                break
        if not match:
            ner_tuple_list.append((word_token, ))

    if output_type is str:
        string = ""
        for tup in ner_tuple_list:
            start_space = " "
            final_space = ""
            # this is some mediocre string reconstitution
            # maybe not worth the effort
            if tup[0] in [",", ".", ";", ":", "?", "!"]:
                start_space = ""
            if len(tup) == 2:
                string += start_space + tup[0] + "/" + tup[1] + final_space
            else:
                string += start_space + tup[0] + final_space
        return string

    return ner_tuple_list
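Note the inner loop scans the whole NER list for every token; a set gives the same tagging with an O(1) membership test per token. A minimal sketch of that variant (the names here are illustrative, not part of the module above):

def tag_tokens(tokens, ner_words):
    # Tag tokens found in the entity set; others become one-element tuples.
    entities = set(ner_words)
    return [(tok, "Entity") if tok in entities else (tok,) for tok in tokens]

print(tag_tokens(["ut", "Venus", ",", "ut", "Sirius"], ["Venus", "Sirius"]))
# [('ut',), ('Venus', 'Entity'), (',',), ('ut',), ('Sirius', 'Entity')]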
Example #5
def nltk_tokenize_words(string, attached_period=False, language=None):
    """Wrap NLTK's tokenizer PunktLanguageVars(), but make final period
    its own token.
    >>> nltk_punkt("Sentence 1. Sentence 2.")
    >>> ['Sentence', 'one', '.', 'Sentence', 'two', '.']

    Optionally keep NLTK's output:
    >>> nltk_tokenize_words("Sentence 1. Sentence 2.", attached_period=True)
    ['Sentence', '1.', 'Sentence', '2.']

    TODO: Run some tests to determine whether there is a large penalty for
    re-calling PunktLanguageVars() for each use of this function. If so, this
    will need to become a class, perhaps inheriting from the PunktLanguageVars
    object. Maybe integrate with WordTokenizer.
    """
    assert isinstance(string, str), "Incoming string must be type str."
    if language == 'sanskrit':
        periods = ['.', '।', '॥']
    else:
        periods = ['.']
    punkt = PunktLanguageVars()
    tokens = punkt.word_tokenize(string)
    if attached_period:
        return tokens
    new_tokens = []
    for word in tokens:
        for char in periods:
            if word.endswith(char):
                new_tokens.append(word[:-1])
                new_tokens.append(char)
                break
        else:
            new_tokens.append(word)
    return new_tokens
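Regarding the TODO above, a rough way to measure the cost of re-creating PunktLanguageVars on every call versus reusing one instance (numbers are machine-dependent; this is only a measurement sketch):

import timeit

setup = "from nltk.tokenize.punkt import PunktLanguageVars"
fresh = timeit.timeit("PunktLanguageVars().word_tokenize('Sentence 1. Sentence 2.')",
                      setup=setup, number=10000)
shared = timeit.timeit("plv.word_tokenize('Sentence 1. Sentence 2.')",
                       setup=setup + "; plv = PunktLanguageVars()", number=10000)
print("fresh: {:.3f}s  shared: {:.3f}s".format(fresh, shared))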
Example #6
 def tokenize(self, string):
     """Tokenize incoming string."""
     punkt = PunktLanguageVars()
     generic_tokens = punkt.word_tokenize(string)
     # Rewrite as an if-else block for exceptions rather than separate list comprehensions
     generic_tokens = [x for item in generic_tokens for x in ([item] if item != 'nec' else ['c', 'ne'])] # Handle 'nec' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item != 'sodes' else ['si', 'audes'])] # Handle 'sodes' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item != 'sultis' else ['si', 'vultis'])] # Handle 'sultis' as a special case.        
     specific_tokens = []
     for generic_token in generic_tokens:
         is_enclitic = False
         if generic_token not in self.exceptions:
             for enclitic in self.enclitics:
                 if generic_token.endswith(enclitic):
                     if enclitic == 'cum':
                         if generic_token in self.inclusions:
                             specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                         else:
                             specific_tokens += [generic_token]                                                                         
                     elif enclitic == 'st':
                         if generic_token.endswith('ust'):
                             specific_tokens += [generic_token[:-len(enclitic)+1]] + ['est']
                         else:
                             # Does not handle 'similist', 'qualist', etc. correctly
                             specific_tokens += [generic_token[:-len(enclitic)]] + ['est']
                     else:
                         specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                     is_enclitic = True
                     break
         if not is_enclitic:
             specific_tokens.append(generic_token)
     return specific_tokens
Example #7
    def rm_stopwords(self, stoplist=[]):
        """Removes words or phrases from the text.

        Given a list of words or phrases, gives new text with those phrases
        removed.

        Args:
            stoplist (:obj:`list`) List of words or phrases to filter from text

        Returns:
            :obj:`self.__class__` New version of text, with stop words/phrases removed

        Example:
            >>> stopwords = ['ipsum', 'sit']
            >>> text = EnglishText('Lorem ipsum dolor sit amet...')
            >>> modified_text = text.rm_stopwords(stoplist=stopwords)
            >>> print(modified_text)
            'Lorem dolor amet...'
        """ # noqa
        filtered_words = []
        # converts text to list of words with NLTK tokenizer
        tokenizer = PunktLanguageVars()
        tokens = tokenizer.word_tokenize(str(self.data))
        # loop through each word, if not in stoplist, append
        for word in tokens:
            not_found = True
            for stopword in stoplist:
                if str(word).strip().lower() == str(stopword).strip().lower():
                    not_found = False
            if not_found:
                filtered_words.append(word)
        # return rejoined word
        return self.__class__(" ".join(filtered_words), self.options)
Example #8
def nltk_tokenize_words(string, attached_period=False, language=None):
    """Wrap NLTK's tokenizer PunktLanguageVars(), but make final period
    its own token.
    >>> nltk_punkt("Sentence 1. Sentence 2.")
    >>> ['Sentence', 'one', '.', 'Sentence', 'two', '.']
    Optionally keep NLTK's output:
    >>> nltk_tokenize_words("Sentence 1. Sentence 2.", attached_period=True)
    ['Sentence', '1.', 'Sentence', '2.']
    TODO: Run some tests to determine whether there is a large penalty for
    re-calling PunktLanguageVars() for each use of this function. If so, this
    will need to become a class, perhaps inheriting from the PunktLanguageVars
    object. Maybe integrate with WordTokenizer.
    """
    assert isinstance(string, str), "Incoming string must be type str."
    if language == 'sanskrit':
        periods = ['.', '।', '॥']
    else:
        periods = ['.']
    punkt = PunktLanguageVars()
    tokens = punkt.word_tokenize(string)
    if attached_period:
        return tokens
    new_tokens = []
    for word in tokens:
        for char in periods:
            if word.endswith(char):
                new_tokens.append(word[:-1])
                new_tokens.append(char)
                break
        else:
            new_tokens.append(word)
    return new_tokens
Example #9
 def tokenize(self, string):
     """Tokenize incoming string."""
     punkt = PunktLanguageVars()
     generic_tokens = punkt.word_tokenize(string)
     generic_tokens = [x for item in generic_tokens for x in ([item] if item != 'nec' else ['c', 'ne'])] # Handle 'nec' as a special case.
     specific_tokens = []
     for generic_token in generic_tokens:
         is_enclitic = False
         if generic_token not in self.exceptions:
             for enclitic in self.enclitics:
                 if generic_token.endswith(enclitic):
                     if enclitic == 'mst':
                         specific_tokens += [generic_token[:-len(enclitic)+1]] + ['e'+ generic_token[-len(enclitic)+1:]]
                     elif enclitic == 'cum':
                         if generic_token in self.inclusions:
                             specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                         else:
                             specific_tokens += [generic_token]                                                     
                     else:
                         specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                     is_enclitic = True
                     break
         if not is_enclitic:
             specific_tokens.append(generic_token)
     return specific_tokens
Example #10
 def __init__(self):
     """Language taken as argument, necessary used when saving word frequencies to
     ``cltk_data/user_data``."""
     self.punkt = PunktLanguageVars()
     self.punctuation = [
         ',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{',
         '}'
     ]
Example #11
def tokenize(desc):
    '''
	INPUT: Cleaned description string
	OUTPUT: Tokenized and stemmed list of words from the description
	'''
    plv = PunktLanguageVars()
    snowball = SnowballStemmer('english')
    return [snowball.stem(word) for word in plv.word_tokenize(desc.lower())]
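Example call for the stem-and-tokenize helper above (requires NLTK's Snowball stemmer; exact stems depend on the NLTK version, so no output is asserted):

from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize.punkt import PunktLanguageVars

plv = PunktLanguageVars()
snowball = SnowballStemmer('english')
print([snowball.stem(w) for w in plv.word_tokenize("Cozy cabins with hiking trails".lower())])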
Example #12
def tokenize(doc):
    '''
    INPUT: Document
    OUTPUT: Tokenized and stemmed list of words from the document 
    '''
    plv      = PunktLanguageVars()
    snowball = SnowballStemmer('english')
    return [snowball.stem(word) for word in plv.word_tokenize(doc.lower())]
Example #13
def tokenize(desc):
	'''
	INPUT: Cleaned description string
	OUTPUT: Tokenized and stemmed list of words from the description
	'''
	plv = PunktLanguageVars()
	snowball = SnowballStemmer('english')
	return [snowball.stem(word) for word in plv.word_tokenize(desc.lower())]
Example #14
File: ner.py Project: cltk/cltk
def tag_ner(lang, input_text, output_type=list):
    """Run NER for chosen language.
    """

    _check_latest_data(lang)

    assert lang in NER_DICT.keys(), \
        'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))
    types = [str, list]
    type_names = ', '.join([t.__name__ for t in types])
    assert type(input_text) in types, 'Input must be: {}.'.format(type_names)
    assert output_type in types, 'Output must be a {}.'.format(type_names)

    if type(input_text) == str:
        punkt = PunktLanguageVars()
        tokens = punkt.word_tokenize(input_text)
        new_tokens = []
        for word in tokens:
            if word.endswith('.'):
                new_tokens.append(word[:-1])
                new_tokens.append('.')
            else:
                new_tokens.append(word)
        input_text = new_tokens

    ner_file_path = os.path.expanduser(NER_DICT[lang])
    with open(ner_file_path) as file_open:
        ner_str = file_open.read()
    ner_list = ner_str.split('\n')

    ner_tuple_list = []
    for count, word_token in enumerate(input_text):
        match = False
        for ner_word in ner_list:
            # the replacer slows things down, but is necessary
            if word_token == ner_word:
                ner_tuple = (word_token, 'Entity')
                ner_tuple_list.append(ner_tuple)
                match = True
                break
        if not match:
            ner_tuple_list.append((word_token,))

    if output_type is str:
        string = ''
        for tup in ner_tuple_list:
            start_space = ' '
            final_space = ''
            # this is some mediocre string reconstitution
            # maybe not worth the effort
            if tup[0] in [',', '.', ';', ':', '?', '!']:
                start_space = ''
            if len(tup) == 2:
                string += start_space + tup[0] + '/' + tup[1] + final_space
            else:
                string += start_space + tup[0] + final_space
        return string

    return ner_tuple_list
Example #15
 def test_french_stopwords(self):
     """Test filtering French stopwords."""
     sentence = "En pensé ai e en talant que d ’ Yonec vus die avant dunt il fu nez, e de sun pere cum il vint primes a sa mere ."
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in FRENCH_STOPS]
     target_list = ['pensé', 'talant', 'd', '’', 'yonec', 'die', 'avant', 'dunt', 'nez', ',', 'pere', 'cum', 'primes',
                    'mere','.']
     self.assertEqual(no_stops, target_list)
Example #16
 def test_latin_stopwords(self):
     """Test filtering Latin stopwords."""
     sentence = 'Quo usque tandem abutere, Catilina, patientia nostra?'
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in LATIN_STOPS]
     target_list = ['usque', 'tandem', 'abutere', ',', 'catilina', ',',
                    'patientia', 'nostra', '?']
     self.assertEqual(no_stops, target_list)
Example #17
 def test_latin_stopwords(self):
     """Test filtering Latin stopwords."""
     sentence = 'Quo usque tandem abutere, Catilina, patientia nostra?'
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in LATIN_STOPS]
     target_list = ['usque', 'tandem', 'abutere', ',', 'catilina', ',',
                    'patientia', 'nostra', '?']
     self.assertEqual(no_stops, target_list)
Example #18
 def test_french_stopwords(self):
     """Test filtering French stopwords."""
     sentence = "En pensé ai e en talant que d ’ Yonec vus die avant dunt il fu nez, e de sun pere cum il vint primes a sa mere ."
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in FRENCH_STOPS]
     target_list = ['pensé', 'talant', 'd', '’', 'yonec', 'die', 'avant', 'dunt', 'nez', ',', 'pere', 'cum', 'primes',
                    'mere','.']
     self.assertEqual(no_stops, target_list)
Example #19
    def preprocess(self, text):
        if self.token:
            lemma = self.lemmatizer.lemmatize(text)
        else:
            plv = PunktLanguageVars()
            unigrams = plv.word_tokenize(text)
            lemma = self.lemmatizer.lemmatize(unigrams)

        lemma = [t[0] if t[1] == "punc" else t[1] for t in lemma]

        return " ".join(lemma)
Example #20
 def test_old_norse_stopwords(self):
     """
     Test filtering Old Norse stopwords
     Sentence extracted from Eiríks saga rauða (http://www.heimskringla.no/wiki/Eir%C3%ADks_saga_rau%C3%B0a)
     """
     sentence = 'Þat var einn morgin, er þeir Karlsefni sá fyrir ofan rjóðrit flekk nökkurn, sem glitraði við þeim'
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in OLD_NORSE_STOPS]
     target_list = ['var', 'einn', 'morgin', ',', 'karlsefni', 'rjóðrit', 'flekk', 'nökkurn', ',', 'glitraði']
     self.assertEqual(no_stops, target_list)
Example #21
 def test_old_norse_stopwords(self):
     """
     Test filtering Old Norse stopwords
     Sentence extracted from Eiríks saga rauða (http://www.heimskringla.no/wiki/Eir%C3%ADks_saga_rau%C3%B0a)
     """
     sentence = 'Þat var einn morgin, er þeir Karlsefni sá fyrir ofan rjóðrit flekk nökkurn, sem glitraði við þeim'
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in OLD_NORSE_STOPS]
     target_list = ['var', 'einn', 'morgin', ',', 'karlsefni', 'rjóðrit', 'flekk', 'nökkurn', ',', 'glitraði']
     self.assertEqual(no_stops, target_list)
Example #22
 def test_greek_stopwords(self):
     """Test filtering Greek stopwords."""
     sentence = 'Ἅρπαγος δὲ καταστρεψάμενος Ἰωνίην ἐποιέετο στρατηίην \
     ἐπὶ Κᾶρας καὶ Καυνίους καὶ Λυκίους, ἅμα ἀγόμενος καὶ Ἴωνας καὶ \
     Αἰολέας.'
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in GREEK_STOPS]
     target_list = ['ἅρπαγος', 'καταστρεψάμενος', 'ἰωνίην', 'ἐποιέετο',
                    'στρατηίην', 'κᾶρας', 'καυνίους', 'λυκίους', ',',
                    'ἅμα', 'ἀγόμενος', 'ἴωνας', 'αἰολέας.']
     self.assertEqual(no_stops, target_list)
Example #23
 def test_greek_stopwords(self):
     """Test filtering Greek stopwords."""
     sentence = 'Ἅρπαγος δὲ καταστρεψάμενος Ἰωνίην ἐποιέετο στρατηίην \
     ἐπὶ Κᾶρας καὶ Καυνίους καὶ Λυκίους, ἅμα ἀγόμενος καὶ Ἴωνας καὶ \
     Αἰολέας.'
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in GREEK_STOPS]
     target_list = ['ἅρπαγος', 'καταστρεψάμενος', 'ἰωνίην', 'ἐποιέετο',
                    'στρατηίην', 'κᾶρας', 'καυνίους', 'λυκίους', ',',
                    'ἅμα', 'ἀγόμενος', 'ἴωνας', 'αἰολέας.']
     self.assertEqual(no_stops, target_list)
Example #24
    def lemmatize(self, input_text, return_raw=False, return_string=False):
        """Take incoming string or list of tokens. Lookup done against a
        key-value list of lemmata-headword. If a string, tokenize with
        ``PunktLanguageVars()``. If a final period appears on a token, remove
        it, then re-add once replacement done.
        TODO: rm check for final period, change PunktLanguageVars()
        """
        assert type(input_text) in [
            list, str
        ], logger.error("Input must be a list or string.")
        if type(input_text) is str:
            punkt = PunktLanguageVars()
            tokens = punkt.word_tokenize(input_text)
        else:
            tokens = input_text

        lemmatized_tokens = []
        for token in tokens:
            # check for final period
            final_period = False
            if token[-1] == ".":
                final_period = True
                token = token[:-1]

            # look for token in lemma dict keys
            if token.lower() in self.lemmata.keys():
                headword = self.lemmata[token.lower()]

                # re-add final period if rm'd
                if final_period:
                    headword += "."

                # append to return list
                if not return_raw:
                    lemmatized_tokens.append(headword)
                else:
                    lemmatized_tokens.append(token + "/" + headword)
            # if token not found in lemma-headword list
            else:
                # re-add final period if rm'd
                if final_period:
                    token += "."

                if not return_raw:
                    lemmatized_tokens.append(token)
                else:
                    lemmatized_tokens.append(token + "/" + token)
        if not return_string:
            return lemmatized_tokens
        elif return_string:
            return " ".join(lemmatized_tokens)
Example #25
 def test_akkadian_stopwords(self):
     """
     Test filtering Akkadian stopwords
     Sentence extracted from the law code of Hammurabi, law 3 (Martha Roth 2nd Edition 1997, Law Collections from
     Mesopotamia and Asia Minor).
     """
     sentence = "šumma awīlum ina dīnim ana šībūt sarrātim ūṣiamma awat iqbû la uktīn šumma dīnum šû dīn napištim awīlum šû iddâk"
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in AKKADIAN_STOPS]
     target_list = ['awīlum', 'dīnim', 'šībūt', 'sarrātim', 'ūṣiamma', 'awat', 'iqbû', 'uktīn', 'dīnum',
                    'dīn', 'napištim', 'awīlum', 'iddâk']
     self.assertEqual(no_stops, target_list)
Example #26
    def preprocess(self, text):
        if self.path:
            with open(self.path, "r") as f:
                sw = set(f.read().split("\n")[:-1])
        else:
            sw = set(stopwords.words(self.lang))

        if self.token:
            unigrams = text
        else:
            plv = PunktLanguageVars()
            unigrams = plv.word_tokenize(text.lower())

        return " ".join([unigram for unigram in unigrams if not unigram in sw])
Example #27
    def lemmatize(self, input_text, return_raw=False, return_string=False):
        """Take incoming string or list of tokens. Lookup done against a
        key-value list of lemmata-headword. If a string, tokenize with
        ``PunktLanguageVars()``. If a final period appears on a token, remove
        it, then re-add once replacement done.
        TODO: rm check for final period, change PunktLanguageVars() to nltk_tokenize_words()
        """
        assert type(input_text) in [list, str], \
            logger.error('Input must be a list or string.')
        if type(input_text) is str:
            punkt = PunktLanguageVars()
            tokens = punkt.word_tokenize(input_text)
        else:
            tokens = input_text

        lemmatized_tokens = []
        for token in tokens:
            # check for final period
            final_period = False
            if token[-1] == '.':
                final_period = True
                token = token[:-1]

            # look for token in lemma dict keys
            if token.lower() in self.lemmata.keys():
                headword = self.lemmata[token.lower()]

                # re-add final period if rm'd
                if final_period:
                    headword += '.'

                # append to return list
                if not return_raw:
                    lemmatized_tokens.append(headword)
                else:
                    lemmatized_tokens.append(token + '/' + headword)
            # if token not found in lemma-headword list
            else:
                # re-add final period if rm'd
                if final_period:
                    token += '.'

                if not return_raw:
                    lemmatized_tokens.append(token)
                else:
                    lemmatized_tokens.append(token + '/' + token)
        if not return_string:
            return lemmatized_tokens
        elif return_string:
            return ' '.join(lemmatized_tokens)
Example #28
 def test_akkadian_stopwords(self):
     """
     Test filtering Akkadian stopwords
     Sentence extracted from the law code of Hammurabi, law 3 (Martha Roth 2nd Edition 1997, Law Collections from
     Mesopotamia and Asia Minor).
     """
     sentence = "šumma awīlum ina dīnim ana šībūt sarrātim ūṣiamma awat iqbû la uktīn šumma dīnum šû dīn napištim awīlum šû iddâk"
     lowered = sentence.lower()
     punkt = PunktLanguageVars()
     tokens = punkt.word_tokenize(lowered)
     no_stops = [w for w in tokens if w not in AKKADIAN_STOPS]
     target_list = ['awīlum', 'dīnim', 'šībūt', 'sarrātim', 'ūṣiamma', 'awat', 'iqbû', 'uktīn', 'dīnum',
                    'dīn', 'napištim', 'awīlum', 'iddâk']
     self.assertEqual(no_stops, target_list)
Example #29
 def rm_stopwords(self, stoplist=[]):
     filtered_words = []
     # converts text to list of words with NLTK tokenizer
     tokenizer = PunktLanguageVars()
     tokens = tokenizer.word_tokenize(str(self.data))
     # loop through each word, if not in stoplist, append
     for word in tokens:
         not_found = True
         for stopword in stoplist:
             if str(word).strip().lower() == str(stopword).strip().lower():
                 not_found = False
         if not_found:
             filtered_words.append(word)
     # return rejoined word
     return self.__class__(" ".join(filtered_words), self.metadata)
Example #30
    def _build_concordance(self, text_str):
        """
        Inherit or mimic the logic of ConcordanceIndex() at http://www.nltk.org/_modules/nltk/text.html
        and/or ConcordanceSearchView() & SearchCorpus() at https://github.com/nltk/nltk/blob/develop/nltk/app/concordance_app.py
        :param text_string: Text to be turned into a concordance
        :type text_string: str
        :return: list
        """
        p = PunktLanguageVars()
        orig_tokens = p.word_tokenize(text_str)
        c = ConcordanceIndex(orig_tokens)

        #! rm dupes after index, before loop
        tokens = set(orig_tokens)
        tokens = [x for x in tokens if x not in [',', '.', ';', ':', '"', "'", '[', ']']]  # this needs to be changed or rm'ed

        return c.return_concordance_all(tokens)
Example #31
    def tokenize(self, string):
        """Tokenize incoming string."""
        punkt = PunktLanguageVars()
        generic_tokens = punkt.word_tokenize(string)
        specific_tokens = []
        for generic_token in generic_tokens:
            is_enclitic = False
            if generic_token not in self.exceptions:
                for enclitic in self.enclitics:
                    if generic_token.endswith(enclitic):
                        new_tokens = [generic_token[:-len(enclitic)]] + ['-' + enclitic]
                        specific_tokens += new_tokens
                        is_enclitic = True
                        break
            if not is_enclitic:
                specific_tokens.append(generic_token)

        return specific_tokens
Example #32
    def tokenize(self, string):
        """Tokenize incoming string."""
        punkt = PunktLanguageVars()
        generic_tokens = punkt.word_tokenize(string)
        specific_tokens = []
        for generic_token in generic_tokens:
            is_enclitic = False
            if generic_token not in self.exceptions:
                for enclitic in self.enclitics:
                    if generic_token.endswith(enclitic):
                        new_tokens = [generic_token[:-len(enclitic)]
                                      ] + ['-' + enclitic]
                        specific_tokens += new_tokens
                        is_enclitic = True
                        break
            if not is_enclitic:
                specific_tokens.append(generic_token)

        return specific_tokens
Example #33
def build_concordance(text_str: str) -> List[List[str]]:
    """
    Inherit or mimic the logic of ConcordanceIndex() at:
     http://www.nltk.org/_modules/nltk/text.html
    and/or ConcordanceSearchView() & SearchCorpus() at:
     https://github.com/nltk/nltk/blob/develop/nltk/app/concordance_app.py
    :param text_string: Text to be turned into a concordance
    :type text_string: str
    :return: list
    """
    punkt_vars = PunktLanguageVars()  # type: PunktLanguageVars
    orig_tokens = punkt_vars.word_tokenize(text_str)  # type: List[str]
    concordance_index = ConcordanceIndex(orig_tokens)  # type: Any
    #! rm dupes after index, before loop
    tokens_set = set(orig_tokens)  # type: Set[str]
    punct_list = [',', '.', ';', ':', '"', "'", '[', ']']  # type: List[str]
    # this needs to be changed or rm'ed
    tokens = [x for x in tokens_set if x not in punct_list]  # List[str]
    index = concordance_index.return_concordance_all(tokens)  # List[List[str]]
    return index
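return_concordance_all comes from a project-specific ConcordanceIndex subclass; with NLTK's stock ConcordanceIndex the closest primitive is offsets(), shown here as a minimal sketch:

from nltk.text import ConcordanceIndex
from nltk.tokenize.punkt import PunktLanguageVars

tokens = PunktLanguageVars().word_tokenize(
    "arma virumque cano qui primus ab oris arma moresque")
index = ConcordanceIndex(tokens, key=lambda tok: tok.lower())
print(index.offsets("arma"))  # token positions of 'arma' -> [0, 7]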
Example #34
    def _build_concordance(self, text_str):
        """
        Inherit or mimic the logic of ConcordanceIndex() at http://www.nltk.org/_modules/nltk/text.html
        and/or ConcordanceSearchView() & SearchCorpus() at https://github.com/nltk/nltk/blob/develop/nltk/app/concordance_app.py
        :param text_string: Text to be turned into a concordance
        :type text_string: str
        :return: list
        """
        p = PunktLanguageVars()
        orig_tokens = p.word_tokenize(text_str)
        c = ConcordanceIndex(orig_tokens)

        #! rm dupes after index, before loop
        tokens = set(orig_tokens)
        tokens = [
            x for x in tokens
            if x not in [',', '.', ';', ':', '"', "'", '[', ']']
        ]  # this needs to be changed or rm'ed

        return c.return_concordance_all(tokens)
Example #35
 def tokenize(self, string):
     """Tokenize incoming string."""
     punkt = PunktLanguageVars()
     generic_tokens = punkt.word_tokenize(string)
     # Rewrite as an if-else block for exceptions rather than separate list comprehensions
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'nec' else ['c', item[:-1]])] # Handle 'nec' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'sodes' else [item[0]+'i', 'audes'])] # Handle 'sodes' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'sultis' else [item[0]+'i', 'vultis'])] # Handle 'sultis' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'satin' else [item[:-1] + 's', 'ne'])] # Handle 'satin' as a special case.
     generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'scin' else [item[:-1] + 's', 'ne'])] # Handle 'scin' as a special case.      
     specific_tokens = []
     for generic_token in generic_tokens:
         is_enclitic = False
         if generic_token not in self.exceptions:
             for enclitic in self.enclitics:
                 if generic_token.endswith(enclitic):
                     if enclitic == 'cum':
                         if generic_token.lower() in self.inclusions:
                             specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                         else:
                             specific_tokens += [generic_token]
                     elif enclitic == 'n':
                             specific_tokens += [generic_token[:-len(enclitic)]] + ['ne']                                                                                                    
                     elif enclitic == 'st':
                         if generic_token.endswith('ust'):
                             specific_tokens += [generic_token[:-len(enclitic)+1]] + ['est']
                         else:
                             # Does not handle 'similist', 'qualist', etc. correctly
                             specific_tokens += [generic_token[:-len(enclitic)]] + ['est']
                     else:
                         specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
                     is_enclitic = True
                     break
         if not is_enclitic:
             specific_tokens.append(generic_token)
     return specific_tokens
Example #36
def setup_tokenizers(terminal_punctuation):
	PunktLanguageVars.sent_end_chars = terminal_punctuation
	PunktLanguageVars.re_boundary_realignment = re.compile(r'[›»》’”\'\")\)\]\}\>]+?(?:\s+|(?=--)|$)', re.MULTILINE)
	global word_tokenizer
	global sentence_tokenizers

	#Accessing private variables of PunktLanguageVars because nltk has a faulty design pattern that necessitates it.
	#Issue reported here: https://github.com/nltk/nltk/issues/2068
	word_tokenizer = PunktLanguageVars()
	word_tokenizer._re_word_tokenizer = re.compile(PunktLanguageVars._word_tokenize_fmt % {
	    'NonWord': r"(?:[\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
	    'MultiChar': PunktLanguageVars._re_multi_char_punct,
	    'WordStart': r"[^\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡]",
	}, re.UNICODE | re.VERBOSE)
	word_tokenizer._re_period_context = re.compile(PunktLanguageVars._period_context_fmt % {
		'NonWord': r"(?:[\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
		'SentEndChars': word_tokenizer._re_sent_end_chars, 
	}, re.UNICODE | re.VERBOSE)

	x = PunktLanguageVars()
	x._re_word_tokenizer = re.compile(PunktLanguageVars._word_tokenize_fmt % {
	    'NonWord': r"(?:[\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
	    'MultiChar': PunktLanguageVars._re_multi_char_punct,
	    'WordStart': r"[^\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡]",
	}, re.UNICODE | re.VERBOSE)
	x._re_period_context = re.compile(PunktLanguageVars._period_context_fmt % {
		'NonWord': r"(?:[\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
		'SentEndChars': x._re_sent_end_chars, 
	}, re.UNICODE | re.VERBOSE)

	#Read tokenizers from pickle files (also include an untrained tokenizer). Mapping from language name to tokenizer
	sentence_tokenizers = dict({None: PunktSentenceTokenizer(lang_vars=PunktLanguageVars())}, **{
		current_file_name[:current_file_name.index('.')]: pickle.load(open(join(current_path, current_file_name), mode='rb'))
		for current_path, current_dir_names, current_file_names in os.walk(sentence_tokenizer_dir) 
		for current_file_name in current_file_names if current_file_name.endswith('.pickle')
	})
	for s in sentence_tokenizers.values():
		s._lang_vars._re_period_context = x._re_period_context
		s._lang_vars._re_word_tokenizer = x._re_word_tokenizer
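For the simpler case where only the sentence-end characters need to change, PunktSentenceTokenizer accepts a lang_vars object directly, so no private attributes are involved; a hedged sketch (the regex overrides above go further and rewrite the word tokenizer itself):

from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktLanguageVars

class GreekLanguageVars(PunktLanguageVars):
    sent_end_chars = ('.', ';')  # ';' serves as the Greek question mark

tokenizer = PunktSentenceTokenizer(lang_vars=GreekLanguageVars())
print(tokenizer.tokenize("πρῶτον μὲν λέγε. τί δέ;"))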
Example #37
class Frequency:
    """Methods for making word frequency lists."""
    def __init__(self):
        """Language taken as argument, necessary used when saving word frequencies to
        ``cltk_data/user_data``."""
        self.punkt = PunktLanguageVars()
        self.punctuation = [
            ',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{',
            '}'
        ]

    def counter_from_str(self, string):
        """Build word frequency list from incoming string."""
        string_list = [
            chars for chars in string if chars not in self.punctuation
        ]
        string_joined = ''.join(string_list)
        tokens = self.punkt.word_tokenize(string_joined)
        return Counter(tokens)

    def counter_from_corpus(self, corpus):
        """Build word frequency list from one of several available corpora.
        TODO: Make this count iteratively, not all at once
        """
        assert corpus in ['phi5', 'tlg'], \
            "Corpus '{0}' not available. Choose from 'phi5' or 'tlg'.".format(corpus)

        all_strings = self._assemble_corpus_string(corpus=corpus)
        return self.counter_from_str(all_strings)

    def _assemble_corpus_string(self, corpus):
        """Takes a list of filepaths, returns a string containing contents of
        all files."""

        if corpus == 'phi5':
            filepaths = assemble_phi5_author_filepaths()
            file_cleaner = phi5_plaintext_cleanup
        elif corpus == 'tlg':
            filepaths = assemble_tlg_author_filepaths()
            file_cleaner = tlg_plaintext_cleanup

        for filepath in filepaths:
            with open(filepath) as file_open:
                file_read = file_open.read().lower()
            file_clean = file_cleaner(file_read)
            yield file_clean
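Usage sketch for counter_from_str above; the corpus methods need local PHI5/TLG data, so only the string path is shown, with the class's setup inlined:

from collections import Counter
from nltk.tokenize.punkt import PunktLanguageVars

punkt = PunktLanguageVars()
punctuation = [',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{', '}']
text = "Gallia est omnis divisa in partes tres."
string_joined = ''.join(ch for ch in text.lower() if ch not in punctuation)
print(Counter(punkt.word_tokenize(string_joined)))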
Example #38
class Frequency:
    """Methods for making word frequency lists."""

    def __init__(self):
        """Language taken as argument, necessary used when saving word frequencies to
        ``cltk_data/user_data``."""
        self.punkt = PunktLanguageVars()
        self.punctuation = [',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{', '}']

    def counter_from_str(self, string):
        """Build word frequency list from incoming string."""
        string_list = [chars for chars in string if chars not in self.punctuation]
        string_joined = ''.join(string_list)
        tokens = self.punkt.word_tokenize(string_joined)
        return Counter(tokens)


    def counter_from_corpus(self, corpus):
        """Build word frequency list from one of several available corpora.
        TODO: Make this count iteratively, not all at once
        """
        assert corpus in ['phi5', 'tlg'], \
            "Corpus '{0}' not available. Choose from 'phi5' or 'tlg'.".format(corpus)

        all_strings = self._assemble_corpus_string(corpus=corpus)
        return self.counter_from_str(all_strings)

    def _assemble_corpus_string(self, corpus):
        """Takes a list of filepaths, returns a string containing contents of
        all files."""

        if corpus == 'phi5':
            filepaths = assemble_phi5_author_filepaths()
            file_cleaner = phi5_plaintext_cleanup
        elif corpus == 'tlg':
            filepaths = assemble_tlg_author_filepaths()
            file_cleaner = tlg_plaintext_cleanup

        for filepath in filepaths:
            with open(filepath) as file_open:
                file_read = file_open.read().lower()
            file_clean = file_cleaner(file_read)
            yield file_clean
Example #39
#!/usr/bin/python3

import concurrent.futures
from PIL import Image
import os
import re
import pickle
import docx
import logging
from pathlib import Path
import pytesseract
from nltk.tokenize.punkt import PunktLanguageVars
from string import punctuation
from multiprocessing import Lock

lemmatizer = PunktLanguageVars()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
format_log = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
log_path = Path(Path.cwd() / 'ocr.log')
fileHandler = logging.FileHandler(log_path)
fileHandler.setFormatter(format_log)
logger.addHandler(fileHandler)
stream = logging.StreamHandler()
stream.setFormatter(format_log)
logger.addHandler(stream)

lock = Lock()
new_paragraphs = []

paragraphs = []
Example #40
def tag_ner(lang, input_text, output_type=list):
    """Run NER for chosen language.

    Choosing output_type=list, returns a list of tuples:
    >>> tag_ner('latin', input_text=text_str, output_type=list)
    >>> [('ut',), ('Venus', 'Entity'), (',',), ('ut',), ('Sirius', 'Entity'),
    (',',), ('ut',), ('Spica', 'Entity')]
    """

    _check_latest_data(lang)

    assert lang in NER_DICT.keys(), \
        'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))
    types = [str, list]
    type_names = ', '.join([t.__name__ for t in types])
    assert type(input_text) in types, 'Input must be: {}.'.format(type_names)
    assert output_type in types, 'Output must be a {}.'.format(type_names)

    if type(input_text) == str:
        punkt = PunktLanguageVars()
        tokens = punkt.word_tokenize(input_text)
        new_tokens = []
        for word in tokens:
            if word.endswith('.'):
                new_tokens.append(word[:-1])
                new_tokens.append('.')
            else:
                new_tokens.append(word)
        input_text = new_tokens

    ner_file_path = os.path.expanduser(NER_DICT[lang])
    with open(ner_file_path, encoding='utf-8') as file_open:
        ner_str = file_open.read()
    ner_list = ner_str.split('\n')

    ner_tuple_list = []
    for count, word_token in enumerate(input_text):
        match = False
        for ner_word in ner_list:
            # the replacer slows things down, but is necessary
            if word_token == ner_word:
                ner_tuple = (word_token, 'Entity')
                ner_tuple_list.append(ner_tuple)
                match = True
                break
        if not match:
            ner_tuple_list.append((word_token, ))

    if output_type is str:
        string = ''
        for tup in ner_tuple_list:
            start_space = ' '
            final_space = ''
            # this is some mediocre string reconstitution
            # maybe not worth the effort
            if tup[0] in [',', '.', ';', ':', '?', '!']:
                start_space = ''
            if len(tup) == 2:
                string += start_space + tup[0] + '/' + tup[1] + final_space
            else:
                string += start_space + tup[0] + final_space
        return string

    return ner_tuple_list
Example #41
def tokenize_latin_words(string):
    from cltk.tokenize.latin_exceptions import latin_exceptions

    assert isinstance(string, str), "Incoming string must be type str."

    def matchcase(word):
        # From Python Cookbook
        def replace(m):
            text = m.group()
            if text.isupper():
                return word.upper()
            elif text.islower():
                return word.lower()
            elif text[0].isupper():
                return word.capitalize()
            else:
                return word

        return replace

    replacements = [(r'mecum', 'cum me'),
                    (r'tecum', 'cum te'),
                    (r'secum', 'cum se'),
                    (r'nobiscum', 'cum nobis'),
                    (r'vobiscum', 'cum vobis'),
                    (r'quocum', 'cum quo'),
                    (r'quacum', 'cum qua'),
                    (r'quicum', 'cum qui'),
                    (r'quibuscum', 'cum quibus'),
                    (r'sodes', 'si audes'),
                    (r'satin', 'satis ne'),
                    (r'scin', 'scis ne'),
                    (r'sultis', 'si vultis'),
                    (r'similist', 'similis est'),
                    (r'qualist', 'qualis est')
                    ]

    for replacement in replacements:
        string = re.sub(replacement[0], matchcase(replacement[1]), string, flags=re.IGNORECASE)


    punkt_param = PunktParameters()
    abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', "m'", 'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']
    punkt_param.abbrev_types = set(abbreviations)
    sent_tokenizer = PunktSentenceTokenizer(punkt_param)

    word_tokenizer = PunktLanguageVars()
    sents = sent_tokenizer.tokenize(string)

    enclitics = ['que', 'n', 'ue', 've', 'st']
    exceptions = enclitics
    exceptions = list(set(exceptions + latin_exceptions))

    tokens = []

    for sent in sents:
        temp_tokens = word_tokenizer.word_tokenize(sent)
        if temp_tokens[0].endswith('ne'):
            if temp_tokens[0].lower() not in exceptions:
                temp = [temp_tokens[0][:-2], '-ne']
                temp_tokens = temp + temp_tokens[1:]

        if temp_tokens[-1].endswith('.'):
            final_word = temp_tokens[-1][:-1]
            del temp_tokens[-1]
            temp_tokens += [final_word, '.']

        for token in temp_tokens:
            tokens.append(token)

    # Break enclitic handling into own function?
    specific_tokens = []

    for token in tokens:
        is_enclitic = False
        if token.lower() not in exceptions:
            for enclitic in enclitics:
                if token.endswith(enclitic):
                    if enclitic == 'n':
                        specific_tokens += [token[:-len(enclitic)]] + ['-ne']
                    elif enclitic == 'st':
                        if token.endswith('ust'):
                            specific_tokens += [token[:-len(enclitic) + 1]] + ['est']
                        else:
                            specific_tokens += [token[:-len(enclitic)]] + ['est']
                    else:
                        specific_tokens += [token[:-len(enclitic)]] + ['-' + enclitic]
                    is_enclitic = True
                    break
        if not is_enclitic:
            specific_tokens.append(token)

    return specific_tokens
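The enclitic loop above, reduced to a standalone rule for a single token ('-que', '-ue', '-ve', '-ne' are split off; a final '-st' becomes 'est'); the exceptions list is omitted here:

def split_enclitic(token, enclitics=('que', 'n', 'ue', 've', 'st')):
    for enclitic in enclitics:
        if token.endswith(enclitic):
            if enclitic == 'n':
                return [token[:-1], '-ne']
            if enclitic == 'st':
                stem = token[:-1] if token.endswith('ust') else token[:-2]
                return [stem, 'est']
            return [token[:-len(enclitic)], '-' + enclitic]
    return [token]

print(split_enclitic('virumque'))   # ['virum', '-que']
print(split_enclitic('laborumst'))  # ['laborum', 'est']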
Example #42
 def tokenize(self, string):
     """Tokenize incoming string."""
     
     def matchcase(word):
         # From Python Cookbook
         def replace(m):
             text = m.group()
             if text.isupper():
                 return word.upper()
             elif text.islower():
                 return word.lower()
             elif text[0].isupper():
                 return word.capitalize()
             else:
                 return word
         return replace
     
     replacements = [(r'mecum', 'cum me'),
             (r'tecum', 'cum te'),
             (r'secum', 'cum se'),
             (r'nobiscum', 'cum nobis'),
             (r'vobiscum', 'cum vobis'),
             (r'quocum', 'cum quo'),
             (r'quacum', 'cum qua'), 
             (r'quicum', 'cum qui'),
             (r'quibuscum', 'cum quibus'),
             (r'sodes', 'si audes'),
             (r'satin', 'satis ne'),
             (r'scin', 'scis ne'),
             (r'sultis', 'si vultis'),
             (r'similist', 'similis est'),
             (r'qualist', 'qualis est')
             ]
             
     for replacement in replacements:
         string = re.sub(replacement[0], matchcase(replacement[1]), string, flags=re.IGNORECASE)
         
     print(string)
     
     punkt = PunktLanguageVars()
     generic_tokens = punkt.word_tokenize(string)
                 
     specific_tokens = []
     for generic_token in generic_tokens:
         is_enclitic = False
         if generic_token.lower() not in self.exceptions:
             for enclitic in self.enclitics:
                 if generic_token.endswith(enclitic):
                     if enclitic == 'n':
                             specific_tokens += [generic_token[:-len(enclitic)]] + ['-ne']                                                                                                    
                     elif enclitic == 'st':
                         if generic_token.endswith('ust'):
                             specific_tokens += [generic_token[:-len(enclitic)+1]] + ['est']
                         else:
                             specific_tokens += [generic_token[:-len(enclitic)]] + ['est']
                     else:
                         specific_tokens += [generic_token[:-len(enclitic)]] + ['-' + enclitic]
                     is_enclitic = True
                     break
         if not is_enclitic:
             specific_tokens.append(generic_token)
     return specific_tokens
Example #43
class LatinLanguageVars(PunktLanguageVars):
    _re_non_word_chars = PunktLanguageVars()._re_non_word_chars.replace(
        "'", "")
Example #44
def tokenize_latin_words(string):
    """
    Tokenizer divides the string into a list of substrings

    >>> from cltk.corpus.utils.formatter import remove_non_ascii
    >>> text =  'Dices ἐστιν ἐμός pulchrum esse inimicos ulcisci.'
    >>> tokenize_latin_words(text)
    ['Dices', 'ἐστιν', 'ἐμός', 'pulchrum', 'esse', 'inimicos', 'ulcisci', '.']

    :param string: This accepts the string value that needs to be tokenized
    :returns: A list of substrings extracted from the string
    """
    from cltk.tokenize.latin_exceptions import latin_exceptions

    assert isinstance(string, str), "Incoming string must be type str."

    def matchcase(word):
        # From Python Cookbook
        def replace(m):
            text = m.group()
            if text.isupper():
                return word.upper()
            elif text.islower():
                return word.lower()
            elif text[0].isupper():
                return word.capitalize()
            else:
                return word

        return replace

    replacements = [(r'\bmecum\b', 'cum me'), (r'\btecum\b', 'cum te'),
                    (r'\bsecum\b', 'cum se'), (r'\bnobiscum\b', 'cum nobis'),
                    (r'\bvobiscum\b', 'cum vobis'), (r'\bquocum\b', 'cum quo'),
                    (r'\bquacum\b', 'cum qua'), (r'\bquicum\b', 'cum qui'),
                    (r'\bquibuscum\b', 'cum quibus'),
                    (r'\bsodes\b', 'si audes'), (r'\bsatin\b', 'satis ne'),
                    (r'\bscin\b', 'scis ne'), (r'\bsultis\b', 'si vultis'),
                    (r'\bsimilist\b', 'similis est'),
                    (r'\bqualist\b', 'qualis est')]

    for replacement in replacements:
        string = re.sub(replacement[0],
                        matchcase(replacement[1]),
                        string,
                        flags=re.IGNORECASE)

    punkt_param = PunktParameters()
    abbreviations = [
        'c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', "m'",
        'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul',
        'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop'
    ]
    punkt_param.abbrev_types = set(abbreviations)
    sent_tokenizer = PunktSentenceTokenizer(punkt_param)

    word_tokenizer = PunktLanguageVars()
    sents = sent_tokenizer.tokenize(string)

    enclitics = ['que', 'n', 'ue', 've', 'st']
    exceptions = enclitics
    exceptions = list(set(exceptions + latin_exceptions))

    tokens = []

    for sent in sents:
        temp_tokens = word_tokenizer.word_tokenize(sent)
        # Need to check that tokens exist before handling them;
        # needed to make stream.readlines work in PlaintextCorpusReader

        if temp_tokens:
            if temp_tokens[0].endswith('ne') and len(temp_tokens[0]) > 2:
                if temp_tokens[0].lower() not in exceptions:
                    temp = [temp_tokens[0][:-2], '-ne']
                    temp_tokens = temp + temp_tokens[1:]

            if temp_tokens[-1].endswith('.'):
                final_word = temp_tokens[-1][:-1]
                del temp_tokens[-1]
                temp_tokens += [final_word, '.']

            for token in temp_tokens:
                tokens.append(token)

    # Break enclitic handling into own function?
    specific_tokens = []

    for token in tokens:
        is_enclitic = False
        if token.lower() not in exceptions:
            for enclitic in enclitics:
                if token.endswith(enclitic):
                    if enclitic == 'n':
                        specific_tokens += [token[:-len(enclitic)]] + ['-ne']
                    elif enclitic == 'st':
                        if token.endswith('ust'):
                            specific_tokens += [token[:-len(enclitic) + 1]
                                                ] + ['est']
                        else:
                            specific_tokens += [token[:-len(enclitic)]
                                                ] + ['est']
                    else:
                        specific_tokens += [token[:-len(enclitic)]
                                            ] + ['-' + enclitic]
                    is_enclitic = True
                    break
        if not is_enclitic:
            specific_tokens.append(token)

    return specific_tokens
Example #45
import re
import nltk
import os
from bs4 import BeautifulSoup
from nltk.tokenize.punkt import PunktLanguageVars
from cltk.stop.middle_english.stops import STOPS_LIST
import json
STOPS_LIST += ['!', ',' ,'.' ,'?', ';', "'", ':', '--', '[', ']']
with open('./stopwords.txt') as f:
    lines = f.readlines()
    for line in lines:
        STOPS_LIST.append(line.strip('\n'))


p = PunktLanguageVars()

root = "./"
plays = os.listdir(root)
for play in plays:
    if play == 'data' or re.match(r'\..*', play):
        continue
    playdir = root + play + '/'
    if not os.path.isdir(playdir):
        continue
    print(play + " ...")
    scenes = os.listdir(playdir)
    role = dict()
    speechbuf = []
    name = ""
    for file in scenes:
        scene = playdir + '/' + file
Example #46
 def __init__(self):
     """Language taken as argument, necessary used when saving word frequencies to
     ``cltk_data/user_data``."""
     self.punkt = PunktLanguageVars()
     self.punctuation = [',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{', '}']
Example #47
def tokenize_latin_words(string):
    """
    Tokenizer divides the string into a list of substrings
  
    >>> from cltk.corpus.utils.formatter import remove_non_ascii
    >>> text =  'Dices ἐστιν ἐμός pulchrum esse inimicos ulcisci.'
    >>> remove_non_ascii(text)
    'Dices   pulchrum esse inimicos ulcisci.'
  
    :param string: This accepts the string value that needs to be tokenized
    :returns: A list of substrings extracted from the string
    """
    from cltk.tokenize.latin_exceptions import latin_exceptions

    assert isinstance(string, str), "Incoming string must be type str."

    def matchcase(word):
        # From Python Cookbook
        def replace(m):
            text = m.group()
            if text.isupper():
                return word.upper()
            elif text.islower():
                return word.lower()
            elif text[0].isupper():
                return word.capitalize()
            else:
                return word

        return replace

    replacements = [(r'mecum', 'cum me'),
                    (r'tecum', 'cum te'),
                    (r'secum', 'cum se'),
                    (r'nobiscum', 'cum nobis'),
                    (r'vobiscum', 'cum vobis'),
                    (r'quocum', 'cum quo'),
                    (r'quacum', 'cum qua'),
                    (r'quicum', 'cum qui'),
                    (r'quibuscum', 'cum quibus'),
                    (r'sodes', 'si audes'),
                    (r'satin', 'satis ne'),
                    (r'scin', 'scis ne'),
                    (r'sultis', 'si vultis'),
                    (r'similist', 'similis est'),
                    (r'qualist', 'qualis est')
                    ]

    for replacement in replacements:
        string = re.sub(replacement[0], matchcase(replacement[1]), string, flags=re.IGNORECASE)


    punkt_param = PunktParameters()
    abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', "m'", 'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']
    punkt_param.abbrev_types = set(abbreviations)
    sent_tokenizer = PunktSentenceTokenizer(punkt_param)

    word_tokenizer = PunktLanguageVars()
    sents = sent_tokenizer.tokenize(string)

    enclitics = ['que', 'n', 'ue', 've', 'st']
    exceptions = enclitics
    exceptions = list(set(exceptions + latin_exceptions))

    tokens = []

    for sent in sents:
        temp_tokens = word_tokenizer.word_tokenize(sent)
        # Need to check that tokens exist before handling them; needed to make stream.readlines work in PlaintextCorpusReader
        
        if temp_tokens:
            if temp_tokens[0].endswith('ne'):
                if temp_tokens[0].lower() not in exceptions:
                    temp = [temp_tokens[0][:-2], '-ne']
                    temp_tokens = temp + temp_tokens[1:]

            if temp_tokens[-1].endswith('.'):
                final_word = temp_tokens[-1][:-1]
                del temp_tokens[-1]
                temp_tokens += [final_word, '.']

            for token in temp_tokens:
                tokens.append(token)

    # Break enclitic handling into own function?
    specific_tokens = []

    for token in tokens:
        is_enclitic = False
        if token.lower() not in exceptions:
            for enclitic in enclitics:
                if token.endswith(enclitic):
                    if enclitic == 'n':
                        specific_tokens += [token[:-len(enclitic)]] + ['-ne']
                    elif enclitic == 'st':
                        if token.endswith('ust'):
                            specific_tokens += [token[:-len(enclitic) + 1]] + ['est']
                        else:
                            specific_tokens += [token[:-len(enclitic)]] + ['est']
                    else:
                        specific_tokens += [token[:-len(enclitic)]] + ['-' + enclitic]
                    is_enclitic = True
                    break
        if not is_enclitic:
            specific_tokens.append(token)

    return specific_tokens
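
# Usage sketch for tokenize_latin_words (expected results assume that none of
# these forms appears in latin_exceptions): the replacements table expands
# 'mecum' to 'cum me' before tokenization, and the enclitic pass splits off '-que'.
#
#     tokenize_latin_words('Quis mecum venit?')
#     # ['Quis', 'cum', 'me', 'venit', '?']
#     tokenize_latin_words('Senatus populusque Romanus.')
#     # ['Senatus', 'populus', '-que', 'Romanus', '.']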
# -*- coding: utf-8 -*-
#pylint: disable = missing-docstring, blacklisted-name, unused-argument, invalid-name, line-too-long, protected-access
import unittest
import re

from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktLanguageVars

import context  #pylint: disable=unused-import
from qcrit import textual_feature

#[^\s\d’”\'\")\)\]\}\.,:;]
#[“‘—\-†&vâ\*\^(α-ωΑ-Ὠ`̔]
#΄´´``′″‴
textual_feature.setup_tokenizers(terminal_punctuation=('.', ';', ';'))
p = PunktLanguageVars()
#TODO don't mess with the PunktLanguageVars instance variables, mess with the class variables
p._re_word_tokenizer = re.compile(
    PunktLanguageVars._word_tokenize_fmt % {
        'NonWord':
        r"(?:[\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
        'MultiChar':
        PunktLanguageVars._re_multi_char_punct,
        'WordStart':
        r"[^\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡]",
    }, re.UNICODE | re.VERBOSE)
p._re_period_context = re.compile(
    PunktLanguageVars._period_context_fmt % {
        'NonWord':
        r"(?:[\d\.\?¿؟\!¡!‽…⋯᠁ฯ,،,、、。°※··᛫~\:;;\\\/⧸⁄()\(\)\[\]\{\}\<\>\'\"‘’“”‹›«»《》\|‖\=\-\‐\‒\–\—\―_\+\*\^\$£€§%#@&†‡])",
        'SentEndChars': p._re_sent_end_chars,
    }, re.UNICODE | re.VERBOSE)
Beispiel #49
0
                else:
                    lemmatized_tokens.append(token + '/' + headword)
            # if token not found in lemma-headword list
            else:
                # re-add final period if rm'd
                if final_period:
                    token += '.'

                if not return_raw:
                    lemmatized_tokens.append(token)
                else:
                    lemmatized_tokens.append(token + '/' + token)
        if return_string:
            return ' '.join(lemmatized_tokens)
        return lemmatized_tokens


if __name__ == '__main__':
    REPLACER = LemmaReplacer('latin')
    PUNKT = PunktLanguageVars()
    #STRING = 'Est interdum praestare mercaturis rem quaerere, nisi tam periculosum sit, et item foenerari, si tam honestum. Maiores nostri sic habuerunt et ita in legibus posiuerunt: furem dupli condemnari, foeneratorem quadrupli. Quanto peiorem ciuem existimarint foeneratorem quam furem, hinc licet existimare. Et uirum bonum quom laudabant, ita laudabant: bonum agricolam bonumque colonum; amplissime laudari existimabatur qui ita laudabatur. Mercatorem autem strenuum studiosumque rei quaerendae existimo, uerum, ut supra dixi, periculosum et calamitosum. At ex agricolis et uiri fortissimi et milites strenuissimi gignuntur, maximeque pius quaestus stabilissimusque consequitur minimeque inuidiosus, minimeque male cogitantes sunt qui in eo studio occupati sunt. Nunc, ut ad rem redeam, quod promisi institutum principium hoc erit.'  # pylint: disable=line-too-long
    STRING = 'hominum divomque voluptas'
    #EX_TOKENS = PUNKT.word_tokenize(UMLEMMATIZED)
    # UMLEMMATIZED = ['τὴν', 'διάγνωσιν', 'αὐτῶν', 'ἔρχεσθαι']
    # LEMMATIZED = REPLACER.lemmatize(UMLEMMATIZED, return_raw=False, return_string=True)
    LEMMATIZED = REPLACER.lemmatize(STRING.split(),
                                    return_raw=False,
                                    return_string=True)
    print(LEMMATIZED)
Beispiel #50
0
    for f in docs:
    	# read each document
    	exec("file = codecs.open(datapath+'{0}.txt','r','utf-8')".format(f))
    	content = file.read()
    	file.close()

    	# convert to lowercase
    	content = content.lower()
    	# strip numbers and punctuation for bag of words, bigrams, and trigrams
    	toker = RegexpTokenizer(r'\W+|(,.;)+|[0-9]+', gaps=True)
    	nc = toker.tokenize(content)
    	# keep only punctuation for the punctuation-mark representation
    	tokerPunct = RegexpTokenizer(r'[^,.;!?]+', gaps=True)
    	ncPunct = tokerPunct.tokenize(content)

    	p = PunktLanguageVars()
    	ncGreek = p.word_tokenize(content)

    	# remove function (stop) words
    	if language=='english':
    		filtered_words = [w for w in nc if not w in stopwords.words('english')]
    	elif language=='greek':
    		filtered_words = [w for w in ncGreek if not w in STOPS_LIST]

    	# build a dictionary and count the most common elements for bag of words, bigrams, and trigrams
    	contador = Counter(filtered_words)	

    	# get the most common words
    	exec("{0}_mc = contador.most_common(num_common)".format(f))
    	exec("{0}_str_bow = []".format(f))
    	exec("{0}_num_bow = []".format(f))
Beispiel #51
0
def tokenize_latin_words(string):
    from cltk.tokenize.latin_exceptions import latin_exceptions

    assert isinstance(string, str), "Incoming string must be type str."

    def matchcase(word):
        # From Python Cookbook
        def replace(m):
            text = m.group()
            if text.isupper():
                return word.upper()
            elif text.islower():
                return word.lower()
            elif text[0].isupper():
                return word.capitalize()
            else:
                return word

        return replace

    replacements = [(r'mecum', 'cum me'),
                    (r'tecum', 'cum te'),
                    (r'secum', 'cum se'),
                    (r'nobiscum', 'cum nobis'),
                    (r'vobiscum', 'cum vobis'),
                    (r'quocum', 'cum quo'),
                    (r'quacum', 'cum qua'),
                    (r'quicum', 'cum qui'),
                    (r'quibuscum', 'cum quibus'),
                    (r'sodes', 'si audes'),
                    (r'satin', 'satis ne'),
                    (r'scin', 'scis ne'),
                    (r'sultis', 'si vultis'),
                    (r'similist', 'similis est'),
                    (r'qualist', 'qualis est')
                    ]

    for replacement in replacements:
        string = re.sub(replacement[0], matchcase(replacement[1]), string, flags=re.IGNORECASE)


    punkt_param = PunktParameters()
    abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', "m'", 'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']
    punkt_param.abbrev_types = set(abbreviations)
    sent_tokenizer = PunktSentenceTokenizer(punkt_param)

    word_tokenizer = PunktLanguageVars()
    sents = sent_tokenizer.tokenize(string)

    enclitics = ['que', 'n', 'ue', 've', 'st']
    exceptions = enclitics
    exceptions = list(set(exceptions + latin_exceptions))

    tokens = []

    for sent in sents:
        temp_tokens = word_tokenizer.word_tokenize(sent)
        # Need to check that tokens exist before handling them; needed to make stream.readlines work in PlaintextCorpusReader
        
        if temp_tokens:
            if temp_tokens[0].endswith('ne'):
                if temp_tokens[0].lower() not in exceptions:
                    temp = [temp_tokens[0][:-2], '-ne']
                    temp_tokens = temp + temp_tokens[1:]

            if temp_tokens[-1].endswith('.'):
                final_word = temp_tokens[-1][:-1]
                del temp_tokens[-1]
                temp_tokens += [final_word, '.']

            for token in temp_tokens:
                tokens.append(token)

    # Break enclitic handling into own function?
    specific_tokens = []

    for token in tokens:
        is_enclitic = False
        if token.lower() not in exceptions:
            for enclitic in enclitics:
                if token.endswith(enclitic):
                    if enclitic == 'n':
                        specific_tokens += [token[:-len(enclitic)]] + ['-ne']
                    elif enclitic == 'st':
                        if token.endswith('ust'):
                            specific_tokens += [token[:-len(enclitic) + 1]] + ['est']
                        else:
                            specific_tokens += [token[:-len(enclitic)]] + ['est']
                    else:
                        specific_tokens += [token[:-len(enclitic)]] + ['-' + enclitic]
                    is_enclitic = True
                    break
        if not is_enclitic:
            specific_tokens.append(token)

    return specific_tokens
def featurize(tweet):
    """Lowercase a tweet, strip URLs, and return its tokens longer than two characters."""
    tweet = tweet.lower()
    tweet = url_away(tweet)
    tokens = PunktLanguageVars().word_tokenize(tweet)  # word_tokenize is an instance method
    tokens = [token for token in tokens if len(token) > 2]  # return a list, not a lazy filter object
    return tokens
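
# Usage sketch (url_away is assumed to be the module's URL-stripping helper,
# defined elsewhere; the exact output depends on its behaviour):
#
#     featurize('Loving the new CLTK release! http://example.com')
#     # e.g. ['loving', 'the', 'new', 'cltk', 'release']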