Example #1
    def __init__(self):
        with Timer() as self.model_load_time:
            from iwnlp.iwnlp_wrapper import IWNLPWrapper
            from stts2upos import conv_table
            data_loc = "/opt/iwnlp/IWNLP.Lemmatizer_20181001.json"
            self.lemmatizer = IWNLPWrapper(lemmatizer_path=data_loc)

            def myprocessor(myinput):
                mydoc = string2doc(myinput)
                for sent in mydoc:
                    for tok in sent:
                        try:
                            matching_lemmas = self.lemmatizer.lemmatize(
                                tok.word, conv_table.get(tok.xpos))
                            if matching_lemmas is None:
                                tok.lemma = "_"
                                # elif len(matching_lemmas) > 1:
                                #     print("lots o lemmas!", matching_lemmas)
                            else:
                                # unclear how to select best alternative
                                # just use first item in list
                                tok.lemma = matching_lemmas[0]
                        except ValueError:
                            tok.lemma = "_"
                        # don't repeat gold pos in output
                        tok.hide_fields(HIDDEN_FIELDS)
                return mydoc

            self.processor = myprocessor
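A minimal sketch of the lookup pattern this processor wraps, assuming the same dictionary path as above; the sample word and universal POS tag are illustrative only:

from iwnlp.iwnlp_wrapper import IWNLPWrapper

lemmatizer = IWNLPWrapper(
    lemmatizer_path="/opt/iwnlp/IWNLP.Lemmatizer_20181001.json")
# lemmatize() returns a list of candidate lemmas, or None for unknown forms
candidates = lemmatizer.lemmatize("Hunde", pos_universal_google="NOUN")
lemma = candidates[0] if candidates else "_"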
Example #2
 def __init__(self, lemmatizer_path, nlp):
     self.lemmatizer = IWNLPWrapper(lemmatizer_path=lemmatizer_path)
     self.stringstore = nlp.vocab.strings
     # self.matcher = PhraseMatcher(nlp.vocab)
     Token.set_extension('iwnlp_lemmas', getter=self.lemmatize, force=True)
     self.lookup = {
         ('fast', ADV): 'fast',
     }
Example #3
 def __init__(self,
              lemmatizer_path,
              use_plain_lemmatization=False,
              ignore_case=False):
     self.lemmatizer = IWNLPWrapper(lemmatizer_path=lemmatizer_path)
     self.use_plain_lemmatization = use_plain_lemmatization
     self.ignore_case = ignore_case
     Token.set_extension('iwnlp_lemmas', getter=self.get_lemmas, force=True)
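Examples #2 and #3 are constructors for a spaCy pipeline component that exposes IWNLP lemmas through the iwnlp_lemmas token extension. A hedged sketch of how such a component is typically wired in under spaCy 2.x, assuming the spacy_iwnlp package provides the spaCyIWNLP class referenced in Example #7:

import spacy
from spacy_iwnlp import spaCyIWNLP  # assumption: the packaged wrapper class

nlp = spacy.load('de')
iwnlp = spaCyIWNLP(lemmatizer_path='data/IWNLP.Lemmatizer_20181001.json')
nlp.add_pipe(iwnlp)  # spaCy 2.x API; spaCy 3.x registers components by name
doc = nlp('Wir mögen Fußballspiele')
print([token._.iwnlp_lemmas for token in doc])  # per token: list of candidates or None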
Example #4
 def __init__(self):
     self.logger = logging.getLogger()
     self.logger.setLevel(logging.DEBUG)
     self.lemmatizer = IWNLPWrapper(
         lemmatizer_path='data/IWNLP/IWNLP.Lemmatizer_20170501.json')
     self.sentiws = SentiWSWrapper(sentiws_path='data/sentiws')
     self.logger.debug('Loading Spacy model')
     self.nlp = spacy.load('de')
     self.logger.debug('Spacy model loaded')
Example #5
    def preprocess(self):

        tokenizedTweets_writer = open(
            './daten/tokenized_tweets_normalized.txt', 'w')
        preprocTweets_writer = open(
            './daten/preprocessed_tweets_normalized.txt', 'w')

        pp = Pipeline(self.this_file, "./autosarkasmus/rsrc/de-tiger.map")
        tweets_tkn, tweets_proc, labels = pp.process()
        assert (len(tweets_tkn) == len(tweets_proc) == len(labels))

        # filter stopwords + normalize tokens
        lemmatizer = IWNLPWrapper(
            lemmatizer_path='daten/IWNLP.Lemmatizer_20170501.json')
        lemmatized_tokens = []
        for x in range(len(tweets_tkn)):
            tweet = []
            for token in tweets_tkn[x]:
                if token.lower() in stopwords.words('german'):
                    continue
                try:
                    lemma = lemmatizer.lemmatize_plain(token, ignore_case=True)
                    if lemma:
                        tweet.append(lemma[0])
                    else:
                        tweet.append(token)

                except Exception as e:
                    print(e)

            lemmatized_tokens.append(tweet)

        assert (len(lemmatized_tokens) == len(tweets_proc) == len(labels))

        # write preprocessing results to file
        for x in range(len(lemmatized_tokens)):
            t_tweet = (" ").join(lemmatized_tokens[x])
            p_tweet = (" ").join(
                [str(tok) + "/" + str(tag) for tok, tag in tweets_proc[x]])
            label = labels[x]
            tokenizedTweets_writer.write(t_tweet + "\t" + label + "\n")
            preprocTweets_writer.write(p_tweet + "\t" + label + "\n")
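The two writers in Example #5 are opened but never closed; a minimal sketch of the same output step using context managers, where tokenized_lines, processed_lines and labels stand in for the already-joined strings built above (hypothetical names):

with open('./daten/tokenized_tweets_normalized.txt', 'w') as tok_writer, \
        open('./daten/preprocessed_tweets_normalized.txt', 'w') as proc_writer:
    for t_tweet, p_tweet, label in zip(tokenized_lines, processed_lines, labels):
        tok_writer.write(t_tweet + "\t" + label + "\n")
        proc_writer.write(p_tweet + "\t" + label + "\n")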
Example #6
 @classmethod
 def load(cls, lemmatizer_path):
     lemmatizer = IWNLPWrapper(lemmatizer_path=lemmatizer_path)
     return cls(lemmatizer)
Example #7
class Preprocess:
    # spaCy model for lemmatization in German

    nlp = spacy.load('de')

    # IWNLP German Lemmatizations:
    dirname = os.path.dirname(__file__)
    iwnlp_file = os.path.join(dirname, 'data/IWNLP.Lemmatizer_20181001.json')
    #iwnlp = spaCyIWNLP(lemmatizer_path='data/IWNLP.Lemmatizer_20181001.json', ignore_case=True)
    lemmatizer = IWNLPWrapper(lemmatizer_path=iwnlp_file)

    # add custom tokenizer
    nlp.tokenizer = custom_tokenizer(nlp)
    '''
    try:
        # add pipes
        nlp.add_pipe(iwnlp)
        # nlp.add_pipe(__set_custom_boundaries, before='parser')
    except Exception:
        pass
    '''

    stopwords_to_remove_from_default_set = [
        "schlecht", "mensch", "menschen", "beispiel", "gott", "jahr", "jahre",
        "jahren", "nicht", "uhr"
    ]
    for stopword in stopwords_to_remove_from_default_set:
        nlp.vocab[stopword].is_stop = False

    # spaCy (STTS) token tags that will be removed during preprocessing
    tags_to_remove = [
        '$(',
        '$,',
        '$.',
        'APPR',
        'APPO',
        'APPRART',
        'APZR',
        'ART',
        'ITJ',
        'KOKOM',
        'KON',
        'KOUI',
        'KOUS',  # 'CARD',
        'PDS',
        'PAV',
        'PROAV',
        'PDAT',
        'PIAT',
        'PIDAT',
        'PIS',
        'PPER',
        'PPOSAT',
        'PPOSS',
        'PRELAT',
        'PRELS',
        'PRF',
        'PTKA',  # 'PTKANT',
        'PTKVZ',
        'PTKZU',
        'PWAT',
        'PWAV',
        'PWS',
        'TRUNC',
        'XY',
        'SP',
        'WRP'
    ]

    def __init__(self, text, split_in_sentences=True, with_pos=False):
        '''
        :param text: input text
        :param split_in_sentences: split text into sentences --> one sub-array per sentence in the Preprocess result
        :param with_pos: if true, yield triples of (<start pos in original text>, <end pos in original text>, token), else only tokens
        '''

        self.text = text
        self.nlp_text = self.nlp(text)

        self.maintain_indeces = []

        self.noun_chunks = self.get_noun_chunks(cleaned=True, flattened=True)
        self.maintain_indeces.extend(index for index in self.noun_chunks
                                     if index not in self.maintain_indeces)

        self.named_entities = self.get_named_entities(flattened=True)
        self.maintain_indeces.extend(index for index in self.named_entities
                                     if index not in self.maintain_indeces)
        self.maintain_indeces.sort()

        self.preprocessed = self.preprocess(sentence_split=split_in_sentences,
                                            with_pos=with_pos)

    def __get_lemma(self, token):
        '''
        use the IWNLP lemma if available, otherwise fall back to the spaCy lemma
        :param token: spacy-token
        :return: lemmatization
        '''
        #lemma_iwnlp_list = token._.iwnlp_lemmas
        lemma_iwnlp_list = self.lemmatizer.lemmatize_plain(token.text,
                                                           ignore_case=False)
        if lemma_iwnlp_list:
            lemma_iwnlp = lemma_iwnlp_list[0]
            #print(token, ":::", lemma_iwnlp_list[0])
            return lemma_iwnlp

        return token.lemma_

    def get_named_entities(self, only_indeces=True, flattened=False):
        '''
        return array of named entities (PER: person, LOC: location, ORG: corporate, governmental, or other organizational entity, MISC: miscellaneous entities, e.g. events, nationalities, products, or works of art)
        :param only_indeces: if true, return only token indices instead of (index, token, label) tuples
        :param flattened: returns a 1d array, else related entities are grouped in sub-arrays
        :return: array with named entities
        '''
        if flattened:
            named_ents = [
                word.i if only_indeces else (word.i, word, ents.label_)
                for ents in self.nlp_text.ents for word in ents
            ]
        else:
            named_ents = [[
                word.i if only_indeces else (word.i, word, ents.label_)
                for word in ents
            ] for ents in self.nlp_text.ents]
        return named_ents

    def get_noun_chunks(self,
                        only_indices=True,
                        cleaned=True,
                        flattened=False):
        '''
        return array of noun_chunks/noun_phrases of the Text object
        :param only_indices: if true, return only token indices instead of (index, token) tuples
        :param cleaned: noun phrases without stopwords or punctuation
        :param flattened: returns a 1d array, else related phrases are grouped in sub-arrays
        :return: array with noun-phrases
        '''

        # noun_words = [(word.i, word) for ent in text.noun_chunks for word in ent]
        # noun_words = [[(word.i, word) for word in ent] for ent in text.noun_chunks]
        if flattened:
            if cleaned:
                noun_words = [
                    word.i if only_indices else (word.i, word)
                    for ent in self.nlp_text.noun_chunks for word in ent
                    if self.__is_valid_token(word)
                ]
            else:
                noun_words = [
                    word.i if only_indices else (word.i, word)
                    for ent in self.nlp_text.noun_chunks for word in ent
                ]
        else:
            if cleaned:
                noun_words = [[
                    word.i if only_indices else (word.i, word) for word in ent
                    if self.__is_valid_token(word)
                ] for ent in self.nlp_text.noun_chunks]
            else:
                noun_words = [[
                    word.i if only_indices else (word.i, word) for word in ent
                ] for ent in self.nlp_text.noun_chunks]

        return noun_words

    def __is_valid_token(self, token):
        '''
        checks whether a token is valid: not a stopword, punctuation, or whitespace
        :param token: spacy-token
        :return: bool
        '''
        # nlp(token.lower_)[0] works around a spaCy bug: e.g. "Der" would not be
        # recognized as a stopword, but "der" would
        is_stop = self.nlp(token.lower_)[0].is_stop
        return not is_stop and not token.is_punct and not token.is_space

    def __tokenize_words(self, doc, with_pos=False):
        '''
        tokenizes text and removes unimportant tokens
        :param doc: input spacy doc
        :param with_pos: if true, yield triples of (<start pos in original text>, <end pos in original text>, token), else only tokens
        :return: 1d array of tokens
        '''
        tokenized_text = [
            (token.idx, token.idx + len(token),
             self.__get_lemma(token).lower())
            if with_pos else self.__get_lemma(token).lower()
            for token in doc
            if (self.__is_valid_token(token)
                and token.tag_ not in self.tags_to_remove)
            or token.i in self.maintain_indeces
        ]

        return tokenized_text

    def __tokenize_to_list_sentences(self, with_pos=False):
        '''
        tokenizes text and removes unimportant tokens, split by sentences
        :param with_pos: if true, yield triples of (<start pos in original text>, <end pos in original text>, token), else only tokens
        :return: 2d array of tokens in sub-arrays (sentences)
        '''
        filtered_text = []
        for sentence in self.nlp_text.sents:
            filtered_sentence = self.__tokenize_words(sentence,
                                                      with_pos=with_pos)
            filtered_text.append(filtered_sentence)

        return filtered_text

    def preprocess(self, sentence_split=True, with_pos=False):
        '''
        preprocess text. removes unimportant tokens
        :param sentence_split: split by sentences
        :param with_pos: if true, yield triples of (<start pos in original text>, <end pos in original text>, token), else only tokens
        :return: 1d or 2d array with preprocessed text
        '''
        if sentence_split:
            preprocessed_text = self.__tokenize_to_list_sentences(
                with_pos=with_pos)
        else:
            preprocessed_text = self.__tokenize_words(self.nlp_text,
                                                      with_pos=with_pos)

        return preprocessed_text
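A hedged usage sketch for the Preprocess class above; the German sample text is illustrative only:

text = "Die Katze jagt die Mäuse. Der Hund schläft."
prep = Preprocess(text, split_in_sentences=True, with_pos=False)
print(prep.preprocessed)      # one sub-list of lemmatized tokens per sentence
print(prep.named_entities)    # token indices of named entities
print(prep.noun_chunks)       # token indices of the cleaned noun chunks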
Example #8
 @classmethod
 def setUpClass(self):
     self.iwnlp = IWNLPWrapper(lemmatizer_path='data/IWNLP.Lemmatizer_20170501.json')
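A sketch of a test method that could follow this fixture; it only checks the shape of lemmatize_plain's return value (a list of candidate lemmas or None), which is the behavior the other examples rely on, and the sample word is illustrative:

 def test_lemmatize_plain_returns_list_or_none(self):
     lemmas = self.iwnlp.lemmatize_plain('Hauses', ignore_case=True)
     # lemmatize_plain yields a list of candidates or None for unknown forms
     self.assertTrue(lemmas is None or isinstance(lemmas, list))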
Example #9
from iwnlp.iwnlp_wrapper import IWNLPWrapper

output = 'output/Baselist.txt'
path = "Baselist.txt"
lemmatizer = IWNLPWrapper(lemmatizer_path='IWNLP.Lemmatizer_20170501.json')

with open(path, 'r') as read_file:
    data = read_file.read().splitlines()

tokens = []
lemmatized = []
tags = []

for i in data:
    j = i.split('|')
    tokens.append(j[0].lower())
    tags.append(j[1])

for token, tag in zip(tokens, tags):
    lemma = lemmatizer.lemmatize(token, pos_universal_google=tag)
    lemmatized.append(lemma)

# optionally inspect the (token, lemma) pairs:
# for token, lemma in zip(tokens, lemmatized):
#     print(token, lemma)

with open(output, 'w') as write_file:
    for i, lemma in enumerate(lemmatized):
        if lemmatized[i] is None:
            write_file.write(tokens[i] + '|' + tags[i] + "\n")
        else:
            # write the first candidate lemma together with its tag
            write_file.write(lemmatized[i][0] + '|' + tags[i] + "\n")
Example #10
 def __init__(self, lang):
     self.lang = lang
     self.nlp = nlp = spacy.load(lang)
     current_dir = os.path.dirname(__file__)
     self.iwnlp = IWNLPWrapper(lemmatizer_path=current_dir + '/../resources/IWNLP.Lemmatizer_20170501.json')
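A hedged sketch of how the component above might use its stored wrapper downstream; lemma_of is a hypothetical helper (not part of the original class), and passing token.pos_ as the universal POS tag is an assumption:

 def lemma_of(self, token):
     # prefer the first IWNLP candidate, fall back to spaCy's lemma
     candidates = self.iwnlp.lemmatize(token.text, pos_universal_google=token.pos_)
     return candidates[0] if candidates else token.lemma_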