def preprocess_unicode(raw_text):
    """Lowercase + transliterate text, then strip URLs, emails, phone
    numbers, plain numbers and currency symbols (each replaced with '')."""
    cleaned = preprocess.transliterate_unicode(raw_text.lower())
    # Apply each entity scrubber in turn; all remove the match outright.
    for scrub in (
        preprocess.replace_urls,
        preprocess.replace_emails,
        preprocess.replace_phone_numbers,
        preprocess.replace_numbers,
        preprocess.replace_currency_symbols,
    ):
        cleaned = scrub(cleaned, replace_with=u'')
    return cleaned
def clean_text(text):
    """Normalize raw text: collapse newlines and common domain suffixes to
    spaces, strip HTML, expand contractions / drop accents, punctuation and
    currency symbols, lowercase, then remove URLs and numbers.

    :param text: input text as str
    :return: cleaned text as str
    """
    # BUGFIX: original had a stray ')' here (SyntaxError) and replaced the
    # two-character literal '/n' — '\n' is the intended newline escape.
    text = text.replace('\n', ' ').replace('.com', ' ').replace('.org', ' ').replace('.net', ' ')
    text = strip_html(text)
    # BUGFIX: original line ended with dangling ", replace_with=' ')" after
    # the call was already closed (SyntaxError); preprocess_text takes no
    # replace_with argument, so it is dropped.
    text = preprocess_text(
        text,
        fix_unicode=True,
        no_accents=True,
        no_contractions=True,
        lowercase=True,
        no_punct=True,
        no_currency_symbols=True,
    )
    text = replace_urls(text, replace_with='')
    text = replace_numbers(text, replace_with='')
    return text
# Example #3 (scraped separator — kept as a comment so the file parses)
# 0
    def clean_tweet(self, text):
        """Run the full tweet-normalization pipeline and return plain words.

        Repairs unicode, strips HTML, expands contractions, replaces
        URLs/emails/phones/numbers/currency with their default placeholder
        tokens, converts smileys and emojis to words, splits CamelCase and
        snake_case, removes punctuation, digits and 1-2 letter words, and
        normalizes whitespace.
        """
        # Repair mojibake first, then pull the visible text out of any markup.
        text = preprocess.fix_bad_unicode(text)
        text = BeautifulSoup(text, features='lxml').getText()

        # Expand contractions before token-level scrubbing.
        text = preprocess.unpack_contractions(text)

        # Replace URLs, emails, phone numbers, plain numbers and currency
        # symbols with each helper's default placeholder token.
        for scrub in (
            preprocess.replace_urls,
            preprocess.replace_emails,
            preprocess.replace_phone_numbers,
            preprocess.replace_numbers,
            preprocess.replace_currency_symbols,
        ):
            text = scrub(text)

        text = preprocess.remove_accents(text)

        # Map known text smileys to words, then expand unicode emojis into
        # their :name: form and drop the surrounding colons.
        tokens = [
            self.SMILEY[tok] if tok in self.SMILEY else tok
            for tok in text.split()
        ]
        text = emoji.demojize(" ".join(tokens))
        text = text.replace(":", " ")
        text = ' '.join(text.split())

        # Split attached CamelCase runs into separate words.
        # NOTE(review): this keeps only capitalized runs — fully lowercase
        # text is discarded here; preserved as the original's behavior.
        text = ' '.join(re.findall('[A-Z][^A-Z]*', text))

        # snake_case -> separate words.
        text = text.replace('_', ' ')

        text = preprocess.remove_punct(text)

        # Drop remaining digits, then any words of fewer than 3 characters.
        text = re.sub(r'\d', '', text)
        text = re.sub(r'\b\w{1,2}\b', '', text)

        return preprocess.normalize_whitespace(text)
# Example #4 (scraped separator — kept as a comment so the file parses)
# 0
 def clean_text(self, raw_text):
     """Strip tags, lowercase, drop punctuation, transliterate, then remove
     URLs, emails, phone numbers, numbers and currency symbols."""
     cleaned = self.strip_tags(raw_text).lower()
     cleaned = preprocess.remove_punct(cleaned)
     cleaned = preprocess.transliterate_unicode(cleaned)
     # Each scrubber deletes its matches (replacement is the empty string).
     for scrub in (preprocess.replace_urls,
                   preprocess.replace_emails,
                   preprocess.replace_phone_numbers,
                   preprocess.replace_numbers,
                   preprocess.replace_currency_symbols):
         cleaned = scrub(cleaned, replace_with='')
     return cleaned
# Example #5 (scraped separator — kept as a comment so the file parses)
# 0
def preprocess(line):
    """
    Pre processes the given line.

    :param line: line as str
    :return: preprocessed sentence(s)
    """
    # Lines at or above the configured length cutoff produce no output.
    if len(line) >= args.linelength:
        return ''

    if args.clean:
        line = clean_text(line)

    # Tokenize: spaCy lemmas when requested, plain whitespace split otherwise.
    if args.lemmatize:
        tokens = [token.lemma_ for token in nlp(line)]
    else:
        tokens = line.split()

    # Optional per-token transforms, applied in the original fixed order.
    if args.stem:
        tokens = [stemmer.stem(t) for t in tokens]
    if args.decapitalize:
        tokens = [t.lower() for t in tokens]
    if args.umlaute:
        tokens = [replace_umlaute(t) for t in tokens]
    if args.accents:
        tokens = [pp.remove_accents(t) for t in tokens]
    if args.numbers:
        tokens = [pp.replace_numbers(t, replace_with='*NUMMER*')
                  for t in tokens]

    # Optional token filters.
    if args.punctuation:
        tokens = [t for t in tokens if t not in punctuation_tokens]
    if args.stopwords:
        tokens = [t for t in tokens if t.lower() not in stop_words]
    if args.forbidden:
        tokens = [t for t in tokens
                  if not any(kw in t.lower() for kw in forbidden_keywords)]

    # Only sentences with more than three surviving tokens are emitted.
    if len(tokens) > 3:
        return "{}\n".format(' '.join(tokens))
    return ''
# Example #6 (scraped separator — kept as a comment so the file parses)
# 0
def test_replace_numbers():
    """Numbers — signed, comma-grouped, decimal — all collapse to *NUM*."""
    original = "I owe $1,000.99 to 123 people for 2 +1 reasons."
    expected = "I owe $*NUM* to *NUM* people for *NUM* *NUM* reasons."
    assert preprocess.replace_numbers(original, "*NUM*") == expected
 def test_replace_numbers(self):
     # Signed, comma-grouped and decimal numbers all become '*NUM*'.
     sample = "I owe $1,000.99 to 123 people for 2 +1 reasons."
     wanted = "I owe $*NUM* to *NUM* people for *NUM* *NUM* reasons."
     self.assertEqual(preprocess.replace_numbers(sample, '*NUM*'), wanted)
 def test_replace_numbers(self):
     # Each numeric token in the sentence is replaced by the given marker.
     source_text = "I owe $1,000.99 to 123 people for 2 +1 reasons."
     replaced = preprocess.replace_numbers(source_text, '*NUM*')
     self.assertEqual(
         replaced,
         "I owe $*NUM* to *NUM* people for *NUM* *NUM* reasons.")