def preprocess_unicode(raw_text):
    """Lowercase + transliterate text, then blank out URLs, emails,
    phone numbers, numbers, and currency symbols via `preprocess`."""
    cleaned = preprocess.transliterate_unicode(raw_text.lower())
    # Apply each scrubber in a fixed order, replacing matches with nothing.
    for scrub in (preprocess.replace_urls,
                  preprocess.replace_emails,
                  preprocess.replace_phone_numbers,
                  preprocess.replace_numbers,
                  preprocess.replace_currency_symbols):
        cleaned = scrub(cleaned, replace_with=u'')
    return cleaned
def clean_text(text):
    """Normalize raw text: drop newlines and common TLD fragments, strip
    HTML, normalize unicode/contractions/punctuation, and remove URLs
    and numbers.

    Fixes vs. original: L10 had an unbalanced `)` and replaced the
    literal two-character string '/n' instead of a newline; L13 had a
    stray `, replace_with=' ')` tail that made the line a syntax error.
    """
    # '\n' (newline) was almost certainly intended, not the literal "/n".
    text = text.replace('\n', ' ').replace('.com', ' ').replace('.org', ' ').replace('.net', ' ')
    text = strip_html(text)
    # Remove contractions, accents, punctuation, currency symbols; lowercase.
    text = preprocess_text(text, fix_unicode=True, no_accents=True,
                           no_contractions=True, lowercase=True,
                           no_punct=True, no_currency_symbols=True)
    text = replace_urls(text, replace_with='')
    text = replace_numbers(text, replace_with='')
    return text
Пример #3
0
    def clean_tweet(self, text):
        """Normalize a raw tweet into cleaned, whitespace-separated words.

        Pipeline: repair bad unicode, extract text from HTML, unpack
        contractions, scrub URLs/emails/phones/numbers/currency, remove
        accents, translate smileys and emoji to words, split CamelCase
        and underscore-joined tokens, then drop punctuation, digits,
        and tokens shorter than three characters.
        """
        # Repair mojibake, then pull visible text out of any HTML markup.
        text = preprocess.fix_bad_unicode(text)
        text = BeautifulSoup(text, features='lxml').getText()

        # Expand contractions before any token-level scrubbing.
        text = preprocess.unpack_contractions(text)

        # Strip URLs, emails, phone numbers, numbers, and currency symbols
        # (default placeholders), then remove accents.
        for scrub in (preprocess.replace_urls,
                      preprocess.replace_emails,
                      preprocess.replace_phone_numbers,
                      preprocess.replace_numbers,
                      preprocess.replace_currency_symbols):
            text = scrub(text)
        text = preprocess.remove_accents(text)

        # Map ASCII smileys via the lookup table, then spell out unicode
        # emoji; demojize output uses ":name:" so colons become spaces.
        tokens = [
            self.SMILEY[tok] if tok in self.SMILEY else tok
            for tok in text.split()
        ]
        text = emoji.demojize(" ".join(tokens))
        text = ' '.join(text.replace(":", " ").split())

        # Split attached CamelCase runs, then underscore-joined words.
        text = ' '.join(re.findall('[A-Z][^A-Z]*', text))
        text = text.replace('_', ' ')

        # Drop punctuation, any remaining digits, and 1-2 char tokens.
        text = preprocess.remove_punct(text)
        text = re.sub(r'\d', '', text)
        text = re.sub(r'\b\w{1,2}\b', '', text)

        # Collapse runs of whitespace left behind by the removals.
        return preprocess.normalize_whitespace(text)
Пример #4
0
 def clean_text(self, raw_text):
     """Strip markup and punctuation, transliterate to ASCII, and blank
     out URLs, emails, phone numbers, numbers, and currency symbols."""
     cleaned = self.strip_tags(raw_text).lower()
     cleaned = preprocess.remove_punct(cleaned)
     cleaned = preprocess.transliterate_unicode(cleaned)
     # Each scrubber replaces its matches with the empty string.
     for scrub in (preprocess.replace_urls,
                   preprocess.replace_emails,
                   preprocess.replace_phone_numbers,
                   preprocess.replace_numbers,
                   preprocess.replace_currency_symbols):
         cleaned = scrub(cleaned, replace_with='')
     return cleaned
Пример #5
0
def test_replace_urls():
    """Each URL in free text is substituted with the given placeholder."""
    raw = "I learned everything I know from www.stackoverflow.com and http://wikipedia.org/ and Mom."
    expected = "I learned everything I know from *URL* and *URL* and Mom."
    assert preprocess.replace_urls(raw, "*URL*") == expected
Пример #6
0
 def test_replace_urls(self):
     """Each URL in free text is substituted with the given placeholder."""
     raw = "I learned everything I know from www.stackoverflow.com and http://wikipedia.org/ and Mom."
     expected = "I learned everything I know from *URL* and *URL* and Mom."
     self.assertEqual(preprocess.replace_urls(raw, '*URL*'), expected)
Пример #7
0
 def test_replace_urls(self):
     """Both bare (www.) and schemed (http://) URLs are replaced."""
     source = "I learned everything I know from www.stackoverflow.com and http://wikipedia.org/ and Mom."
     want = "I learned everything I know from *URL* and *URL* and Mom."
     got = preprocess.replace_urls(source, '*URL*')
     self.assertEqual(got, want)