Example #1
import re

from textacy import preprocessing
def clean(text: str) -> str:
    txt = text.strip()
    txt = preprocessing.normalize_unicode(txt, form="NFKC")
    # Collapse runs of whitespace
    txt = preprocessing.normalize_whitespace(txt)
    # Collapse repeated newlines into one
    txt = preprocessing.normalize_repeating_chars(txt, chars="\n", maxn=1)
    # Re-join words hyphen-ated across line breaks
    txt = preprocessing.normalize_hyphenated_words(txt)
    txt = preprocessing.normalize_quotation_marks(txt)
    txt = preprocessing.replace_urls(txt, replace_with="")
    txt = preprocessing.replace_phone_numbers(txt, replace_with="")
    txt = preprocessing.replace_emails(txt, replace_with="")
    txt = preprocessing.replace_user_handles(txt, replace_with="")
    txt = preprocessing.normalize_repeating_chars(txt, chars=".,;:-_ ", maxn=1)
    # Replace any remaining newlines with spaces
    txt = re.sub(r"\n ", " ", txt)
    txt = re.sub(r" \n", " ", txt)
    txt = re.sub(r"\n", " ", txt)
    # Drop stray periods between spaces (dot escaped; unescaped, the pattern
    # would delete any single character between spaces)
    txt = re.sub(r" \. ", " ", txt)
    # Ensure a space after sentence-final periods, then split sentences
    # onto their own lines
    txt = re.sub(r"\.([A-Z])", r". \1", txt)
    txt = re.sub(r"\. ([A-Z])", r".\n\1", txt)
    # Undo the sentence split after some common abbreviations
    for abv in ["Dr", "St", "Mr", "Ms", "mt", "Inst", "inc", "est"]:
        txt = re.sub(abv + r"\.\n", abv + ". ", txt)
    return txt
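
A quick sanity check of this pipeline (illustrative input; assumes textacy 0.9/0.10, where these helpers live at the top level of textacy.preprocessing):

raw = "Visit https://example.com!!  Mail foo@bar.com ...  Dr. Smith agrees."
print(clean(raw))
# the URL and email are removed, "..." collapses to ".", and each sentence
# lands on its own line except after the whitelisted abbreviations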
Example #2
from textacy import preprocessing as txt  # presumed import, given the txt.* calls
def cleanTweet(raw_data):
    # Strip URLs, emails, emojis, user handles, phone numbers, bare numbers,
    # currency symbols, and hashtags from each tweet in the iterable
    data = [txt.replace_urls(x, "") for x in raw_data]
    data = [txt.replace_emails(x, "") for x in data]
    data = [txt.replace_emojis(x, "") for x in data]
    data = [txt.replace_user_handles(x, "") for x in data]
    data = [txt.replace_phone_numbers(x, "") for x in data]
    data = [txt.replace_numbers(x, "") for x in data]
    data = [txt.replace_currency_symbols(x, "") for x in data]
    data = [txt.replace_hashtags(x, "") for x in data]
    return data
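
For instance (made-up tweets; the presumed alias import above is required):

tweets = ["Check https://t.co/xyz @alice #deals $5!!", "mail me: a@b.co"]
print(cleanTweet(tweets))
# each tweet comes back with URLs, handles, hashtags, numbers, etc. stripped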
Example #3
from textacy import preprocessing
def test_replace_user_handles():
    in_outs = [
        ("like omg it's @bjdewilde", "like omg it's _USER_"),
        ("@Real_Burton_DeWilde: definitely not a bot",
         "_USER_: definitely not a bot"),
        ("wth twitter @b.j.dewilde", "wth twitter _USER_.j.dewilde"),
        # placeholder address: a plain email must not be rewritten as a handle
        ("foo@bar.com is not a user handle",
         "foo@bar.com is not a user handle"),
    ]
    for in_, out_ in in_outs:
        assert preprocessing.replace_user_handles(in_) == out_
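
The test relies on the default placeholder; replace_with selects a different marker (signature as of textacy 0.9/0.10):

from textacy import preprocessing

preprocessing.replace_user_handles("ping @bjdewilde")                   # 'ping _USER_'
preprocessing.replace_user_handles("ping @bjdewilde", replace_with="")  # 'ping '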
Example #4
from textacy import preprocessing as txt  # presumed import, as in Example #2
def checkLenTweet(raw_data):
    data = txt.replace_urls(raw_data, "")
    data = txt.replace_emails(data, "")
    data = txt.replace_emojis(data, "")
    data = txt.replace_user_handles(data, "")
    data = txt.replace_phone_numbers(data, "")
    data = txt.replace_numbers(data, "")
    data = txt.replace_currency_symbols(data, "")
    data = txt.replace_hashtags(data, "")
    # 28 characters: the average length of a tweet
    return len(data) >= 28
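
In practice, a tweet that is mostly links and handles falls below the threshold once the noise is stripped (made-up inputs):

checkLenTweet("@bob see https://t.co/abc #wow")  # likely False: little text survives
checkLenTweet("An actual opinion about something that happened today.")  # True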
Example #5
import re

import contractions
from textacy import preprocessing
def preprocess_sentence(sent, lower=True):
    """Pre-process a sentence (via the ``textacy.preprocessing`` module).

    Args:
        sent (str): text.
        lower (bool): whether to return a lowercase string.

    Returns:
        str
    """
    # normalize unicode
    sent = preprocessing.normalize_unicode(sent)

    # deaccent
    sent = preprocessing.remove_accents(sent)

    # replace newline chars
    sent = re.sub("\n|\r", " ", sent)

    # unpack contractions
    sent = contractions.fix(sent)

    # replace emoji symbols
    sent = preprocessing.replace_emojis(sent)

    # replace hashtags
    sent = preprocessing.replace_hashtags(sent)

    # replace user handles
    sent = preprocessing.replace_user_handles(sent)

    # replace currency symbols
    sent = preprocessing.replace_currency_symbols(sent)

    # replace emails
    sent = preprocessing.replace_emails(sent)

    # replace URLs
    sent = preprocessing.replace_urls(sent)

    # remove punctuation
    sent = preprocessing.remove_punctuation(sent)

    # normalize whitespace
    sent = preprocessing.normalize_whitespace(sent)

    if lower:
        sent = sent.lower()
    return sent
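
For example (made-up input; note that the later remove_punctuation call strips the underscores from placeholders such as _URL_):

preprocess_sentence("I can't wait!! More at https://example.com")
# → roughly 'i cannot wait more at url'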
Example #6
from textacy import preprocessing
def textacy_preprocess(sentence):
    """Preprocess text."""
    # normalize first, then replace, then remove: running remove_punctuation
    # before the replace_* calls would break URL, email, and handle detection
    sentence = preprocessing.normalize_hyphenated_words(sentence)
    sentence = preprocessing.normalize_quotation_marks(sentence)
    # disabled: normalize_repeating_chars requires a chars= argument
    # sentence = preprocessing.normalize_repeating_chars(sentence)
    sentence = preprocessing.normalize_unicode(sentence)
    sentence = preprocessing.normalize_whitespace(sentence)
    sentence = preprocessing.replace_currency_symbols(sentence)
    sentence = preprocessing.replace_emails(sentence)
    sentence = preprocessing.replace_emojis(sentence)
    sentence = preprocessing.replace_hashtags(sentence)
    sentence = preprocessing.replace_phone_numbers(sentence)  # before bare numbers
    sentence = preprocessing.replace_numbers(sentence)
    sentence = preprocessing.replace_urls(sentence)
    sentence = preprocessing.replace_user_handles(sentence)
    sentence = preprocessing.remove_accents(sentence)
    sentence = preprocessing.remove_punctuation(sentence)

    return sentence
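
With the default replace_with values, a tweet-like string is mapped onto placeholder tokens (illustrative; defaults as of textacy 0.9/0.10):

textacy_preprocess("@sam owes me $20, see http://x.co #debts")
# @sam → _USER_, $ → _CUR_, 20 → _NUMBER_, the link → _URL_, #debts → _TAG_,
# after which remove_punctuation strips the underscores from the placeholders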
Example #7
    # requires module-level: import re; from textacy import preprocessing
    def _clean(self, text: str):
        txt = text.strip()

        # Normalize unicode to the NFKC compatibility form
        txt = preprocessing.normalize_unicode(txt, form="NFKC")

        # txt = preprocessing.remove_punctuation(txt)

        # Collapse runs of whitespace
        txt = preprocessing.normalize_whitespace(txt)
        # Collapse repeated newlines into one
        txt = preprocessing.normalize_repeating_chars(txt, chars="\n", maxn=1)
        # Re-join words hyphen-ated across line breaks
        txt = preprocessing.normalize_hyphenated_words(txt)
        txt = preprocessing.normalize_quotation_marks(txt)
        txt = preprocessing.replace_urls(txt, replace_with="")
        txt = preprocessing.replace_phone_numbers(txt, replace_with="")
        txt = preprocessing.replace_emails(txt, replace_with="")
        txt = preprocessing.replace_user_handles(txt, replace_with="")
        txt = preprocessing.normalize_repeating_chars(txt,
                                                      chars=".,;:-_ ",
                                                      maxn=1)
        # Replace any remaining newlines with spaces
        txt = re.sub(r"\n ", " ", txt)
        txt = re.sub(r" \n", " ", txt)
        txt = re.sub(r"\n", " ", txt)
        # Drop stray periods between spaces (dot escaped; unescaped, the
        # pattern would delete any single character between spaces)
        txt = re.sub(r" \. ", " ", txt)

        # txt = text.encode().decode("unicode-escape")
        # Used ftfy for "fixing" broken text, e.g. Unicode
        # txt = fix_text(txt.strip(), normalization="NFKC")

        # re- minissence => reminissence
        # txt = re.sub(r"([a-z])\-\s{,2}([a-z])", r"\1\2", txt)

        # collapse two+ newlines into single whitespace
        # txt = re.sub(r"\s+\n{1,}\s*(\w)", r" \1", txt)

        # collapse two+ newlines into single whitespace
        # txt = re.sub("\n+", " ", txt)
        """
        # collapse two+ newlines into single whitespace
        txt = re.sub(r"\s+\n{2,}\s*(\w)", r" \1", txt)

        # double-newlines to dots
        txt = re.sub(r"\n\n", ". ", txt)

        # collapse whitespace
        txt = re.sub(r"(\s){2,}", r"\1", txt)
        # collapse dots
        txt = re.sub(r"\.{2,}", ".", txt)
        # newline to whitespace between word characters
        txt = re.sub(r"(\w)\n(\w)", r"\1 \2", txt)
        # newline + open brace to whitespace
        txt = re.sub(r"(\w)\n(\()", r"\1 \2", txt)
        # comma + newline  to whitespace
        txt = re.sub(r"(\w)\,\n(\w)", r"\1 \2", txt)

        # Number end of sentence, followed by sentence that starts with number + dot
        txt = re.sub(r"(\d+)\.(\d\.\s+)", r"\1. ", txt)
        # remove decimals + dot after whitespace followed by whitespace
        txt = re.sub(r"(\.\s*)\d+\.\s+", r"\1", txt)

        # collapse backslashes
        txt = re.sub(r"\\{2,}", r"\\", txt)
        # remove 'escaped backslash' artefacts
        txt = re.sub(r"\\\\", "", txt)
        # remove lowdash artifacts ("lines")
        txt = re.sub(r"_{2,}", r"", txt)

        # normalize newline
        txt = re.sub(r"\r\n", r"\n", txt)
        # Linebreaks starting with numbers \n77\n
        txt = re.sub(r"\n\d+\n", r"\n", txt)

        # remove quotes + decimals on beginning of sentences
        txt = re.sub(r"\.([\"']?)\d+\s+", r".\1", txt)
        # remove quotes + decimals on beginning of sentences
        txt = re.sub(r"\.([\"']?)\d+\s+", r".\1", txt)

        # collapse dots
        txt = re.sub(r"\.\s+\.", ". ", txt)
        # collapse whitespace
        txt = re.sub(r"(\w+)\s{2,}(\w+)", r"\1 \2", txt)

        # Add space+ dot with double quotes
        txt = re.sub(r"\.\"(\w+)", r'.". \1', txt)

        # Add space+ between two sentences
        txt = re.sub(r"([a-z])\.([A-Z])", r"\1. \2", txt)
        """

        return txt
Example #8
import re

import gensim
from textacy import preprocessing
def preprocess_text(text,
                    char_count_filter=True,
                    stopwords=None,
                    min_len=2,
                    max_len=15):
    """
    Pre-processing steps prior to spaCy nlp pipeline. Optional filtering of
    tokens based on character length.

    Parameters
    ----------
    text : str
    char_count_filter : bool
    stopwords : iterable, None
    min_len : int
    max_len : int

    Returns
    -------
    text : str
        pre-processed text
    """
    # 1) convert to lower case for robust stop-word recognition
    text = text.lower()

    # 2) normalise
    text = preprocessing.normalize_quotation_marks(text)
    # text = preprocessing.normalize_repeating_chars(text)
    text = preprocessing.normalize_hyphenated_words(text)
    text = preprocessing.normalize_whitespace(text)

    # 3) replace
    text = preprocessing.replace_currency_symbols(text)
    text = preprocessing.replace_emails(text)
    text = preprocessing.replace_emojis(text)
    text = preprocessing.replace_hashtags(text)
    text = preprocessing.replace_phone_numbers(text)  # before bare numbers,
    text = preprocessing.replace_numbers(text)        # which would mangle them
    text = preprocessing.replace_urls(text)
    text = preprocessing.replace_user_handles(text)

    # 4) remove
    text = preprocessing.remove_accents(text)
    text = preprocessing.remove_punctuation(text)
    text = re.sub("[^A-Za-z0-9]+", " ", text)  # keep only letters and digits

    # 5) optionally remove tokens based on length
    if char_count_filter and stopwords is not None:
        # filter based on token length
        tokens = gensim.utils.simple_preprocess(doc=text,
                                                min_len=min_len,
                                                max_len=max_len)
        # filter case-specific words
        tokens = [token for token in tokens if token not in stopwords]

        # convert processed list of tokens back to one string
        text = " ".join(tokens)
    else:
        raise NotImplementedError(
            "only char_count_filter=True with a stopwords iterable is supported"
        )

    return text
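
A minimal invocation (hypothetical stopword set; gensim.utils.simple_preprocess does the length-filtered tokenisation):

stops = {"the", "and"}
preprocess_text("The cat and the hat visited https://example.com!", stopwords=stops)
# → roughly 'cat hat visited url'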