Code example #1
File: summarizer.py Project: Cornul11/healthDecode
import re

from textacy import preprocessing  # textacy 0.10.x API, inferred from the calls below

def clean(text: str) -> str:
    txt = text.strip()
    txt = preprocessing.normalize_unicode(txt, form="NFKC")
    # Collapse whitespace
    txt = preprocessing.normalize_whitespace(txt)
    # Collapse repeated newlines into one
    txt = preprocessing.normalize_repeating_chars(txt, chars="\n", maxn=1)
    # fix hyphen-ated words
    txt = preprocessing.normalize_hyphenated_words(txt)
    txt = preprocessing.normalize_quotation_marks(txt)
    txt = preprocessing.replace_urls(txt, replace_with="")
    txt = preprocessing.replace_phone_numbers(txt, replace_with="")
    txt = preprocessing.replace_emails(txt, replace_with="")
    txt = preprocessing.replace_user_handles(txt, replace_with="")
    txt = preprocessing.normalize_repeating_chars(txt, chars=".,;:-_ ", maxn=1)
    txt = re.sub("\n ", " ", txt)
    txt = re.sub(" \n", " ", txt)
    txt = re.sub("\n", " ", txt)
    txt = re.sub(" . ", " ", txt)
    txt = re.sub(r"\.([A-Z])", r". \1", txt)
    txt = re.sub(r"\. ([A-Z])", r".\n\1", txt)
    # fix for some common abbreviations
    for abv in ['Dr', 'St', 'Mr', 'Ms', 'mt', 'Inst', 'inc', 'est']:
        txt = re.sub(abv + r"\.\n", abv + ". ", txt)  # raw string avoids the invalid "\." escape
    return txt
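
A quick sanity check for clean() (a sketch: the sample string is invented, and the imports above assume textacy 0.10.x, where these helpers sit at the top level of textacy.preprocessing):

raw = "Visit https://example.com or mail me@example.com!!\nCall 555-123-4567..."
print(clean(raw))
# URLs, e-mails and phone numbers are stripped, repeated dots and spaces
# collapse, and each ". " before a capital letter becomes a newline.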
Code example #2
File: helper_functions.py Project: rub-ksv/AdHominem
def preprocess_siamese(doc):

    # pre-process data
    doc = tp.normalize.normalize_unicode(doc)
    doc = tp.normalize_whitespace(doc)
    doc = tp.normalize_quotation_marks(doc)
    doc = tp.replace_emails(doc, replace_with="<EMAIL>")
    doc = tp.replace_urls(doc, replace_with="<URL>")
    doc = tp.replace_hashtags(doc, replace_with="<HASHTAG>")
    doc = tp.replace_emojis(doc, replace_with="<EMOJI>")
    doc = tp.replace_phone_numbers(doc, replace_with="<PHONE>")

    # apply spaCy to tokenize doc
    doc = nlp_token(doc)

    # build new sentences for pre-processed doc
    doc_new = []
    for sent in doc.sents:
        sent_new = ""
        for token in sent:
            token = token.text
            token = token.replace("\n", "")
            token = token.replace("\t", "")
            token = token.strip()
            sent_new += token + " "

        doc_new.append(sent_new[:-1])

    return doc_new
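
This excerpt leans on two names defined elsewhere in the file: tp (the textacy.preprocessing package; its normalize submodule is why tp.normalize.normalize_unicode resolves) and nlp_token (a loaded spaCy pipeline). A plausible setup, with the model name being an assumption:

import spacy
import textacy.preprocessing as tp

nlp_token = spacy.load("en_core_web_sm")  # assumed; any pipeline with sentence splitting works

print(preprocess_siamese("Great read!!! Mail me at me@example.com"))
# one whitespace-joined token string per sentence, with the address
# already replaced by <EMAIL> before tokenization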
Code example #3
File: Chi lo ha detto.py Project: Mirkesx/SMM
def cleanTweet(raw_data):
    data = [txt.replace_urls(x, "") for x in raw_data]
    data = [txt.replace_emails(x, "") for x in data]
    data = [txt.replace_emojis(x, "") for x in data]
    data = [txt.replace_user_handles(x, "") for x in data]
    data = [txt.replace_phone_numbers(x, "") for x in data]
    data = [txt.replace_numbers(x, "") for x in data]
    data = [txt.replace_currency_symbols(x, "") for x in data]
    data = [txt.replace_hashtags(x, "") for x in data]
    return data
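
Here txt is presumably textacy.preprocessing under an alias. A usage sketch with invented tweets:

from textacy import preprocessing as txt

tweets = ["Check https://example.com #breaking @user", "Send $5 to me@example.com"]
print(cleanTweet(tweets))
# every URL, e-mail, emoji, handle, phone number, number, currency
# symbol and hashtag is replaced with the empty string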
Code example #4
File: Chi lo ha detto.py Project: Mirkesx/SMM
def checkLenTweet(raw_data):
    data = txt.replace_urls(raw_data, "")
    data = txt.replace_emails(data, "")
    data = txt.replace_emojis(data, "")
    data = txt.replace_user_handles(data, "")
    data = txt.replace_phone_numbers(data, "")
    data = txt.replace_numbers(data, "")
    data = txt.replace_currency_symbols(data, "")
    data = txt.replace_hashtags(data, "")
    # 28 characters is roughly the average length of a tweet
    return len(data) >= 28
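
An invented illustration of the 28-character cut-off: once the URL is stripped, almost nothing remains of the first tweet, so it is rejected.

assert not checkLenTweet("Hi! https://t.co/abc")
assert checkLenTweet("This tweet still has plenty of plain text left over")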
Code example #5
from textacy import preprocessing  # textacy 0.10.x API

def text_cleanup(text):
    """Clean up text."""

    text = preprocessing.replace_emails(text, replace_with='')
    text = preprocessing.replace_urls(text, replace_with='')
    text = preprocessing.replace_hashtags(text, replace_with='')
    text = preprocessing.replace_phone_numbers(text, replace_with='')
    text = preprocessing.replace_numbers(text, replace_with='')

    text = preprocessing.remove_accents(text)
    text = preprocessing.remove_punctuation(text)

    text = preprocessing.normalize_quotation_marks(text)
    text = preprocessing.normalize_hyphenated_words(text)
    text = text.replace('\n', ' ').replace('\t', ' ')
    text = text.lower()

    text = preprocessing.normalize_whitespace(text)
    return text
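
One quirk worth noting: remove_punctuation runs before normalize_quotation_marks and normalize_hyphenated_words, so by the time those normalizers execute there is usually no punctuation left for them to act on; running them first would make them effective. A quick check with an invented string:

print(text_cleanup("Café e-mails: me@example.com, see https://example.com!"))
# accents stripped, e-mail and URL removed, everything lower-cased,
# whitespace collapsed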
Code example #6
from textacy import preprocessing  # textacy 0.10.x API

def textacy_preprocess(sentence):
    """Preprocess text."""
    sentence = preprocessing.normalize_hyphenated_words(sentence)
    sentence = preprocessing.normalize_quotation_marks(sentence)
    #sentence = preprocessing.normalize_repeating_chars(sentence)
    sentence = preprocessing.normalize_unicode(sentence)
    sentence = preprocessing.normalize_whitespace(sentence)
    sentence = preprocessing.remove_accents(sentence)
    sentence = preprocessing.remove_punctuation(sentence)
    sentence = preprocessing.replace_currency_symbols(sentence)
    sentence = preprocessing.replace_emails(sentence)
    sentence = preprocessing.replace_emojis(sentence)
    sentence = preprocessing.replace_hashtags(sentence)
    sentence = preprocessing.replace_numbers(sentence)
    sentence = preprocessing.replace_phone_numbers(sentence)
    sentence = preprocessing.replace_urls(sentence)
    sentence = preprocessing.replace_user_handles(sentence)

    return sentence
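
All the replace_* calls here rely on their defaults, which in textacy 0.10 are placeholder tokens such as _URL_, _EMAIL_ and _PHONE_ (example #10 below passes _PHONE_ explicitly). Note also that remove_punctuation runs before the replace_* calls, so patterns that depend on punctuation tend to no longer match, as this invented example shows:

print(textacy_preprocess("Mail me@example.com about invoice 5"))
# the '@' and '.' are gone before replace_emails runs, so no _EMAIL_
# placeholder appears; the standalone 5 still becomes _NUMBER_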
Code example #7
    def _clean(self, text: str):
        txt = text.strip()

        # Normalize unicode (NFKC folds ligatures, full-width forms, etc.)
        txt = preprocessing.normalize_unicode(txt, form="NFKC")

        # txt = preprocessing.remove_punctuation(txt)

        # Collapse whitespace
        txt = preprocessing.normalize_whitespace(txt)
        # Collapse repeated newlines into one
        txt = preprocessing.normalize_repeating_chars(txt, chars="\n", maxn=1)
        # fix hyphen-ated words
        txt = preprocessing.normalize_hyphenated_words(txt)
        txt = preprocessing.normalize_quotation_marks(txt)
        txt = preprocessing.replace_urls(txt, replace_with="")
        txt = preprocessing.replace_phone_numbers(txt, replace_with="")
        txt = preprocessing.replace_emails(txt, replace_with="")
        txt = preprocessing.replace_user_handles(txt, replace_with="")
        txt = preprocessing.normalize_repeating_chars(txt,
                                                      chars=".,;:-_ ",
                                                      maxn=1)
        txt = re.sub("\n ", " ", txt)
        txt = re.sub(" \n", " ", txt)
        txt = re.sub("\n", " ", txt)
        txt = re.sub(" . ", " ", txt)

        # txt = text.encode().decode("unicode-escape")
        # Used ftfy for "fixing" broken text, e.g. Unicode
        # txt = fix_text(txt.strip(), normalization="NFKC")

        # re- minissence => reminissence
        # txt = re.sub(r"([a-z])\-\s{,2}([a-z])", r"\1\2", txt)

        # collapse two+ newlines into single whitespace
        # txt = re.sub(r"\s+\n{1,}\s*(\w)", r" \1", txt)

        # collapse two+ newlines into single whitespace
        # txt = re.sub("\n+", " ", txt)
        """
        # collapse two+ newlines into single whitespace
        txt = re.sub(r"\s+\n{2,}\s*(\w)", r" \1", txt)

        # double-newlines to dots
        txt = re.sub(r"\n\n", ". ", txt)

        # collapse whitespace
        txt = re.sub(r"(\s){2,}", r"\1", txt)
        # collapse dots
        txt = re.sub(r"\.{2,}", ".", txt)
        # newline to whitespace between word characters
        txt = re.sub(r"(\w)\n(\w)", r"\1 \2", txt)
        # newline + open brace to whitespace
        txt = re.sub(r"(\w)\n(\()", r"\1 \2", txt)
        # comma + newline  to whitespace
        txt = re.sub(r"(\w)\,\n(\w)", r"\1 \2", txt)

        # Number end of sentence, followed by sentence that starts with number + dot
        txt = re.sub(r"(\d+)\.(\d\.\s+)", r"\1. ", txt)
        # remove decimals + dot after whitespace followed by whitespace
        txt = re.sub(r"(\.\s*)\d+\.\s+", r"\1", txt)

        # collapse backslashes
        txt = re.sub(r"\\{2,}", r"\\", txt)
        # remove 'escaped backslash' artefacts
        txt = re.sub(r"\\\\", "", txt)
        # remove lowdash artifacts ("lines")
        txt = re.sub(r"_{2,}", r"", txt)

        # normalize newline
        txt = re.sub(r"\r\n", r"\n", txt)
        # Linebreaks starting with numbers \n77\n
        txt = re.sub(r"\n\d+\n", r"\n", txt)

        # remove quotes + decimals on beginning of sentences
        txt = re.sub(r"\.([\"']?)\d+\s+", r".\1", txt)
        # remove quotes + decimals on beginning of sentences
        txt = re.sub(r"\.([\"']?)\d+\s+", r".\1", txt)

        # collapse dots
        txt = re.sub(r"\.\s+\.", ". ", txt)
        # collapse whitespace
        txt = re.sub(r"(\w+)\s{2,}(\w+)", r"\1 \2", txt)

        # Add space+ dot with double quotes
        txt = re.sub(r"\.\"(\w+)", r'.". \1', txt)

        # Add space+ between two sentences
        txt = re.sub(r"([a-z])\.([A-Z])", r"\1. \2", txt)
        """

        return txt
Code example #8
File: make_corpus.py Project: derpyninja/nlp4cciwr
import re

import gensim
from textacy import preprocessing  # textacy 0.10.x API

def preprocess_text(text,
                    char_count_filter=True,
                    stopwords=None,
                    min_len=2,
                    max_len=15):
    """
    Pre-processing steps prior to spaCy nlp pipeline. Optional filtering of
    tokens based on character length.

    Parameters
    ----------
    text : str
    char_count_filter : bool
    stopwords : iterable, None
    min_len : int
    max_len : int

    Returns
    -------
    text : str
        pre-processed text
    """
    # 1) convert to lower case for robust stop-word recognition
    text = text.lower()

    # 2) normalise
    text = preprocessing.normalize_quotation_marks(text)
    # text = preprocessing.normalize_repeating_chars(text)
    text = preprocessing.normalize_hyphenated_words(text)
    text = preprocessing.normalize_whitespace(text)

    # 3) replace
    text = preprocessing.replace_currency_symbols(text)
    text = preprocessing.replace_emails(text)
    text = preprocessing.replace_emojis(text)
    text = preprocessing.replace_hashtags(text)
    text = preprocessing.replace_numbers(text)
    text = preprocessing.replace_phone_numbers(text)
    text = preprocessing.replace_urls(text)
    text = preprocessing.replace_user_handles(text)

    # 4) remove
    text = preprocessing.remove_accents(text)
    text = preprocessing.remove_punctuation(text)
    text = re.sub("[^A-Za-z0-9]+", " ", text)  # keep text and numbers

    # 5) optionally remove tokens based on length
    if char_count_filter and (stopwords is not None):  # boolean 'and', not bitwise '&'
        # filter based on token length
        tokens = gensim.utils.simple_preprocess(doc=text,
                                                min_len=min_len,
                                                max_len=max_len)
        # filter case-specific words
        tokens = [token for token in tokens if token not in stopwords]

        # convert processed list of tokens back to one string
        text = " ".join(tokens)
    else:
        raise NotImplementedError("Not implemented.")

    return text
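
A usage sketch (the stop-word set is invented; gensim.utils.simple_preprocess tokenizes, lower-cases, and drops tokens outside the length bounds):

stop = {"the", "of", "and"}
print(preprocess_text("The water-basin report of 2003!", stopwords=stop))
# e.g. "water basin report number": the year becomes textacy's _NUMBER_
# placeholder (its underscores are later stripped as punctuation),
# hyphens become whitespace, and stop-words are filtered out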
Code example #9
    def clean_tweet(self, text):
        # FIXED UNICODE
        # text = preprocess.fix_bad_unicode(text)
        text = ftfy.fix_text(text)

        # GET TEXT ONLY FROM HTML
        text = BeautifulSoup(text, features='lxml').getText()

        # UN-PACK CONTRACTIONS
        text = preprocess.unpack_contractions(text)

        # REMOVE URL
        # text = preprocess.replace_urls(text)
        text = preprocessing.replace_urls(text)

        # REMOVE EMAILS
        # text = preprocess.replace_emails(text)
        text = preprocessing.replace_emails(text)

        # REMOVE PHONE NUMBERS
        # text = preprocess.replace_phone_numbers(text)
        text = preprocessing.replace_phone_numbers(text)

        # REMOVE NUMBERS
        # text = preprocess.replace_numbers(text)
        text = preprocessing.replace_numbers(text)

        # REMOVE CURRENCY
        # text = preprocess.replace_currency_symbols(text)
        text = preprocessing.replace_currency_symbols(text)

        # REMOVE ACCENTS
        # text = preprocess.remove_accents(text)
        text = preprocessing.remove_accents(text)

        # CONVERT EMOJIS TO TEXT
        words = text.split()
        reformed = [
            self.SMILEY[word] if word in self.SMILEY else word
            for word in words
        ]
        text = " ".join(reformed)
        text = emoji.demojize(text)
        text = text.replace(":", " ")
        text = ' '.join(text.split())

        # SPLIT ATTACHED WORDS
        text = ' '.join(re.findall('[A-Z][^A-Z]*', text))

        # SPLIT UNDERSCORE WORDS
        text = text.replace('_', ' ')

        # REMOVE PUNCTUATION
        # text = preprocess.remove_punct(text)
        text = preprocessing.remove_punctuation(text)

        # Remove numbers
        text = re.sub(r'\d', '', text)

        # REMOVE WORDS LESS THAN 3 CHARACTERS
        text = re.sub(r'\b\w{1,2}\b', '', text)

        # NORMALIZE WHITESPACE
        # text = preprocess.normalize_whitespace(text)
        text = preprocessing.normalize_whitespace(text)

        return text
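
This method leans on context not shown here: self.SMILEY, presumably an emoticon-to-word dict, and the deprecated textacy.preprocess module (the commented-out calls record the migration to textacy.preprocessing, but the surviving unpack_contractions call still needs the old module, so this pins textacy to a release that ships both, roughly 0.9.x). Also note that the "split attached words" step keeps only spans that start with a capital letter, so any leading lower-case text is silently dropped. A hypothetical lookup table:

SMILEY = {":)": "smile", ":(": "sad", ":D": "laugh"}  # invented emoticon -> word mapping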
Code example #10
File: test_replace.py Project: zhanglipku/textacy
from textacy import preprocessing

def test_replace_phone_numbers():
    text = "I can be reached at 555-123-4567 through next Friday."
    proc_text = "I can be reached at _PHONE_ through next Friday."
    assert preprocessing.replace_phone_numbers(text, "_PHONE_") == proc_text
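
For contrast, the same check with the default placeholder, which textacy 0.10 sets to "_PHONE_" (an assumed illustration, not part of the original test):

text = "I can be reached at 555-123-4567 through next Friday."
assert preprocessing.replace_phone_numbers(text) == "I can be reached at _PHONE_ through next Friday."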