def test_replace_emojis():
    in_outs = [
        ("ugh, it's raining *again* ☔", "ugh, it's raining *again* _EMOJI_"),
        ("✌ tests are passing ✌", "_EMOJI_ tests are passing _EMOJI_"),
    ]
    for in_, out_ in in_outs:
        assert preprocessing.replace_emojis(in_) == out_
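# Usage sketch for the test above: replace_emojis also accepts a custom
# replace_with marker. Assumes textacy < 0.11, where the function is exposed
# directly on textacy.preprocessing; the input string is taken from the test.
from textacy import preprocessing

assert preprocessing.replace_emojis("✌ tests are passing ✌") == "_EMOJI_ tests are passing _EMOJI_"
print(preprocessing.replace_emojis("✌ tests are passing ✌", replace_with="<EMOJI>"))
# expected: "<EMOJI> tests are passing <EMOJI>"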
def preprocess_siamese(doc):
    # pre-process data (tp is presumably textacy.preprocessing, textacy < 0.11 API)
    doc = tp.normalize.normalize_unicode(doc)
    doc = tp.normalize_whitespace(doc)
    doc = tp.normalize_quotation_marks(doc)
    doc = tp.replace_emails(doc, replace_with="<EMAIL>")
    doc = tp.replace_urls(doc, replace_with="<URL>")
    doc = tp.replace_hashtags(doc, replace_with="<HASHTAG>")
    doc = tp.replace_emojis(doc, replace_with="<EMOJI>")
    doc = tp.replace_phone_numbers(doc, replace_with="<PHONE>")
    # apply spaCy to tokenize doc (nlp_token is a spaCy pipeline defined at module level)
    doc = nlp_token(doc)
    # build new sentences for the pre-processed doc, one string per sentence
    doc_new = []
    for sent in doc.sents:
        sent_new = ""
        for token in sent:
            # strip newlines, tabs and surrounding whitespace from each token
            token = token.text
            token = token.replace("\n", "")
            token = token.replace("\t", "")
            token = token.strip()
            sent_new += token + " "
        doc_new.append(sent_new[:-1])
    return doc_new
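# Hedged usage sketch for preprocess_siamese. It assumes "tp" is
# textacy.preprocessing (textacy < 0.11 API) and "nlp_token" is a spaCy
# pipeline loaded at module level; the model name and input are assumptions.
import spacy
import textacy.preprocessing as tp

nlp_token = spacy.load("en_core_web_sm")

sentences = preprocess_siamese("Mail me at foo@bar.com!\nDetails: https://example.com #updates")
for sentence in sentences:
    print(sentence)  # one whitespace-joined token string per detected sentence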
def cleanTweet(raw_data):
    # strip URLs, e-mails, emojis, user handles, phone numbers, numbers,
    # currency symbols and hashtags from every tweet in raw_data
    data = [txt.replace_urls(x, "") for x in raw_data]
    data = [txt.replace_emails(x, "") for x in data]
    data = [txt.replace_emojis(x, "") for x in data]
    data = [txt.replace_user_handles(x, "") for x in data]
    data = [txt.replace_phone_numbers(x, "") for x in data]
    data = [txt.replace_numbers(x, "") for x in data]
    data = [txt.replace_currency_symbols(x, "") for x in data]
    data = [txt.replace_hashtags(x, "") for x in data]
    return data
def checkLenTweet(raw_data):
    # remove everything that carries no textual content before measuring length
    data = txt.replace_urls(raw_data, "")
    data = txt.replace_emails(data, "")
    data = txt.replace_emojis(data, "")
    data = txt.replace_user_handles(data, "")
    data = txt.replace_phone_numbers(data, "")
    data = txt.replace_numbers(data, "")
    data = txt.replace_currency_symbols(data, "")
    data = txt.replace_hashtags(data, "")
    if len(data) < 28:  # average length of a tweet
        return False
    else:
        return True
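# Hedged usage sketch for cleanTweet / checkLenTweet. "txt" is assumed to be
# textacy.preprocessing imported under that alias (textacy < 0.11 API); the
# sample tweets are illustrative.
import textacy.preprocessing as txt

tweets = [
    "@user look at https://example.com 😀 #breaking",
    "ok 😀",
]
print(cleanTweet(tweets))                  # tweets with URLs, handles, emojis, etc. stripped
print([checkLenTweet(t) for t in tweets])  # True only if > 28 chars remain after stripping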
def preprocess_sentence(sent, lower=True):
    """Pre-process a sentence (via the ``textacy.preprocessing`` module).

    Args:
        sent (str): text.
        lower (bool): whether to return a lowercase string.

    Returns:
        str
    """
    # normalize unicode
    sent = preprocessing.normalize_unicode(sent)
    # deaccent
    sent = preprocessing.remove_accents(sent)
    # replace newline chars
    sent = re.sub("\n|\r", " ", sent)
    # unpack contractions
    sent = contractions.fix(sent)
    # replace emoji symbols
    sent = preprocessing.replace_emojis(sent)
    # replace hashtags
    sent = preprocessing.replace_hashtags(sent)
    # replace user handles
    sent = preprocessing.replace_user_handles(sent)
    # replace currency symbols
    sent = preprocessing.replace_currency_symbols(sent)
    # replace emails
    sent = preprocessing.replace_emails(sent)
    # replace URLs
    sent = preprocessing.replace_urls(sent)
    # remove punctuation
    sent = preprocessing.remove_punctuation(sent)
    # normalize whitespace
    sent = preprocessing.normalize_whitespace(sent)
    if lower:
        sent = sent.lower()
    return sent
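# Hedged usage sketch for preprocess_sentence. It assumes the globals the
# function relies on are available: re, the "contractions" package, and
# textacy < 0.11 preprocessing; the sample sentence is illustrative.
import re
import contractions
from textacy import preprocessing

print(preprocess_sentence("I can't wait!! Mail foo@bar.com ✌ #hype"))
# expected form: contractions unpacked, emoji/hashtag/email replaced by
# placeholder tokens, punctuation removed, lowercased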
def textacy_preprocess(sentence):
    """Preprocess text."""
    sentence = preprocessing.normalize_hyphenated_words(sentence)
    sentence = preprocessing.normalize_quotation_marks(sentence)
    # sentence = preprocessing.normalize_repeating_chars(sentence)
    sentence = preprocessing.normalize_unicode(sentence)
    sentence = preprocessing.normalize_whitespace(sentence)
    sentence = preprocessing.remove_accents(sentence)
    sentence = preprocessing.remove_punctuation(sentence)
    sentence = preprocessing.replace_currency_symbols(sentence)
    sentence = preprocessing.replace_emails(sentence)
    sentence = preprocessing.replace_emojis(sentence)
    sentence = preprocessing.replace_hashtags(sentence)
    sentence = preprocessing.replace_numbers(sentence)
    sentence = preprocessing.replace_phone_numbers(sentence)
    sentence = preprocessing.replace_urls(sentence)
    sentence = preprocessing.replace_user_handles(sentence)
    return sentence
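# Hedged usage sketch for textacy_preprocess (textacy < 0.11 flat
# preprocessing API); the input string is illustrative.
from textacy import preprocessing

print(textacy_preprocess("Déjà-vu costs $9.99, I'd call it a “bargain”!"))
# prints the sentence after the full normalise / remove / replace pipeline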
def preprocess_text(text, char_count_filter=True, stopwords=None, min_len=2, max_len=15):
    """
    Pre-processing steps prior to spaCy nlp pipeline.
    Optional filtering of tokens based on character length.

    Parameters
    ----------
    text : str
    char_count_filter : bool
    stopwords : iterable, None
    min_len : int
    max_len : int

    Returns
    -------
    text : str
        pre-processed text
    """
    # 1) convert to lower case for robust stop-word recognition
    text = text.lower()

    # 2) normalise
    text = preprocessing.normalize_quotation_marks(text)
    # text = preprocessing.normalize_repeating_chars(text)
    text = preprocessing.normalize_hyphenated_words(text)
    text = preprocessing.normalize_whitespace(text)

    # 3) replace
    text = preprocessing.replace_currency_symbols(text)
    text = preprocessing.replace_emails(text)
    text = preprocessing.replace_emojis(text)
    text = preprocessing.replace_hashtags(text)
    text = preprocessing.replace_numbers(text)
    text = preprocessing.replace_phone_numbers(text)
    text = preprocessing.replace_urls(text)
    text = preprocessing.replace_user_handles(text)

    # 4) remove
    text = preprocessing.remove_accents(text)
    text = preprocessing.remove_punctuation(text)
    text = re.sub("[^A-Za-z0-9]+", " ", text)  # keep only letters and numbers

    # 5) optionally remove tokens based on length
    if char_count_filter and (stopwords is not None):
        # filter based on token length
        tokens = gensim.utils.simple_preprocess(doc=text, min_len=min_len, max_len=max_len)
        # filter case-specific words
        tokens = [token for token in tokens if token not in stopwords]
        # convert processed list of tokens back to one string
        text = " ".join(tokens)
    else:
        raise NotImplementedError("Not implemented.")

    return text
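# Hedged usage sketch for preprocess_text. It assumes re, gensim and
# textacy < 0.11 preprocessing are importable; the stop-word set and the
# input text are illustrative.
import re
import gensim
from textacy import preprocessing

stopwords = {"the", "a", "an", "number"}
sample = "The price dropped to $9.99, details at https://example.com!"
print(preprocess_text(sample, char_count_filter=True, stopwords=stopwords))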