import textstat
from textblob import TextBlob


def get_special_metrics(text):
    """Bundle surface statistics, readability scores, and sentiment in one dict."""
    blob = TextBlob(text)
    main = {
        'statistics': {
            'syllables': textstat.syllable_count(text),
            'words': textstat.lexicon_count(text),
            'characters': textstat.char_count(text),
            'polysyllables': textstat.polysyllabcount(text),
            'average letter per word': textstat.avg_letter_per_word(text),
            'average sentence length': textstat.avg_sentence_length(text),
            'average sentence per word': textstat.avg_sentence_per_word(text),
            'sentences': textstat.sentence_count(text)
        },
        'difficulty': {
            'flesch reading ease': textstat.flesch_reading_ease(text),
            'smog index': textstat.smog_index(text),
            'flesch kincaid grade': textstat.flesch_kincaid_grade(text),
            'coleman liau index': textstat.coleman_liau_index(text),
            # 'automated readability index': textstat.automated_readability_index(text),
            # 'dale chall readability score': textstat.dale_chall_readability_score(text),
            # 'difficult words': textstat.difficult_words(text),
            # 'linsear write formula': textstat.linsear_write_formula(text),
            'gunning fog': textstat.gunning_fog(text)
        },
        'sentiments': {
            'polarity': blob.sentiment.polarity,
            'subjectivity': blob.sentiment.subjectivity
        }
    }
    return main
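
# Usage sketch for get_special_metrics (the sample sentence and variable
# names below are illustrative, not from the original source):
sample_text = ("Readability formulas estimate how hard a passage is to read. "
               "Short sentences built from common words lower the grade level.")
metrics = get_special_metrics(sample_text)
print(metrics['difficulty']['flesch reading ease'])
print(metrics['sentiments']['polarity'])
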
def get_special_metrics(text): blob = TextBlob(text) main = { "statistics": { "syllables": textstat.syllable_count(text), "words": textstat.lexicon_count(text), "characters": textstat.char_count(text), "polysyllables": textstat.polysyllabcount(text), "average letter per word": textstat.avg_letter_per_word(text), "average sentence length": textstat.avg_sentence_length(text), "average sentence per word": textstat.avg_sentence_per_word(text), "sentences": textstat.sentence_count(text), }, "difficulty": { "flesch reading ease": textstat.flesch_reading_ease(text), "smog index": textstat.smog_index(text), "flesch kincaid grade": textstat.flesch_kincaid_grade(text), "coleman liau index": textstat.coleman_liau_index(text), #'automated readability index': textstat.automated_readability_index(text), #'dale chall readability score': textstat.dale_chall_readability_score(text), #'difficult words': textstat.difficult_words(text), #'linsear write formula': textstat.linsear_write_formula(text), "gunning fog": textstat.gunning_fog(text), }, "sentiments": {"polarity": blob.sentiment.polarity, "subjectivity": blob.sentiment.subjectivity}, } return main
def scores_cal_ori(text):
    """Return raw textstat counts and readability scores as a 14-tuple."""
    char_count_value = textstat.char_count(text, ignore_spaces=True)
    lexicon_count_value = textstat.lexicon_count(text, removepunct=True)
    syllable_count_value = textstat.syllable_count(text)
    sentence_count_value = textstat.sentence_count(text)
    avg_sentence_length_value = textstat.avg_sentence_length(text)
    avg_syllables_per_word_value = textstat.avg_syllables_per_word(text)
    avg_letter_per_word_value = textstat.avg_letter_per_word(text)
    avg_sentence_per_word_value = textstat.avg_sentence_per_word(text)
    flesch_kincaid_grade_value = textstat.flesch_kincaid_grade(text)
    smog_index_value = textstat.smog_index(text)
    gunning_fog_value = textstat.gunning_fog(text)
    difficult_words_value = textstat.difficult_words(text)
    dale_chall_value = textstat.dale_chall_readability_score(text)
    polysyllab_value = textstat.polysyllabcount(text)
    return (char_count_value, lexicon_count_value, syllable_count_value,
            sentence_count_value, avg_sentence_length_value,
            avg_syllables_per_word_value, avg_letter_per_word_value,
            avg_sentence_per_word_value, flesch_kincaid_grade_value,
            smog_index_value, gunning_fog_value, difficult_words_value,
            dale_chall_value, polysyllab_value)
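
# scores_cal_ori returns a flat 14-tuple, so callers unpack positionally;
# this sketch mirrors the ordering of the return statement above. The
# sample text and variable names are illustrative.
(chars, words, syllables, sentences,
 avg_sentence_len, avg_syllables, avg_letters, avg_sentences_per_word,
 fk_grade, smog, fog, difficult, dale_chall, polysyllables) = scores_cal_ori(
    "Readability scores reward short, plain sentences.")
print(fk_grade, smog, fog)
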
def __init__(self, path):
    """Create a document instance for analysis.

    Opens and reads the document into the string raw_text. Textract
    interprets the document format and extracts a plain-text string
    (docx, pdf, odt, txt).

    Args:
        path (str): path to the file to open, analyze, and close.

    Public attributes:
    -user: (str) optional string to set username.
    -path: (str) relative path to document.
    -abs_path: (str) the absolute path to the document.
    -file_name: (str) the file name with extension of document (base name).
    -mime: tbd
    -guessed_type: makes best guess of mimetype of document.
    -file_type: returns index[0] from guessed_type.
    -raw_text: (str) plain text extracted from .txt, .odt, .pdf, .docx, and .doc.
    -ptext: (str) raw text after a series of regex substitutions to eliminate special characters.
    -text_no_feed: (str) ptext with most newline characters eliminated; \n\n stays intact.
    -sentence_tokens: list of all sentences, derived by nltk.
    -sentence_count: (int) count of sentences found in the list.
    -passive_sentences: list of passive sentences identified by the passive module.
    -passive_sentence_count: count of the passive_sentences list.
    -percent_passive: (float) ratio of passive sentences to all sentences, in percent.
    -be_verb_analysis: (int) sum of occurrences of each "to be" verb (am, is, are, was, were, be, being, been).
    -be_verb_count: tbd
    -weak_sentences_all: (int) sum of the be-verb analysis.
    -weak_sentences_set: (set) set of all sentences identified as having "to be" verbs.
    -weak_sentences_count: (int) count of items in weak_sentences_set.
    -weak_verbs_to_sentences: (float) proportion of sentences with "to be" verbs to all sentences, in percent (this might not be sound).
    -word_tokens: list of discrete words in the text; breaks contractions up (default nltk tokenizer).
    -word_tokens_no_punct: list of all words in text including contractions but otherwise no punctuation; rebuilt with the whitespace tokenizer, which is the version to use.
    -no_punct: (str) full text string without sentence punctuation.
    -readability_flesch_re: (int) Flesch Reading Ease score from the textstat module.
    -readability_smog_index: (int) grade level as determined by the SMOG algorithm from the textstat module.
    -readability_flesch_kincaid_grade: (int) Flesch-Kincaid grade level of reader from the textstat module.
    -readability_coleman_liau_index: (int) grade level of reader from the textstat module.
    -readability_ari: (int) grade level of reader determined by the Automated Readability Index algorithm implemented by textstat.
    -readability_linsear_write: grade level as determined by the Linsear Write algorithm implemented by textstat.
    -readability_dale_chall: (int) grade level based on Dale-Chall readability as determined by textstat.
    -readability_standard: composite grade level based on the readability algorithms.
    -flesch_re_key: list for interpreting the Flesch RE score.
    -word_count: word count of document based on the whitespace tokenizer; this is the word count that should be used.
    -page_length: (float) page length in decimal form, given 250 words per page.
    -paper_count: (int) number of printed pages, given 250 words per page.
    -parts_of_speech: words with part-of-speech tags.
    -pos_counts: (Counter) frequency of each part-of-speech tag.
    -pos_total: (int) sum of pos_counts values.
    -pos_freq: (dict) part-of-speech tag mapped to its ratio of the whole.
    -doc_pages: (float) page length based on 250 words per page (warning: this is the second time this attribute is defined).
    -freq_words: word frequency count, not standardized, based on the correct word tokenizer (not a ratio, just a count).
    -modal_dist: count of auxiliary verbs based on word_tokens_no_punct.

    sentence_count (int): count of the sentence tokens.
    passive_sentences (list): list of all sentences identified as passive.
    passive_sentence_count (int): count of items in passive_sentences.
    be_verb_count (int): count of "to be" verbs in the text.
    word_tokens_no_punct (list): words separated, stripped of punctuation, lower-cased.
    flesch_re_key (str): maps the reading ease score to a description.
    freq_words (list or dict): frequency distribution of all words.
    modal_dist (list): frequency distribution of auxiliary verbs.
    """
    self.user = ""
    self.path = path
    self.abs_path = os.path.abspath(self.path)
    if os.path.isfile(self.path):
        self.time_stamp = self.timestamp()
        self.file_name = os.path.basename(path)
        self.mime = MimeTypes()
        self.guessed_type = self.mime.guess_type(self.path)
        self.file_type = self.guessed_type[0]
        self.raw_text = textract.process(self.path, encoding="ascii")
        # Normalize typographic punctuation to ASCII equivalents.
        self.ptext = re.sub(u'[\u201c\u201d]', '"', self.raw_text)
        self.ptext = re.sub(u"\u2014", "--", self.ptext)
        self.ptext = re.sub(",", ",", self.ptext)
        self.ptext = re.sub("—", "--", self.ptext)
        self.ptext = re.sub("…", "...", self.ptext)
        self.text_no_feed = self.clean_new_lines(self.ptext)
        self.sentence_tokens = self.sentence_tokenize(self.text_no_feed)
        self.sentence_count = len(self.sentence_tokens)
        self.passive_sentences = passive(self.text_no_feed)
        self.passive_sentence_count = len(self.passive_sentences)
        self.percent_passive = (100 * (float(self.passive_sentence_count) /
                                       float(self.sentence_count)))
        self.percent_passive_round = round(self.percent_passive, 2)
        self.be_verb_analysis = self.count_be_verbs(self.sentence_tokens)
        self.be_verb_count = self.be_verb_analysis[0]
        self.weak_sentences_all = self.be_verb_analysis[1]
        self.weak_sentences_set = set(self.weak_sentences_all)
        self.weak_sentences_count = len(self.weak_sentences_set)
        self.weak_verbs_to_sentences = 100 * float(
            self.weak_sentences_count) / float(self.sentence_count)
        self.weak_verbs_to_sentences_round = round(
            self.weak_verbs_to_sentences, 2)
        self.word_tokens = self.word_tokenize(self.text_no_feed)
        self.word_tokens_no_punct = \
            self.word_tokenize_no_punct(self.text_no_feed)
        self.no_punct = self.strip_punctuation(self.text_no_feed)
        # Use this tokenization: it lower-cases and strips symbols.
        self.word_tokens_no_punct = self.ws_tokenize(self.no_punct)
        self.readability_flesch_re = \
            textstat.flesch_reading_ease(self.text_no_feed)
        self.readability_smog_index = \
            textstat.smog_index(self.text_no_feed)
        self.readability_flesch_kincaid_grade = \
            textstat.flesch_kincaid_grade(self.text_no_feed)
        self.readability_coleman_liau_index = \
            textstat.coleman_liau_index(self.text_no_feed)
        self.readability_ari = \
            textstat.automated_readability_index(self.text_no_feed)
        self.readability_linsear_write = \
            textstat.linsear_write_formula(self.text_no_feed)
        self.readability_dale_chall = \
            textstat.dale_chall_readability_score(self.text_no_feed)
        self.readability_standard = \
            textstat.text_standard(self.text_no_feed)
        self.flesch_re_desc_str = self.flesch_re_desc(
            int(textstat.flesch_reading_ease(self.text_no_feed)))
        self.polysyllabcount = textstat.polysyllabcount(self.text_no_feed)
        self.lexicon_count = textstat.lexicon_count(self.text_no_feed)
        self.avg_syllables_per_word = textstat.avg_syllables_per_word(
            self.text_no_feed)
        self.avg_sentence_per_word = textstat.avg_sentence_per_word(
            self.text_no_feed)
        self.avg_sentence_length = textstat.avg_sentence_length(
            self.text_no_feed)
        self.avg_letter_per_word = textstat.avg_letter_per_word(
            self.text_no_feed)
        self.difficult_words = textstat.difficult_words(self.text_no_feed)
        self.rand_passive = self.select_random(self.passive_sentence_count,
                                               self.passive_sentences)
        self.rand_weak_sentence = self.select_random(
            len(self.weak_sentences_set), self.weak_sentences_set)
        if self.word_tokens_no_punct:
            self.word_count = len(self.word_tokens_no_punct)
            self.page_length = float(self.word_count) / float(250)
            self.paper_count = int(math.ceil(self.page_length))
            self.parts_of_speech = pos_tag(self.word_tokens_no_punct)
            self.pos_counts = Counter(
                tag for word, tag in self.parts_of_speech)
            self.pos_total = sum(self.pos_counts.values())
            self.pos_freq = dict(
                (word, float(count) / self.pos_total)
                for word, count in self.pos_counts.items())
            self.doc_pages = float(float(self.word_count) / float(250))
            self.freq_words = \
                self.word_frequency(self.word_tokens_no_punct)
            self.modal_dist = self.modal_count(self.word_tokens_no_punct)
            # self.ws_tokens = self.ws_tokenize(self.text_no_cr)
            self.pos_count_dict = self.pos_counts.items()
            # Model - use this pattern for any part of speech.
            self.modals = self.pos_isolate('MD', self.pos_count_dict)
            self.preposition_count = self.pos_isolate(
                'IN', self.pos_count_dict)
            self.adjective_count = self.pos_isolate_fuzzy(
                'JJ', self.pos_count_dict)
            self.adverb_count = self.pos_isolate_fuzzy(
                'RB', self.pos_count_dict)
            self.proper_nouns = self.pos_isolate_fuzzy(
                'NNP', self.pos_count_dict)
            self.cc_count = self.pos_isolate('CC', self.pos_count_dict)
            self.commas = self.char_count(",")
            self.comma_sentences = self.list_sentences(",")
            self.comma_example = self.select_random(
                len(self.comma_sentences), self.comma_sentences)
            self.semicolons = self.char_count(";")
            self.semicolon_sentences = self.list_sentences(";")
            self.semicolon_example = self.select_random(
                len(self.semicolon_sentences), self.semicolon_sentences)
            self.lint_suggestions = lint(self.raw_text)

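# Instantiation sketch for the class above. The class name `Document` and
# the file path are assumptions: the enclosing class statement and a real
# input file are not shown here.
doc = Document("sample.docx")
print(doc.readability_standard)    # composite grade level
print(doc.percent_passive_round)   # percent of sentences flagged passive
print(doc.paper_count)             # printed pages at 250 words per page
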
def test_polysyllabcount(self):
    count = textstat.polysyllabcount(self.long_test)
    self.assertEqual(32, count)
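
# Quick sketch of the behavior under test: textstat.polysyllabcount counts
# words of three or more syllables. The sentence below is illustrative; the
# expected value of 32 above applies only to the original self.long_test fixture.
import textstat
print(textstat.polysyllabcount(
    "The university celebrated its anniversary enthusiastically."))
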
import sys
import textstat as ts
from nltk.tokenize import word_tokenize


def preprocess(x):
    """Build the feature vector for one corpus record."""
    print('PROCESSING ID: ' + str(x['id']))
    try:
        fvec = []
        fvec.append(int(x['id']))  # append article ID
        fvec.append(nnp_num(x['targetTitle']))
        if len(x['targetParagraphs']) > 0:
            body = ' '.join(x['targetParagraphs'])
            fvec.append(ts.automated_readability_index(body))
            fvec.append(ts.avg_letter_per_word(body))
            fvec.append(ts.avg_sentence_length(body))
            fvec.append(ts.avg_sentence_per_word(body))
            fvec.append(ts.avg_syllables_per_word(body))
            fvec.append(ts.char_count(body))
            fvec.append(ts.coleman_liau_index(body))
            fvec.append(ts.dale_chall_readability_score(body))
            fvec.append(ts.difficult_words(body))
            fvec.append(ts.flesch_kincaid_grade(body))
            fvec.append(ts.flesch_reading_ease(body))
            fvec.append(ts.gunning_fog(body))
            fvec.append(ts.lexicon_count(body))
            fvec.append(ts.linsear_write_formula(body))
            fvec.append(ts.polysyllabcount(body))
            fvec.append(ts.sentence_count(body))
            fvec.append(ts.smog_index(body))
            fvec.append(ts.syllable_count(body))
            fvec.append(mean_wordlen(x['targetParagraphs']))
            fvec += ratio(x['targetParagraphs'])  # 36 features
            fvec += ngram_feat(x['targetParagraphs'])  # 6 features
        else:
            fvec += [0] * 61
        if len(word_tokenize(' '.join(x['postText']))) > 0:
            fvec.append(max_wordlen(x['postText']))
            fvec.append(sw_ratio(' '.join(x['postText'])))
            fvec += ngram_feat(x['postText'])  # 6 features
        else:
            fvec += [0] * 8
        fvec.append(len(word_tokenize(x['targetTitle'])))
        fvec.append(wlen_title(x['targetTitle']))
        fvec.append(pos_2gram(x['targetTitle'], 'NNP', 'NNP'))
        fvec.append(int(num_start(x['targetTitle'])))
        fvec.append(in_num(x['targetTitle']))
        fvec.append(pos_2gram(x['targetTitle'], 'NNP', 'VBZ'))
        fvec.append(pos_2gram(x['targetTitle'], 'IN', 'NNP'))
        fvec.append(wrb_num(x['targetTitle']))
        fvec.append(nnp_num(x['targetTitle']))
        fvec.append(int(wh_start(x['targetTitle'])))
        fvec.append(int(qm_exist(x['targetTitle'])))
        fvec.append(pos_thnn(x['targetTitle']))
        fvec.append(prp_count(x['targetTitle']))
        fvec.append(vbz_count(x['targetTitle']))
        fvec.append(pos_3gram(x['targetTitle'], 'NNP', 'NNP', 'VBZ'))
        fvec.append(pos_2gram(x['targetTitle'], 'NN', 'IN'))
        fvec.append(pos_3gram(x['targetTitle'], 'NN', 'IN', 'NNP'))
        fvec.append(pos_2gram(x['targetTitle'], 'NNP', '.'))
        fvec.append(pos_2gram(x['targetTitle'], 'PRP', 'VBP'))
        fvec.append(wp_count(x['targetTitle']))
        fvec.append(dt_count(x['targetTitle']))
        fvec.append(pos_2gram(x['targetTitle'], 'NNP', 'IN'))
        fvec.append(pos_3gram(x['targetTitle'], 'IN', 'NNP', 'NNP'))
        fvec.append(pos_count(x['targetTitle']))
        fvec.append(pos_2gram(x['targetTitle'], 'IN', 'NN'))
        if len(x['targetKeywords']) > 0 and len(x['postText']) > 0:
            fvec.append(kw_post_match(x['targetKeywords'], x['postText']))
        else:
            fvec += [0] * 1
        fvec.append(comma_count(x['targetTitle']))
        fvec.append(pos_2gram(x['targetTitle'], 'NNP', 'NNS'))
        fvec.append(pos_2gram(x['targetTitle'], 'IN', 'JJ'))
        fvec.append(pos_2gram(x['targetTitle'], 'NNP', 'POS'))
        fvec.append(wdt_count(x['targetTitle']))
        fvec.append(pos_2gram(x['targetTitle'], 'NN', 'NN'))
        fvec.append(pos_2gram(x['targetTitle'], 'NN', 'NNP'))
        fvec.append(pos_2gram(x['targetTitle'], 'NNP', 'VBD'))
        fvec.append(rb_count(x['targetTitle']))
        fvec.append(pos_3gram(x['targetTitle'], 'NNP', 'NNP', 'NNP'))
        fvec.append(pos_3gram(x['targetTitle'], 'NNP', 'NNP', 'NN'))
        fvec.append(rbs_count(x['targetTitle']))
        fvec.append(vbn_count(x['targetTitle']))
        fvec.append(pos_2gram(x['targetTitle'], 'VBN', 'IN'))
        fvec.append(pos_2gram(x['targetTitle'], 'JJ', 'NNP'))
        fvec.append(pos_3gram(x['targetTitle'], 'NNP', 'NN', 'NN'))
        fvec.append(pos_2gram(x['targetTitle'], 'DT', 'NN'))
        fvec.append(ex_exist(x['targetTitle']))
        fvec += ngram_feat(x['targetTitle'])  # 6 features
    except Exception as e:
        print('EXCEPTION AT ID ' + str(x['id']))
        print(e)
        sys.exit()
    return fvec
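
# Usage sketch: preprocess expects a single corpus record whose field names
# match the JSONL schema used above ('id', 'targetTitle', 'targetParagraphs',
# 'postText', 'targetKeywords'). The record below is illustrative, and the
# POS/n-gram helpers (nnp_num, ngram_feat, ...) must already be defined.
record = {
    'id': 0,
    'targetTitle': 'Ten Tricks Editors Use Every Day',
    'targetParagraphs': ['Editing is mostly deleting.',
                         'The rest is rereading.'],
    'postText': ["You won't believe trick number seven"],
    'targetKeywords': 'editing, tricks',
}
features = preprocess(record)
print(len(features))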