def do_text_stats(self, text):
    """Compute a suite of textstat readability metrics for *text*.

    Returns a dict mapping metric names to values. The original only
    guarded four of the twelve metrics against TypeError; every metric
    is now guarded consistently and reported as None on failure.
    """
    def _safe(func, *args):
        # Some textstat functions raise TypeError on degenerate input;
        # report those metrics as None instead of failing the whole call.
        try:
            return func(*args)
        except TypeError:
            return None

    return {
        "syllable_count": _safe(textstat.syllable_count, text),
        # True -> remove punctuation before counting words
        "lexicon_count": _safe(textstat.lexicon_count, text, True),
        "sentence_count": _safe(textstat.sentence_count, text),
        # Flesch Reading Ease bands: 90-100 very easy, 80-89 easy,
        # 70-79 fairly easy, 60-69 standard, 50-59 fairly difficult,
        # 30-49 difficult, 0-29 very confusing.
        "flesch_reading_ease": _safe(textstat.flesch_reading_ease, text),
        "flesch_kincaid_grade": _safe(textstat.flesch_kincaid_grade, text),
        "gunning_fog": _safe(textstat.gunning_fog, text),
        "smog_index": _safe(textstat.smog_index, text),
        "automated_readability_index": _safe(textstat.automated_readability_index, text),
        "coleman_liau_index": _safe(textstat.coleman_liau_index, text),
        "linsear_write_formula": _safe(textstat.linsear_write_formula, text),
        "dale_chall_readability_score": _safe(textstat.dale_chall_readability_score, text),
        # Readability consensus based upon all the above tests.
        "text_standard": _safe(textstat.text_standard, text),
    }
def text_analytics(text):
    """Return 27 readability features for *text*: three counts, six
    scores, and every score multiplied by every count; returns None
    when the text contains no sentences."""
    if textstat.sentence_count(text) == 0:
        return
    counts = [
        textstat.lexicon_count(text),    # word count
        textstat.sentence_count(text),   # sentence count
        textstat.syllable_count(text),   # syllable count
    ]
    scores = [
        textstat.flesch_reading_ease(text),            # flesch score
        textstat.smog_index(text),                     # SMOG index
        textstat.gunning_fog(text),                    # FOG index
        textstat.dale_chall_readability_score(text),   # grade level
        textstat.automated_readability_index(text),    # grade level
        textstat.coleman_liau_index(text),             # grade level
    ]
    # Interaction features, score-major to match the original layout:
    # [lex*flesch, sent*flesch, syll*flesch, lex*smog, ...].
    products = [count * score for score in scores for count in counts]
    return counts + scores + products
def print_readability_metrics(text, file_name):
    """Print a short readability report (Flesch RE, Dale-Chall, SMOG)
    for *text*, headed by *file_name*."""
    rows = (
        ("flesch reading ease: ", textstat.flesch_reading_ease),
        ("dale chall readability: ", textstat.dale_chall_readability_score),
        ("smog index: ", textstat.smog_index),
    )
    print(file_name, " readability metrics")
    for label, metric in rows:
        print(label, metric(text))
    print('------------------------------------------------')
def get_special_metrics(text):
    """Bundle textstat statistics, difficulty scores and TextBlob
    sentiment for *text* into a nested dict."""
    blob = TextBlob(text)
    statistics = {
        'syllables': textstat.syllable_count(text),
        'words': textstat.lexicon_count(text),
        'characters': textstat.char_count(text),
        'polysyllables': textstat.polysyllabcount(text),
        'average letter per word': textstat.avg_letter_per_word(text),
        'average sentence length': textstat.avg_sentence_length(text),
        'average sentence per word': textstat.avg_sentence_per_word(text),
        'sentences': textstat.sentence_count(text),
    }
    difficulty = {
        'flesch reading ease': textstat.flesch_reading_ease(text),
        'smog index': textstat.smog_index(text),
        'flesch kincaid grade': textstat.flesch_kincaid_grade(text),
        'coleman liau index': textstat.coleman_liau_index(text),
        'gunning fog': textstat.gunning_fog(text),
    }
    sentiments = {
        'polarity': blob.sentiment.polarity,
        'subjectivity': blob.sentiment.subjectivity,
    }
    return {
        'statistics': statistics,
        'difficulty': difficulty,
        'sentiments': sentiments,
    }
def get_readability(df2):
    """Return a copy of *df2* with textstat readability features added.

    For every object (text) column, one new column per readability
    metric is appended, named ``<metric><column index>`` exactly as the
    original duplicated code produced.

    :param df2: pandas DataFrame; object-dtype columns are scored
    :return: new DataFrame with the extra readability columns
    """
    # One (column name, scorer) pair per metric replaces ten copy-pasted
    # apply() stanzas; the bare function also avoids the redundant
    # ``lambda x: f(x)`` wrappers.
    metrics = [
        ('flesch_reading_ease', textstat.flesch_reading_ease),
        ('smog_index', textstat.smog_index),
        ('flesch_kincaid_grade', textstat.flesch_kincaid_grade),
        ('coleman_liau_index', textstat.coleman_liau_index),
        ('automated_readability_index', textstat.automated_readability_index),
        ('dale_chall_readability_score', textstat.dale_chall_readability_score),
        ('difficult_words', textstat.difficult_words),
        ('linsear_write_formula', textstat.linsear_write_formula),
        ('gunning_fog', textstat.gunning_fog),
        ('text_standard', textstat.text_standard),
    ]
    df = df2.copy()
    text_feats = df.select_dtypes(include=['object']).columns.values
    for i, col in enumerate(text_feats):
        for name, scorer in metrics:
            df['{}{}'.format(name, i)] = df[col].apply(scorer)
    return df
def get_special_metrics(text):
    """Return nested statistics/difficulty/sentiment metrics for *text*."""
    sentiment = TextBlob(text).sentiment
    stat_funcs = {
        "syllables": textstat.syllable_count,
        "words": textstat.lexicon_count,
        "characters": textstat.char_count,
        "polysyllables": textstat.polysyllabcount,
        "average letter per word": textstat.avg_letter_per_word,
        "average sentence length": textstat.avg_sentence_length,
        "average sentence per word": textstat.avg_sentence_per_word,
        "sentences": textstat.sentence_count,
    }
    difficulty_funcs = {
        "flesch reading ease": textstat.flesch_reading_ease,
        "smog index": textstat.smog_index,
        "flesch kincaid grade": textstat.flesch_kincaid_grade,
        "coleman liau index": textstat.coleman_liau_index,
        "gunning fog": textstat.gunning_fog,
    }
    return {
        "statistics": {name: fn(text) for name, fn in stat_funcs.items()},
        "difficulty": {name: fn(text) for name, fn in difficulty_funcs.items()},
        "sentiments": {
            "polarity": sentiment.polarity,
            "subjectivity": sentiment.subjectivity,
        },
    }
def compareContents():
    """Flask view: on POST, score the submitted poem line with several
    readability metrics, stash line and scores in the session, and
    re-render the comparison page with the scores."""
    if request.method != "POST":
        return render_template('compareContents.html')
    line = request.form['poem']
    poem1 = request.form['poem1']
    # ---------Metrics comparison logic. Keep them in session attributes---------
    session['line'] = line
    fre = textstat.flesch_reading_ease(line)
    smog = textstat.smog_index(line)
    fkg = textstat.flesch_kincaid_grade(line)
    dcr = textstat.dale_chall_readability_score(line)
    gf = textstat.gunning_fog(line)
    for key, value in (('fre', fre), ('smog', smog), ('fkg', fkg),
                       ('dcr', dcr), ('gf', gf)):
        session[key] = value
    metrics = True
    return render_template('compareContents.html', metrics=metrics, line=line,
                           fre=fre, smog=smog, fkg=fkg, dcr=dcr, gf=gf)
def readability(text):
    # Print a labelled report of textstat readability scores for *text*.
    # NOTE: Python 2 print-statement syntax — this function is not valid
    # Python 3 as written.
    print("Readability\n=================================\n\n")
    print("Flesch Reading Ease\n________________________\n\n")
    print str(textstat.flesch_reading_ease(text)) + "\n"
    print("Smog Index\n________________________\n\n")
    print str(textstat.smog_index(text)) + "\n"
    print("Flesch Kincaid Grade\n________________________\n\n")
    print str(textstat.flesch_kincaid_grade(text)) + "\n"
    print("Coleman Liau Index\n________________________\n\n")
    print str(textstat.coleman_liau_index(text)) + "\n"
    print("ARI\n________________________\n\n")
    print str(textstat.automated_readability_index(text)) + "\n"
    print("Dale Chall\n________________________\n\n")
    print str(textstat.dale_chall_readability_score(text)) + "\n"
    print("Difficult Words\n________________________\n\n")
    print str(textstat.difficult_words(text)) + "\n"
    print("Linsear Write Formula\n________________________\n\n")
    print str(textstat.linsear_write_formula(text)) + "\n"
    print("Gunning Fog\n________________________\n\n")
    print str(textstat.gunning_fog(text)) + "\n"
    print "Compiled Score\n_____________________________\n\n"
    print str(textstat.text_standard(text)) + "\n"
    # NOTE(review): `adjectives` is not defined anywhere in this function;
    # unless it is a module-level global, this return raises NameError —
    # confirm the intended return value.
    return len(adjectives)
def _get_reading_stats(no_code_text):
    """Build the list of TextFeature reading-level metrics for a string.

    :param no_code_text: String to analyse
    :return: list of TextFeature results
    """
    group_by = 'Reading Level Analysis '
    results = []

    def guarded(label, scorer, fallback, exceptions):
        # Some textstat calls raise on odd input (reason unclear upstream);
        # substitute *fallback* for the value when that happens.
        try:
            value = scorer(no_code_text)
        except exceptions:
            value = fallback
        results.append(TextFeature(label, value, group_by))

    # higher is better, scale 0 to 100
    results.append(TextFeature('Flesch Reading Ease',
                               textstat.flesch_reading_ease(no_code_text), group_by))
    results.append(TextFeature('Flesch-Kincaid Grade Level',
                               textstat.flesch_kincaid_grade(no_code_text), group_by))
    guarded('The Fog Scale (Gunning FOG formula)', textstat.gunning_fog,
            "Undetermined", IndexError)
    guarded('The SMOG Index', textstat.smog_index, "Undetermined", IndexError)
    results.append(TextFeature('Automated Readability Index',
                               textstat.automated_readability_index(no_code_text), group_by))
    results.append(TextFeature('The Coleman-Liau Index',
                               textstat.coleman_liau_index(no_code_text), group_by))
    guarded('Linsear Write Formula', textstat.linsear_write_formula,
            "Undetermined", IndexError)
    guarded('Dale Chall Readability Score', textstat.dale_chall_readability_score,
            "Undetermined", IndexError)
    guarded('Readability Consensus', textstat.readability_consensus,
            "Undetermined; One of the tests above failed.", (TypeError, IndexError))
    return results
def f():
    """Read lines from abc.txt and write four readability scores per line
    into Readability_Scores.xls (one worksheet, one row per input line).

    Fixes over the original: the input file is now closed via ``with``
    (it was leaked), and a ``smog_index`` value that was computed but
    never written anywhere has been dropped.
    """
    print("hello")
    book = xlwt.Workbook()
    worksheet = book.add_sheet('ReadabilityScore')
    headers = ("Gen_sent", "flesch_reading_ease", "flesch_kincaid_grade",
               "dale_chall_readability_score", "gunning_fog")
    for col, header in enumerate(headers):
        worksheet.write(0, col, header)
    with open('abc.txt') as infile:  # , encoding='utf-8')
        for row, line in enumerate(infile, start=1):
            worksheet.write(row, 0, line)
            worksheet.write(row, 1, textstat.flesch_reading_ease(line))
            worksheet.write(row, 2, textstat.flesch_kincaid_grade(line))
            worksheet.write(row, 3, textstat.dale_chall_readability_score(line))
            worksheet.write(row, 4, textstat.gunning_fog(line))
    book.save('Readability_Scores.xls')
def main():
    """Read (impact, URL, text) rows from the CSV at argv[1], compute
    readability statistics for each text, and write them with a running
    ID to the CSV at argv[2].

    Fixes over the original: both files are managed by ``with`` (the
    output file was never closed on an exception path) and the redundant
    explicit ``close()`` calls are gone.
    """
    with open(sys.argv[1], 'r', encoding="utf8", errors='ignore') as csv_file1, \
            open(sys.argv[2], 'w', encoding="utf8") as csv_file2:
        reader = csv.reader(csv_file1)
        writer = csv.writer(csv_file2, delimiter=',')
        writer.writerow(["ID", "URL", "text", "impact-score", "readability",
                         "grade-level", "smog-index", "total-words",
                         "total-sentences"])
        # Skip the first line with headers
        next(reader)
        for doc_id, row in enumerate(reader, start=1):
            impact = str(row[0])
            url = str(row[1])
            text = str(row[2])
            # Uncomment this if we want summary and key words
            # summary = summarize(text, ratio=0.3)
            # key_words = keywords(text, ratio=0.3)
            writer.writerow([doc_id, url, text, impact,
                             textstat.flesch_reading_ease(text),
                             textstat.flesch_kincaid_grade(text),
                             textstat.smog_index(text),
                             textstat.lexicon_count(text),
                             textstat.sentence_count(text)])
    print('Summary statistics complete!')
def smog_index(text):
    """Compute the SMOG Index of a document.

    :type text: Text
    :param text: The text to be analysed
    :rtype: float
    :returns: Smog Index
    """
    raw_string = text.text
    return textstat.smog_index(raw_string)
def all_trad_scores(text):
    """Return the eight traditional readability scores for *text* as a
    list, in order: Flesch RE, Flesch-Kincaid, SMOG, Coleman-Liau, ARI,
    Dale-Chall, Linsear Write, Gunning Fog."""
    scorers = (
        textstat.flesch_reading_ease,
        textstat.flesch_kincaid_grade,
        textstat.smog_index,
        textstat.coleman_liau_index,
        textstat.automated_readability_index,
        textstat.dale_chall_readability_score,
        textstat.linsear_write_formula,
        textstat.gunning_fog,
    )
    return [score(text) for score in scorers]
def reading_difficulty(self):
    # Aggregate several grade-level readability formulas into a single
    # estimate. Returns (avg_grade, diff_words) where diff_words is the
    # difficult-word count normalised by self.nword.
    diff_words = textstat.difficult_words(self.text) / self.nword
    flesch_kincaid = textstat.flesch_kincaid_grade(self.text)
    coleman_liau = textstat.coleman_liau_index(self.text)
    ari = textstat.automated_readability_index(self.text)
    dale_chall = textstat.dale_chall_readability_score(self.text)
    linsear = textstat.linsear_write_formula(self.text)
    # NOTE(review): the -6 offset presumably rescales Gunning Fog onto a
    # scale comparable with the other grade formulas — confirm.
    gunning_fog = textstat.gunning_fog(self.text) - 6
    smog = textstat.smog_index(self.text)
    # NOTE(review): max(..., 12) makes 12 the *floor* of the averaged
    # grade; if the intent was to cap the grade at 12, this should be
    # min() — confirm against callers.
    avg_grade = max(
        math.ceil((flesch_kincaid + coleman_liau + ari + dale_chall +
                   linsear + gunning_fog + smog) / 7), 12)
    return avg_grade, diff_words
def textstat_analysis(profile_text):
    """Run the full battery of textstat metrics over *profile_text* and
    return an 11-tuple: (flesch RE, SMOG, flesch-kincaid, coleman-liau,
    ARI, dale-chall, difficult words, linsear write, gunning fog,
    readability consensus, word count)."""
    analysers = (
        textstat.flesch_reading_ease,
        textstat.smog_index,
        textstat.flesch_kincaid_grade,
        textstat.coleman_liau_index,
        textstat.automated_readability_index,
        textstat.dale_chall_readability_score,
        textstat.difficult_words,
        textstat.linsear_write_formula,
        textstat.gunning_fog,
        textstat.readability_consensus,
        textstat.lexicon_count,
    )
    return tuple(analyse(profile_text) for analyse in analysers)
def get_readability(contents):
    """Return ten textstat readability metrics for *contents* as a list,
    ending with the text_standard consensus grade."""
    metric_fns = (
        textstat.flesch_reading_ease,
        textstat.smog_index,
        textstat.flesch_kincaid_grade,
        textstat.automated_readability_index,
        textstat.dale_chall_readability_score,
        textstat.difficult_words,
        textstat.linsear_write_formula,
        textstat.gunning_fog,
        textstat.coleman_liau_index,
        textstat.text_standard,
    )
    return [fn(contents) for fn in metric_fns]
def main():
    """For each file named on the command line, write its textstat
    metrics to a sibling '<name>.readability.snip' file."""
    metrics = (
        ('syllable_count', textstat.syllable_count),
        ('lexicon_count', textstat.lexicon_count),
        ('sentence_count', textstat.sentence_count),
        ('difficult_words', textstat.difficult_words),
        ('flesch_reading_ease', textstat.flesch_reading_ease),
        ('flesch_kincaid_grade', textstat.flesch_kincaid_grade),
        ('smog_index', textstat.smog_index),
        ('automated_readability_index', textstat.automated_readability_index),
        ('coleman_liau_index', textstat.coleman_liau_index),
        ('linsear_write_formula', textstat.linsear_write_formula),
        ('dale_chall_readability_score', textstat.dale_chall_readability_score),
    )
    for arg in sys.argv[1:]:
        with open(arg) as src:
            text = src.read()
        with open(arg + '.readability.snip', 'w') as out:
            # Emits exactly the original "name : value" line format.
            for name, scorer in metrics:
                out.write("%s : %s\n" % (name, scorer(text)))
def scores_cal_ori(text):
    """Compute 14 textstat metrics for *text* and return them as a tuple:
    char count, lexicon count, syllable count, sentence count, average
    sentence length, average syllables per word, average letters per
    word, average sentences per word, Flesch-Kincaid grade, SMOG index,
    Gunning Fog, difficult-word count, Dale-Chall score, polysyllable
    count.

    Fix over the original: a second, unreachable ``return
    smog_index_value`` after the tuple return has been removed.
    """
    char_count_value = textstat.char_count(text, ignore_spaces=True)
    lexicon_count_value = textstat.lexicon_count(text, removepunct=True)
    syllable_count_value = textstat.syllable_count(text)
    sentence_count_value = textstat.sentence_count(text)
    avg_sentence_length_value = textstat.avg_sentence_length(text)
    avg_syllables_per_word_value = textstat.avg_syllables_per_word(text)
    avg_letter_per_word_value = textstat.avg_letter_per_word(text)
    avg_sentence_per_word_value = textstat.avg_sentence_per_word(text)
    flesch_kincaid_grade_value = textstat.flesch_kincaid_grade(text)
    smog_index_value = textstat.smog_index(text)
    gunning_fog_value = textstat.gunning_fog(text)
    difficult_words_value = textstat.difficult_words(text)
    dale_chall_value = textstat.dale_chall_readability_score(text)
    polysyllab_value = textstat.polysyllabcount(text)
    return (char_count_value, lexicon_count_value, syllable_count_value,
            sentence_count_value, avg_sentence_length_value,
            avg_syllables_per_word_value, avg_letter_per_word_value,
            avg_sentence_per_word_value, flesch_kincaid_grade_value,
            smog_index_value, gunning_fog_value, difficult_words_value,
            dale_chall_value, polysyllab_value)
def features_smog_content(cls, row):
    """SMOG index of the row's review_content, or 0 when the content
    does not tokenize (None/empty token list) or textstat returns None.

    Correlation of this feature in prior experiments: 0.0970957994444.
    Alternatives tried (correlation): dale_chall 0.0655506963852,
    difficult_words 0.119689698366, linsear_write 0.0393165149095,
    gunning_fog 0.064893772836, flesch_reading_ease -0.000962802863895,
    automated_readability_index 0.0206780263383.
    """
    data = row['review_content']
    token = cls.tokenizer.tokenize(data)
    # Idiom fix: truthiness covers both None and the empty list.
    if not token:
        return 0
    val = textstat.smog_index(row['review_content'])
    # Idiom fix: identity comparison with None (was `val != None`).
    return val if val is not None else 0
def processData(self, orid, oid, index):
    # Ensure an Elasticsearch resource document exists for *orid* (building
    # it from the resource's web page when missing), then reindex the
    # activity *oid* into *index*.
    # - orid: original resource id (MD5-hashed into the ES document id)
    # - oid / index: passed through to reindexActivity
    # NOTE: Python 2 print-statement syntax.
    # check if resource exist
    rid = hashlib.md5(orid.encode('utf-8')).hexdigest()
    r = requests.get(conf["es_base"] + conf["resource_index"] + '/resource/' + rid)
    res = r.json()
    # if not
    if "error" in res or "_source" not in res:
        print "not found"
        # get text -> change to get didactalia description
        resdesc = self.getResourceDescription(orid)
        if "error" not in resdesc:
            headers = {'Accept': 'text/html'}
            # -1 acts as the "no readability computed" sentinel.
            resdesc["smog"] = -1
            if 'link' in resdesc or 'resource_url' in resdesc:
                url = ''
                if 'resource_url' in resdesc:
                    url = resdesc['resource_url']
                if 'link' in resdesc:
                    # 'link' wins when both keys are present.
                    url = resdesc['link']
                OK = False
                try:
                    response = requests.get(url, headers=headers)
                    OK = True
                except requests.exceptions.ConnectionError:
                    self.indexError(rid, "Cannot connect to the page")
                if OK:
                    try:
                        # Strip the page to plain text, score its SMOG
                        # readability, and fingerprint the content so later
                        # runs can detect changes.
                        h = html2text.HTML2Text()
                        h.ignore_links = True
                        text = h.handle(response.text)
                        resdesc["smog"] = textstat.smog_index(text)
                        resdesc["sig"] = hashlib.sha1(
                            text.encode('utf-8')).hexdigest()
                        self.indexResource(rid, resdesc)
                    except html2text.HTMLParser.HTMLParseError:
                        self.indexError(rid, "Cannot parse page as HTML")
            else:
                self.indexError(rid, "no url for resource")
        else:
            self.indexError(rid, "could not retrieve resource")
    # Prefer the already-indexed description when the document existed.
    if "_source" in res:
        resdesc = res["_source"]
    # NOTE(review): reindexActivity appears to run regardless of whether
    # the resource lookup/creation succeeded — confirm that is intended.
    self.reindexActivity(resdesc, oid, index)
def analyse_json(json_text):
    # consider moving this to be a feature of Transcript in the other module
    """Build a per-witness readability DataFrame from transcript JSON.

    Each witness who spoke gets textstat metrics computed over all their
    concatenated remarks; witnesses with no usable text still get a row
    carrying only the file location and their name.
    """
    df_witnesses = pd.DataFrame(columns=['html_file_location', 'witness_name',
                                         'syllable_count', 'lexicon_count',
                                         'sentence_count', 'syllables_per_word',
                                         'gunning_fog', 'smog_index',
                                         'text_standard'],
                                index=[])
    trscrpt = json.loads(json_text)
    if 'witnesses' in trscrpt:
        witnesses = trscrpt['witnesses']

        # Gather every spoken section under its witness's entry.
        for s in trscrpt['all_sections']:
            if 'speaker' in s and 'person' in s['speaker'] and \
               s['speaker']['person']['speaker_type'] == 'witness':
                witness = witnesses[s['speaker']['person']['name']]
                witness.setdefault('all_text', []).append(s['spoken_text'])

        for i, p in enumerate(witnesses):
            if 'all_text' in witnesses[p]:
                witness_text = '\n\n'.join(witnesses[p]['all_text'])
                if len(witness_text) > 0:
                    stats_data = {'html_file_location': trscrpt['html_file_location'],
                                  'witness_name': p,
                                  'syllable_count': textstat.syllable_count(witness_text),
                                  'lexicon_count': textstat.lexicon_count(witness_text),
                                  'sentence_count': textstat.sentence_count(witness_text),
                                  'syllables_per_word': textstat.avg_syllables_per_word(witness_text),
                                  'gunning_fog': textstat.gunning_fog(witness_text),
                                  'smog_index': textstat.smog_index(witness_text),
                                  'text_standard': textstat.text_standard(witness_text)}
                    df_witnesses.loc['witness_%i' % i] = stats_data
                else:
                    # Witness's text was empty: record identity only.
                    df_witnesses.loc['witness_%i' % i, 'html_file_location'] = trscrpt['html_file_location']
                    df_witnesses.loc['witness_%i' % i, 'witness_name'] = p
            else:
                # Witness never spoke: record identity only.
                df_witnesses.loc['witness_%i' % i, 'html_file_location'] = trscrpt['html_file_location']
                df_witnesses.loc['witness_%i' % i, 'witness_name'] = p
    return df_witnesses
def run_textstat(text):
    """Return a 10-tuple of textstat metrics for *text*: Flesch RE, SMOG,
    Flesch-Kincaid, Coleman-Liau, ARI, Dale-Chall, difficult words,
    Linsear Write, Gunning Fog, and the text_standard consensus."""
    analysers = (
        textstat.flesch_reading_ease,
        textstat.smog_index,
        textstat.flesch_kincaid_grade,
        textstat.coleman_liau_index,
        textstat.automated_readability_index,
        textstat.dale_chall_readability_score,
        textstat.difficult_words,
        textstat.linsear_write_formula,
        textstat.gunning_fog,
        textstat.text_standard,
    )
    return tuple(analyse(text) for analyse in analysers)
def smog_index(text):
    """Map the raw SMOG score of *text* onto a 0-7 difficulty level.

    Level 0 means a score of zero or below; levels 1-7 correspond to the
    score bands (0,6), [6,8), [8,10), [10,11), [11,12), [12,13), [13,+inf).
    """
    score = textstat.smog_index(text)
    if score <= 0:
        return 0
    level = 1
    for upper_bound in (6, 8, 10, 11, 12, 13):
        if score < upper_bound:
            return level
        level += 1
    return 7
def get_feat_readability_metrics(self):
    # https://github.com/shivam5992/textstat
    # Compute nine textstat readability features over the scraped page
    # body. Returns (features, error_flag): a list of 9 scores with False
    # on success, or 9 missing-feature sentinels with True on any failure.
    try:
        test_data = self.webscrap.get_body()
        out = []
        out.append(textstat.flesch_reading_ease(test_data))
        out.append(textstat.smog_index(test_data))
        out.append(textstat.flesch_kincaid_grade(test_data))
        out.append(textstat.coleman_liau_index(test_data))
        out.append(textstat.automated_readability_index(test_data))
        out.append(textstat.dale_chall_readability_score(test_data))
        out.append(textstat.difficult_words(test_data))
        out.append(textstat.linsear_write_formula(test_data))
        out.append(textstat.gunning_fog(test_data))
        #out.append(textstat.text_standard(test_data))
        return out, False
    except Exception as e:
        # Deliberate broad catch: any scraping or scoring failure is
        # logged and reported via the error flag rather than raised.
        config.logger.error(repr(e))
        return MISSING_FEATURE * 9, True
def lambda_handler(event, context):
    """AWS Lambda entry point: score event['text'] with ten textstat
    readability metrics and hand the resulting dict to respond()."""
    text = event['text']
    response = {
        'flesch_reading_ease': textstat.flesch_reading_ease(text),
        'smog_index': textstat.smog_index(text),
        'flesch_kincaid_grade': textstat.flesch_kincaid_grade(text),
        'coleman_liau_index': textstat.coleman_liau_index(text),
        'automated_readability_index': textstat.automated_readability_index(text),
        'dale_chall_readability_score': textstat.dale_chall_readability_score(text),
        'difficult_words': textstat.difficult_words(text),
        'linsear_write_formula': textstat.linsear_write_formula(text),
        'gunning_fog': textstat.gunning_fog(text),
        'text_standard': textstat.text_standard(text),
    }
    return respond(None, response)
def feature_readability(essay): syllable_count = textstat.syllable_count(essay) #音节数统计 flesch_reading_ease = textstat.flesch_reading_ease(essay) #文档的易读性0-100之间的分数 smog_index = textstat.smog_index(essay) #烟雾指数,反映文档的易读程度,更精确,更容易计算 flesch_kincaid_index = textstat.flesch_kincaid_grade(essay) #等级分数,年级等级 coleman_liau_index = textstat.coleman_liau_index(essay) #返回文本的年级级别 automated_readability_index = textstat.automated_readability_index(essay) #自动可读性指数,接近理解文本需要的年级 dale_chall_readability_score = textstat.dale_chall_readability_score(essay) #返回年级级别,使用最常见的英文单词 difficult_words = textstat.difficult_words(essay) linsear_write_formula = textstat.linsear_write_formula(essay) #返回文本的年级级别 gunning_fog = textstat.gunning_fog(essay) #迷雾指数, 反映文本的阅读难度 return syllable_count, flesch_reading_ease, smog_index, flesch_kincaid_index, coleman_liau_index, automated_readability_index, dale_chall_readability_score, difficult_words, linsear_write_formula, gunning_fog
def analyseText():
    """Flask endpoint: the JSON body must contain 'inputText'; respond
    with textstat metrics for it, or 400 when the key is missing."""
    values = request.get_json()
    required = ['inputText']
    if not all(key in values for key in required):
        return 'Missing values', 400
    text = values['inputText']
    metric_funcs = {
        'syllable_count': textstat.syllable_count,
        'lexicon_count': textstat.lexicon_count,
        'sentence_count': textstat.sentence_count,
        'flesch_reading_ease': textstat.flesch_reading_ease,
        'flesch_kincaid_grade': textstat.flesch_kincaid_grade,
        'gunning_fog': textstat.gunning_fog,
        'smog_index': textstat.smog_index,
        'automated_readability_index': textstat.automated_readability_index,
        'coleman_liau_index': textstat.coleman_liau_index,
        'linsear_write_formula': textstat.linsear_write_formula,
        'dale_chall_readability_score': textstat.dale_chall_readability_score,
    }
    result = {name: fn(text) for name, fn in metric_funcs.items()}
    return jsonify(result), 200
def calculate_readability_measures(id):
    """ Count the words in doc and update the document. """
    # Fetch page *id* from the 'beek' index, compute a set of textstat
    # readability measures over its content, and write them back onto
    # the document.
    es = elasticsearch.Elasticsearch()
    source = es.get_source(index='beek', doc_type='page', id=id)
    # count = len(source['content'].split())

    try:
        measures = {
            'flesch': textstat.flesch_reading_ease(source['content']),
            'smog': textstat.smog_index(source['content']),
            'flesch_kincaid': textstat.flesch_kincaid_grade(source['content']),
            'coleman_liau': textstat.coleman_liau_index(source['content']),
            'readability': textstat.automated_readability_index(source['content']),
            'dale_chall': textstat.dale_chall_readability_score(source['content']),
            'difficult_words': textstat.difficult_words(source['content']),
            'linsear_write_formula': textstat.linsear_write_formula(source['content']),
            'gunning_fog': textstat.gunning_fog(source['content']),
            # NOTE(review): readability_consensus was removed in newer
            # textstat releases (renamed text_standard) — confirm the
            # pinned textstat version provides it.
            'consensus': textstat.readability_consensus(source['content']),
        }

        es.update(index='beek', doc_type='page', id=id,
                  body={'doc': {
                      'measures': measures
                  }}, refresh=True)
    except Exception as err:
        # Best-effort update: any failure (missing content, scoring
        # error, ES update error) is deliberately swallowed.
        pass
def calculate_readability_measures(id):
    """ Count the words in doc and update the document. """
    es = elasticsearch.Elasticsearch()
    source = es.get_source(index='beek', doc_type='page', id=id)
    # count = len(source['content'].split())
    measure_funcs = {
        'flesch': textstat.flesch_reading_ease,
        'smog': textstat.smog_index,
        'flesch_kincaid': textstat.flesch_kincaid_grade,
        'coleman_liau': textstat.coleman_liau_index,
        'readability': textstat.automated_readability_index,
        'dale_chall': textstat.dale_chall_readability_score,
        'difficult_words': textstat.difficult_words,
        'linsear_write_formula': textstat.linsear_write_formula,
        'gunning_fog': textstat.gunning_fog,
        'consensus': textstat.readability_consensus,
    }
    try:
        content = source['content']
        measures = {name: fn(content) for name, fn in measure_funcs.items()}
        es.update(index='beek', doc_type='page', id=id,
                  body={'doc': {'measures': measures}}, refresh=True)
    except Exception as err:
        # Best-effort update: failures are deliberately swallowed.
        pass
def stats(self, text):
    """Return a dict of textstat readability scores and raw text
    statistics for *text*."""
    fields = (
        ('flesch_reading_ease', textstat.flesch_reading_ease),
        ('smog', textstat.smog_index),
        ('flesch kincaid', textstat.flesch_kincaid_grade),
        ('coleman Liau', textstat.coleman_liau_index),
        ('automated', textstat.automated_readability_index),
        ('dale chall', textstat.dale_chall_readability_score),
        ('difficult', textstat.difficult_words),
        ('linsear', textstat.linsear_write_formula),
        ('gunning_fog', textstat.gunning_fog),
        ('standard', textstat.text_standard),
        ('charcount', textstat.char_count),
        ('lexicon count', textstat.lexicon_count),
        ('syllable count', textstat.syllable_count),
        ('sentence count', textstat.sentence_count),
        ('avg sentence length', textstat.avg_sentence_length),
        ('avg_syllables_per_word', textstat.avg_syllables_per_word),
        ('avg_letter_per_word', textstat.avg_letter_per_word),
        ('avg_sentence_per_word', textstat.avg_sentence_per_word),
    )
    return {key: fn(text) for key, fn in fields}
def get_readability(self, corpus, type='ari'):
    """Compute one readability metric for *corpus*, selected by *type*
    ('ari', 'flesch', 'smog', 'flesch_kinciad', 'coleman', 'dale_chall',
    'difficult_words', 'linsear', 'gunning_fog', 'readability_conensus');
    returns None for an unrecognised type."""
    # Dispatch table replaces the long if/elif chain. The misspelled
    # keys ('flesch_kinciad', 'readability_conensus') are preserved
    # because callers select by these exact strings.
    dispatch = {
        'ari': textstat.automated_readability_index,
        'flesch': textstat.flesch_reading_ease,
        'smog': textstat.smog_index,
        'flesch_kinciad': textstat.flesch_kincaid_grade,
        'coleman': textstat.coleman_liau_index,
        'dale_chall': textstat.dale_chall_readability_score,
        'difficult_words': textstat.difficult_words,
        'linsear': textstat.linsear_write_formula,
        'gunning_fog': textstat.gunning_fog,
        'readability_conensus': textstat.readability_consensus,
    }
    scorer = dispatch.get(type)
    return scorer(corpus) if scorer is not None else None
#main script if __name__ == '__main__': print "TextStat Comparison Script" print "--------------------------" #read in text from the command line #This needs to be fixed to deal/escape special characters textToCheck = raw_input("Please enter the text you would like to analyse: ") #read in text from a file- but what format? print "\n\n" print "Results" print "==============================================" print "==============================================\n" print "Syllable Count: " + str(textstat.syllable_count(textToCheck)) print "Lexicon Count: " + str(textstat.lexicon_count(textToCheck)) #TRUE is default and removes punctuation before counting print "Sentence Count: " + str(textstat.sentence_count(textToCheck)) print "Flesch Reading Ease formula: " + str(textstat.flesch_reading_ease(textToCheck)) print "Flesch-Kincaid Grade Level: " + str(textstat.flesch_kincaid_grade(textToCheck)) print "Fog Scale (Gunning FOG Formula): " + str(textstat.gunning_fog(textToCheck)) print "SMOG Index: " + str(textstat.smog_index(textToCheck)) print "Automated Readability Index: " + str(textstat.automated_readability_index(textToCheck)) print "Coleman-Liau Index: " + str(textstat.coleman_liau_index(textToCheck)) print "Linsear Write Formula: " + str(textstat.linsear_write_formula(textToCheck)) print "Dale-Chall Readability Score: " + str(textstat.dale_chall_readability_score(textToCheck)) print "--------------------------------------------------------------" print "Readability Consensus based upon all the above tests: " + str(textstat.text_standard(textToCheck)) print "\n\n"
def __init__(self, path):
    """Create a document instance for analysis.

    Opens *path* with textract (docx, pdf, odt, txt, ...), normalizes the
    extracted text, then computes sentence/word tokenizations,
    passive-voice and "to be"-verb statistics, textstat readability
    scores, part-of-speech distributions, punctuation usage, and
    proselint suggestions.

    Args:
        path (str): relative path to the document to open and analyze.

    Public attributes (set only when *path* is an existing file):
        user (str): optional username, empty by default.
        path / abs_path (str): relative and absolute document paths.
        file_name (str): base name (with extension) of the document.
        mime / guessed_type / file_type: mimetype guesses for the file.
        raw_text (str): plain text extracted by textract.
        ptext (str): raw_text with typographic punctuation normalized.
        text_no_feed (str): ptext with most newline characters removed.
        sentence_tokens (list) / sentence_count (int): nltk sentences.
        passive_sentences (list) / passive_sentence_count (int) /
            percent_passive(_round) (float): passive-voice statistics.
        be_verb_analysis / be_verb_count / weak_sentences_all /
            weak_sentences_set / weak_sentences_count /
            weak_verbs_to_sentences(_round): "to be"-verb statistics.
        word_tokens (list): nltk word tokens (contractions split).
        word_tokens_no_punct (list): lower-cased words, no punctuation.
        no_punct (str): full text stripped of sentence punctuation.
        readability_*: textstat scores (Flesch RE, SMOG, Flesch-Kincaid,
            Coleman-Liau, ARI, Linsear Write, Dale-Chall) plus
            readability_standard, the composite grade.
        flesch_re_desc_str (str): verbal description of the Flesch score.
        polysyllabcount / lexicon_count / avg_* / difficult_words:
            additional textstat statistics.
        rand_passive / rand_weak_sentence: randomly selected examples.
        word_count / page_length / paper_count / doc_pages: word and
            page counts at 250 words per page.
        parts_of_speech / pos_counts / pos_total / pos_freq /
            pos_count_dict: part-of-speech tallies.
        freq_words: word-frequency distribution.
        modal_dist: auxiliary-verb frequency distribution.
        modals / preposition_count / adjective_count / adverb_count /
            proper_nouns / cc_count: counts isolated by POS tag.
        commas / semicolons (int) plus example sentences for each.
        lint_suggestions: proselint output for raw_text.
    """
    self.user = ""
    self.path = path
    self.abs_path = os.path.abspath(self.path)
    if os.path.isfile(self.path):
        self.time_stamp = self.timestamp()
        self.file_name = os.path.basename(path)
        self.mime = MimeTypes()
        self.guessed_type = self.mime.guess_type(self.path)
        self.file_type = self.guessed_type[0]
        self.raw_text = textract.process(self.path, encoding="ascii")
        # Normalize typographic punctuation to ASCII so downstream
        # tokenizers and textstat see plain quotes/dashes/ellipses.
        self.ptext = re.sub(u'[\u201c\u201d]', '"', self.raw_text)
        self.ptext = re.sub(u"\u2014", "--", self.ptext)
        # NOTE(review): this substitution maps "," to "," (a no-op);
        # possibly a full-width comma was intended — confirm.
        self.ptext = re.sub(",", ",", self.ptext)
        self.ptext = re.sub("—", "--", self.ptext)
        self.ptext = re.sub("…", "...", self.ptext)
        self.text_no_feed = self.clean_new_lines(self.ptext)
        self.sentence_tokens = self.sentence_tokenize(self.text_no_feed)
        self.sentence_count = len(self.sentence_tokens)
        # Passive-voice statistics; note an empty document would make
        # sentence_count 0 and raise ZeroDivisionError below.
        self.passive_sentences = passive(self.text_no_feed)
        self.passive_sentence_count = len(self.passive_sentences)
        self.percent_passive = (100 * (float(self.passive_sentence_count) /
                                       float(self.sentence_count)))
        self.percent_passive_round = round(self.percent_passive, 2)
        # count_be_verbs returns [verb count, sentences containing them].
        self.be_verb_analysis = self.count_be_verbs(self.sentence_tokens)
        self.be_verb_count = self.be_verb_analysis[0]
        self.weak_sentences_all = self.be_verb_analysis[1]
        self.weak_sentences_set = set(self.weak_sentences_all)
        self.weak_sentences_count = len(self.weak_sentences_set)
        self.weak_verbs_to_sentences = 100 * float(
            self.weak_sentences_count) / float(self.sentence_count)
        self.weak_verbs_to_sentences_round = round(
            self.weak_verbs_to_sentences, 2)
        self.word_tokens = self.word_tokenize(self.text_no_feed)
        self.word_tokens_no_punct = \
            self.word_tokenize_no_punct(self.text_no_feed)
        self.no_punct = self.strip_punctuation(self.text_no_feed)
        # use this! It makes words lower case and strips symbols
        # (deliberately overwrites the word_tokenize_no_punct result above).
        self.word_tokens_no_punct = self.ws_tokenize(self.no_punct)
        # textstat readability battery over the cleaned text.
        self.readability_flesch_re = \
            textstat.flesch_reading_ease(self.text_no_feed)
        self.readability_smog_index = \
            textstat.smog_index(self.text_no_feed)
        self.readability_flesch_kincaid_grade = \
            textstat.flesch_kincaid_grade(self.text_no_feed)
        self.readability_coleman_liau_index = \
            textstat.coleman_liau_index(self.text_no_feed)
        self.readability_ari = \
            textstat.automated_readability_index(self.text_no_feed)
        self.readability_linser_write = \
            textstat.linsear_write_formula(self.text_no_feed)
        self.readability_dale_chall = \
            textstat.dale_chall_readability_score(self.text_no_feed)
        self.readability_standard = \
            textstat.text_standard(self.text_no_feed)
        self.flesch_re_desc_str = self.flesch_re_desc(
            int(textstat.flesch_reading_ease(self.text_no_feed)))
        self.polysyllabcount = textstat.polysyllabcount(self.text_no_feed)
        self.lexicon_count = textstat.lexicon_count(self.text_no_feed)
        self.avg_syllables_per_word = textstat.avg_syllables_per_word(
            self.text_no_feed)
        self.avg_sentence_per_word = textstat.avg_sentence_per_word(
            self.text_no_feed)
        self.avg_sentence_length = textstat.avg_sentence_length(
            self.text_no_feed)
        self.avg_letter_per_word = textstat.avg_letter_per_word(
            self.text_no_feed)
        self.difficult_words = textstat.difficult_words(self.text_no_feed)
        self.rand_passive = self.select_random(self.passive_sentence_count,
                                               self.passive_sentences)
        # BUG FIX: the original passed self.weak_sentences, an attribute
        # that is never assigned (AttributeError at runtime); use the
        # weak_sentences_all list, which holds the same sentences.
        self.rand_weak_sentence = self.select_random(
            len(self.weak_sentences_all), self.weak_sentences_all)
        if self.word_tokens_no_punct:
            self.word_count = len(self.word_tokens_no_punct)
            self.page_length = float(self.word_count) / float(250)
            self.paper_count = int(math.ceil(self.page_length))
            self.parts_of_speech = pos_tag(self.word_tokens_no_punct)
            self.pos_counts = Counter(
                tag for word, tag in self.parts_of_speech)
            self.pos_total = sum(self.pos_counts.values())
            self.pos_freq = dict(
                (word, float(count) / self.pos_total)
                for word, count in self.pos_counts.items())
            self.doc_pages = float(float(self.word_count) / float(250))
            self.freq_words = \
                self.word_frequency(self.word_tokens_no_punct)
            self.modal_dist = self.modal_count(self.word_tokens_no_punct)
            # self.ws_tokens = self.ws_tokenize(self.text_no_cr)
            self.pos_count_dict = self.pos_counts.items()
            # Model - use for any pos
            self.modals = self.pos_isolate('MD', self.pos_count_dict)
            self.preposition_count = self.pos_isolate(
                'IN', self.pos_count_dict)
            self.adjective_count = self.pos_isolate_fuzzy(
                'JJ', self.pos_count_dict)
            self.adverb_count = self.pos_isolate_fuzzy(
                'RB', self.pos_count_dict)
            self.proper_nouns = self.pos_isolate_fuzzy(
                'NNP', self.pos_count_dict)
            self.cc_count = self.pos_isolate('CC', self.pos_count_dict)
            self.commas = self.char_count(",")
            self.comma_sentences = self.list_sentences(",")
            self.comma_example = self.select_random(
                len(self.comma_sentences), self.comma_sentences)
            self.semicolons = self.char_count(";")
            self.semicolon_sentences = self.list_sentences(";")
            self.semicolon_example = self.select_random(
                len(self.semicolon_sentences), self.semicolon_sentences)
            self.lint_suggestions = lint(self.raw_text)
def predict_relevance(df):
    """Predict article relevance and return up to the 10 best rows.

    Builds LDA-topic, financial-sentiment (pysentiment), readability
    (textstat) and hand-picked quantitative features for both article
    bodies ('content') and headlines ('title'), then classifies every
    row with a pre-trained GraphLab model.  When more than 10 rows are
    classified relevant, the 10 most probable are returned; otherwise
    the relevant rows are padded with the least-confidently
    non-relevant ones up to 10.

    Args:
        df (pandas.DataFrame): must contain 'content' and 'title' columns.

    Returns:
        graphlab.SFrame: selected articles joined with their predictions.
    """
    # Load data into an SFrame (all columns coerced to str, duplicates dropped).
    df[[a for a in df.columns.values]] = df[[a for a in df.columns.values
                                             ]].astype(str)
    tf = gl.SFrame(data=df)
    tf = tf.unique()

    # Load the LDA topic model, the pysentiment financial lexicon, and
    # the pre-trained relevance classifier.
    lda = models.ldamodel.LdaModel.load('lda1.model')
    lm = py.LM()
    model = gl.load_model('relevance_model_64feat')

    # Project article bodies into the LDA topic space.
    tf['tokens'] = tf['content'].apply(lambda x: dc.tokenize_doc(x, 'STEM'))
    tokens_text = [
        unicode('|'.join(i), errors='replace').split('|')
        for i in tf['tokens']
    ]
    dictionary = corpora.Dictionary(tokens_text)
    corpus = [dictionary.doc2bow(text) for text in tokens_text]
    ldamat = lda[corpus]

    # Per-topic contribution arrays (the model uses 30 topics).
    topic_arrays = np.zeros((30, len(ldamat)))
    for i, x in enumerate(ldamat):
        for topic_no, contrib in x:
            topic_arrays[topic_no, i] = contrib

    # Add the topic arrays as feature columns 'T0'..'T29'.
    for i, x in enumerate(topic_arrays):
        tf['T' + str(i)] = gl.SArray(data=x, dtype=float)

    # Sentiment features from the article body.
    tf['Polarity_text'] = tf['content'].apply(
        lambda x: lm.get_score(lm.tokenize(x))['Polarity'])
    tf['Subjectivity_text'] = tf['content'].apply(
        lambda x: lm.get_score(lm.tokenize(x))['Subjectivity'])
    tf['Positive_text_wc'] = tf['content'].apply(
        lambda x: lm.get_score(lm.tokenize(x))['Positive'])
    tf['Negative_text_wc'] = tf['content'].apply(
        lambda x: lm.get_score(lm.tokenize(x))['Negative'])
    tf['Total_text_wc'] = tf['content'].apply(lambda x: len(lm.tokenize(x)))
    tf['Negative_text_rate'] = tf['Negative_text_wc'] / tf['Total_text_wc']
    tf['Positive_text_rate'] = tf['Positive_text_wc'] / tf['Total_text_wc']
    tf['Max_Polarity'] = tf['content'].apply(lambda x: max(
        [lm.get_score(lm.tokenize(y))['Polarity'] for y in sent_tokenize(x)]))
    tf['Min_Polarity'] = tf['content'].apply(lambda x: min(
        [lm.get_score(lm.tokenize(y))['Polarity'] for y in sent_tokenize(x)]))
    tf['Sentences_wc'] = tf['content'].apply(lambda x: len(sent_tokenize(x)))
    tf['Positive_sentrate'] = tf['Positive_text_wc'] / tf['Sentences_wc']
    tf['Negative_sentrate'] = tf['Negative_text_wc'] / tf['Sentences_wc']

    # Readability features from the article body.
    tf['FRE_text'] = tf['content'].apply(
        lambda x: textstat.flesch_reading_ease(x))
    tf['FRE_tagged_text'] = tf['FRE_text'].apply(
        lambda x: 1 if x < 100 and x >= 90 else 2 if x < 90 and x >= 80 else 3
        if x < 80 and x >= 70 else 4 if x < 70 and x >= 60 else 5
        if x < 60 and x >= 50 else 6 if x < 50 and x >= 30 else 7)
    tf['FK_text'] = tf['content'].apply(
        lambda x: int(textstat.flesch_kincaid_grade(x)))
    tf['GFI_text'] = tf['content'].apply(lambda x: textstat.gunning_fog(x))
    tf['SMI_text'] = tf['content'].apply(lambda x: textstat.smog_index(x))
    tf['CLI_text'] = tf['content'].apply(
        lambda x: textstat.coleman_liau_index(x))
    tf['ARI_text'] = tf['content'].apply(
        lambda x: int(textstat.automated_readability_index(x)))
    tf['DC_text'] = tf['content'].apply(
        lambda x: textstat.dale_chall_readability_score(x))
    tf['Difficult_text_wc'] = tf['content'].apply(
        lambda x: textstat.difficult_words(x))

    # Hand-picked quantitative feature: number of percentage occurrences.
    # (Raw string used so the escapes are taken literally by re.)
    percent_pattern = re.compile(r'((?:|0|[1-9]\d\d?)(?:\.\d{1,3})?)%')
    tf['Percent_occurrences'] = tf['content'].apply(
        lambda x: len(percent_pattern.findall(x)))

    # Sentiment features from the headline.
    tf['Polarity_head'] = tf['title'].apply(
        lambda x: lm.get_score(lm.tokenize(x))['Polarity'])
    tf['Subjectivity_head'] = tf['title'].apply(
        lambda x: lm.get_score(lm.tokenize(x))['Subjectivity'])
    tf['Positive_head_wc'] = tf['title'].apply(
        lambda x: lm.get_score(lm.tokenize(x))['Positive'])
    tf['Negative_head_wc'] = tf['title'].apply(
        lambda x: lm.get_score(lm.tokenize(x))['Negative'])
    tf['Total_head_wc'] = tf['title'].apply(lambda x: len(lm.tokenize(x)))
    tf['Negative_head_rate'] = tf['Negative_head_wc'] / tf['Total_head_wc']
    tf['Positive_head_rate'] = tf['Positive_head_wc'] / tf['Total_head_wc']

    # Readability features from the headline.
    tf['FRE_head'] = tf['title'].apply(
        lambda x: textstat.flesch_reading_ease(x))
    tf['FRE_tagged_head'] = tf['FRE_head'].apply(
        lambda x: 1 if x < 100 and x >= 90 else 2 if x < 90 and x >= 80 else 3
        if x < 80 and x >= 70 else 4 if x < 70 and x >= 60 else 5
        if x < 60 and x >= 50 else 6 if x < 50 and x >= 30 else 7)
    tf['FK_head'] = tf['title'].apply(
        lambda x: int(textstat.flesch_kincaid_grade(x)))
    tf['GFI_head'] = tf['title'].apply(lambda x: textstat.gunning_fog(x))
    tf['SMI_head'] = tf['title'].apply(lambda x: textstat.smog_index(x))
    tf['CLI_head'] = tf['title'].apply(
        lambda x: textstat.coleman_liau_index(x))
    tf['ARI_head'] = tf['title'].apply(
        lambda x: int(textstat.automated_readability_index(x)))
    tf['DC_head'] = tf['title'].apply(
        lambda x: textstat.dale_chall_readability_score(x))
    tf['Difficult_head_wc'] = tf['title'].apply(
        lambda x: textstat.difficult_words(x))

    # Classify and select up to 10 articles by confidence.
    tf = tf.add_row_number()
    pred = model.classify(tf)
    pred = pred.add_row_number()
    # BUG FIX: the original first assigned
    #   relevant = pred.sort('probability', ascending=False)[:10]
    # and immediately overwrote it on the next line; the dead assignment
    # has been removed.
    relevant = pred[pred['class'] == 1]
    non_relevant = pred[pred['class'] == 0]
    if relevant.num_rows() > 10:
        relevant_news_out = tf.join(relevant).sort('probability',
                                                   ascending=False)[:10]
    else:
        # Pad with the least-confidently non-relevant articles.
        relevant_news = relevant.sort('probability', ascending=False)
        req_num_non_relevant_news = 10 - relevant.num_rows()
        non_relevant_news = non_relevant.sort(
            'probability')[:req_num_non_relevant_news]
        relevant_news = relevant_news.append(non_relevant_news)
        relevant_news_out = tf.join(relevant_news)
    return relevant_news_out
#!/bin/python
"""Print a comma-separated line of textstat readability metrics for a file.

Usage: <script> <inputfile>
"""
import sys, string, os
from textstat.textstat import textstat

# Guard against a missing argument instead of raising a bare IndexError.
if len(sys.argv) < 2:
    sys.exit("usage: %s <inputfile>" % os.path.basename(sys.argv[0]))

inputfile = ''
test_data = ""
script_name = sys.argv[0]
inputfile = sys.argv[1]

# Collapse the file into one string, stripping trailing whitespace per line.
with open(inputfile) as myfile:
    test_data = "".join(line.rstrip() for line in myfile)

# Metric values in the original output order.
# NOTE(review): readability_consensus was renamed text_standard in newer
# textstat releases — confirm the pinned textstat version still provides it.
metrics = [
    textstat.flesch_reading_ease(test_data),
    textstat.smog_index(test_data),
    textstat.flesch_kincaid_grade(test_data),
    textstat.coleman_liau_index(test_data),
    textstat.automated_readability_index(test_data),
    textstat.dale_chall_readability_score(test_data),
    textstat.difficult_words(test_data),
    textstat.linsear_write_formula(test_data),
    textstat.gunning_fog(test_data),
    textstat.readability_consensus(test_data),
    textstat.syllable_count(test_data),
    textstat.lexicon_count(test_data, 1),  # 1 -> remove punctuation first
    textstat.sentence_count(test_data),
]
print(','.join(str(m) for m in metrics))
# Accumulate per-tweet readability grades into the (externally defined)
# *_grades lists and *_total_grade sums.
# NOTE(review): num_tweets is initialized here but never incremented in
# this loop, and dcr_total_grade (Dale-Chall?) is never added to —
# confirm both are updated elsewhere, otherwise this is a bug.
dcr_total_grade = 0
num_tweets = 0
for tweet in cleanest_tweets:
    # skipping tweets which are not just contextbased text.
    if textstat.sentence_count(tweet) < 1:
        continue
    # Flesch-Kincaid grade level
    flesch_kincaid_grade = textstat.flesch_kincaid_grade(tweet)
    flesch_kincaid_grades.append(flesch_kincaid_grade)
    flesch_kincaid_total_grade += flesch_kincaid_grade
    # Gunning FOG index
    gunning_fog_grade = textstat.gunning_fog(tweet)
    gunning_fog_grades.append(gunning_fog_grade)
    gunning_fog_total_grade += gunning_fog_grade
    # SMOG index
    smog_index_grade = textstat.smog_index(tweet)
    smog_index_grades.append(smog_index_grade)
    smog_index_total_grade += smog_index_grade
    # Automated Readability Index
    ar_index_grade = textstat.automated_readability_index(tweet)
    ar_index_grades.append(ar_index_grade)
    ar_index_total_grade += ar_index_grade
    # Coleman-Liau index
    cl_index_grade = textstat.coleman_liau_index(tweet)
    cl_index_grades.append(cl_index_grade)
    cl_index_total_grade += cl_index_grade
    # Linsear Write formula
    lwf_grade = textstat.linsear_write_formula(tweet)
    lwf_grades.append(lwf_grade)
    lwf_total_grade += lwf_grade
def updateData(self):
    """Recompute all statistics for ``self.text`` and return ``self.data``.

    self.data is appended to, in order:
      [0] text sentiment, [1] sentence info, [2] Flesch Reading Ease,
      [3] Flesch-Kincaid, [4] Gunning FOG, [5] SMOG, [6] ARI,
      [7] Coleman-Liau, [8] Linsear Write, [9] Dale-Chall,
      [10] overall text standard.
    Indices 2-10 each hold [clamped score, grade description,
    normalized (ratio) score].
    """
    # Full list of polarity scores.
    # (self.sid looks like an NLTK VADER SentimentIntensityAnalyzer — the
    # 'compound'/'neg'/'pos'/'neu' keys match its output; confirm.)
    self.polscore = self.sid.polarity_scores(self.text)

    ##### INDEX 0 IN DATA: Text Sentiment #####
    # [INDEX 0] Compounded score (0.0 - 1.0) [INDEX 1] Negative connotation rating (0.0 - 1.0),
    # [INDEX 2] Positive connotation rating (0.0 - 1.0) [INDEX 3] Neutral connotation rating (0.0 - 1.0)
    self.data.append([
        self.polscore['compound'], self.polscore['neg'],
        self.polscore['pos'], self.polscore['neu']
    ])

    ##### INDEX 1 IN DATA: Sentence Info #####
    # [INDEX 0] Sentence count [INDEX 1] Average sentence length
    # [INDEX 2] Syllable count [INDEX 3] Overall word count
    # [INDEX 4] Character count [INDEX 5] Character count without spaces
    # [INDEX 6] Avg letters per word [INDEX 7] Avg syllables per word
    self.data.append([
        textstat.sentence_count(self.text),
        textstat.avg_sentence_length(self.text),
        textstat.syllable_count(self.text),
        len(self.splList),
        textstat.char_count(self.text, False),
        textstat.char_count(self.text, True),
        textstat.avg_letter_per_word(self.text),
        textstat.avg_syllables_per_word(self.text)
    ])

    ##### INDEX 2 IN DATA: Flesch Reading Ease #####
    # [INDEX 0] Pure score [INDEX 1] Approximate grade [INDEX 2] Normalized (ratio) score
    # SCORE SCALE: 0 - 100
    self.freRaw = textstat.flesch_reading_ease(self.text)
    self.freStat = min(max(self.freRaw, 0), 100)  # clamp into [0, 100]
    self.data.append([
        round(self.freStat, 3),
        self.freGrade(self.freStat),
        # Inverted ratio: higher reading ease -> lower normalized score.
        round(abs(self.freStat - 100), 2)
    ])

    ##### INDEX 3 IN DATA: Flesch-Kincaid Grade #####
    # [INDEX 0] Pure score [INDEX 1] Approximate grade [INDEX 2] Normalized (ratio) score
    # SCORE SCALE: 0 - 18
    self.fkgRaw = textstat.flesch_kincaid_grade(self.text)
    self.fkgStat = self.adjustScore(self.fkgRaw)
    self.data.append([
        round(self.fkgStat, 3),
        self.grade(self.fkgStat),
        round(self.fkgStat / 0.18, 2)
    ])

    ##### INDEX 4 IN DATA: Gunning FOG Index #####
    # [INDEX 0] Pure Score [INDEX 1] Approximate grade [INDEX 2] Normalized (ratio) score
    # SCORE SCALE: 0 - 18
    self.fogRaw = textstat.gunning_fog(self.text)
    self.fogStat = self.adjustScore(self.fogRaw)
    self.data.append([
        round(self.fogStat, 3),
        self.grade(self.fogStat),
        round(self.fogStat / 0.18, 2)
    ])

    ##### INDEX 5 IN DATA: SMOG Index #####
    # [INDEX 0] Pure Score [INDEX 1] Approximate grade [INDEX 2] Normalized (ratio) score
    # SCORE SCALE: 0 - 18
    self.smogRaw = textstat.smog_index(self.text)
    self.smogStat = self.adjustScore(self.smogRaw)
    self.data.append([
        round(self.smogStat, 3),
        self.grade(self.smogStat),
        round(self.smogStat / 0.18, 2)
    ])

    ##### INDEX 6 IN DATA: Automated Readability Index #####
    # [INDEX 0] Pure Score [INDEX 1] Approximate grade [INDEX 2] Normalized (ratio) score
    # SCORE SCALE: 0 - 14
    self.ariRaw = textstat.automated_readability_index(self.text)
    self.ariStat = min(max(self.ariRaw, 0), 14)  # clamp into [0, 14]
    self.data.append([
        round(self.ariStat, 3),
        self.ariGrade(ceil(self.ariStat)),
        round(self.ariStat / 0.14, 2)
    ])  #13

    ##### INDEX 7 IN DATA: Coleman-Liau Index #####
    # [INDEX 0] Pure Score [INDEX 1] Approximate grade [INDEX 2] Normalized (ratio) score
    # SCORE SCALE: 0 - 18
    self.cliRaw = textstat.coleman_liau_index(self.text)
    self.cliStat = self.adjustScore(self.cliRaw)
    self.data.append([
        round(self.cliStat, 3),
        self.grade(self.cliStat),
        round(self.cliStat / 0.18, 2)
    ])

    ##### INDEX 8 IN DATA: Linsear Write Index #####
    # [INDEX 0] Pure Score [INDEX 1] Approximate grade [INDEX 2] Normalized (ratio) score
    # SCORE SCALE: 0 - 18
    self.lwiRaw = textstat.linsear_write_formula(self.text)
    self.lwiStat = self.adjustScore(self.lwiRaw)
    self.data.append([
        round(self.lwiStat, 3),
        self.grade(self.lwiStat),
        round(self.lwiStat / 0.18, 2)
    ])

    ##### INDEX 9 IN DATA: Dale-Chall Readability Score #####
    # [INDEX 0] Pure Score [INDEX 1] Approximate grade [INDEX 2] Normalized (ratio) score
    # SCORE SCALE: 0 - 10
    self.dcrRaw = textstat.dale_chall_readability_score(self.text)
    self.dcrStat = min(max(self.dcrRaw, 0), 10)  # clamp into [0, 10]
    self.data.append([
        round(self.dcrStat, 3),
        self.daleChallGrade(self.dcrStat),
        round(self.dcrStat / 0.1, 2)
    ])

    ##### INDEX 10 IN DATA: Overall Score #####
    # [INDEX 0] Pure Score [INDEX 1] Approximate grade [INDEX 2] Normalized (ratio) score
    # SCORE SCALE: 0 - 20
    # text_standard(text, True) returns the numeric (float) form;
    # the second call returns the descriptive string form.
    self.txtRaw = textstat.text_standard(self.text, True)
    self.txtStd = min(max(self.txtRaw, 0), 20)  # clamp into [0, 20]
    self.txtInfo = textstat.text_standard(self.text)
    self.data.append([
        round(self.txtStd, 3),
        self.txtGrade(self.txtStd, self.txtInfo),
        round(self.txtStd / 0.2, 2)
    ])

    return self.data
sf['Min_Polarity'] = sf['content'].apply(lambda x: min( [lm.get_score(lm.tokenize(y))['Polarity'] for y in sent_tokenize(x)])) sf['Sentences_wc'] = sf['content'].apply(lambda x: len(sent_tokenize(x))) sf['Positive_sentrate'] = sf['Positive_text_wc'] / sf['Sentences_wc'] sf['Negative_sentrate'] = sf['Negative_text_wc'] / sf['Sentences_wc'] #Readability feature extraction from content of news articles sf['FRE_text'] = sf['content'].apply(lambda x: textstat.flesch_reading_ease(x)) sf['FRE_tagged_text'] = sf['FRE_text'].apply( lambda x: 1 if x < 100 and x >= 90 else 2 if x < 90 and x >= 80 else 3 if x < 80 and x >= 70 else 4 if x < 70 and x >= 60 else 5 if x < 60 and x >= 50 else 6 if x < 50 and x >= 30 else 7) sf['FK_text'] = sf['content'].apply( lambda x: int(textstat.flesch_kincaid_grade(x))) sf['GFI_text'] = sf['content'].apply(lambda x: textstat.gunning_fog(x)) sf['SMI_text'] = sf['content'].apply(lambda x: textstat.smog_index(x)) sf['CLI_text'] = sf['content'].apply(lambda x: textstat.coleman_liau_index(x)) sf['ARI_text'] = sf['content'].apply( lambda x: int(textstat.automated_readability_index(x))) sf['DC_text'] = sf['content'].apply( lambda x: textstat.dale_chall_readability_score(x)) sf['Difficult_text_wc'] = sf['content'].apply( lambda x: textstat.difficult_words(x)) #Hand-picked quantitative features - # of percentage occurrences percent_pattern = re.compile('((?:|0|[1-9]\d\d?)(?:\.\d{1,3})?)%') sf['Percent_occurrences'] = sf['content'].apply( lambda x: len(percent_pattern.findall(x))) #Polarity feature extraction from news headlines sf['Polarity_head'] = sf['title'].apply(
# Score review helpfulness, build readability features, and grid-search an SVR.
reviews['scores'] = reviews['helpful'].apply(compute_score)
print reviews['scores'].head(n=10)
y = reviews['scores']
Text = reviews['reviewText']
del reviews  # free the frame; only y and Text are needed from here on

# Feature matrix: four textstat readability scores per review.
X = np.zeros((len(Text), 4))
for idx, review in enumerate(Text):
    if review == '':
        continue  # leave the row as zeros for empty reviews
    try:
        X[idx][0] = ts.flesch_reading_ease(review)
        X[idx][1] = ts.flesch_kincaid_grade(review)
        X[idx][2] = ts.gunning_fog(review)
        X[idx][3] = ts.smog_index(review)
    except Exception as e:
        # Best-effort: keep the zero row and report the offending review.
        print review
        print e

X = StandardScaler().fit_transform(X)
print 'Computed X'
print X[0]

model = SVR(verbose=True)
params = {'C': [0.1, 0.5]}
# NOTE(review): the 'mean_squared_error' scoring name was replaced by
# 'neg_mean_squared_error' in scikit-learn 0.18+ — confirm the pinned
# version; abs() below compensates for the sign either way.
grid = GridSearchCV(model, params, cv=10, scoring='mean_squared_error',
                    n_jobs=-1)
grid.fit(X, y)
print grid.best_score_
print 'RMSE: ' + str(sqrt(abs(grid.best_score_)))
def smog(example):
    """Return the SMOG index of *example*'s review text, or 0 when
    *example* is falsy (None, empty, etc.)."""
    return textstat.smog_index(example.review) if example else 0