def lex_readability(self, text, mode='fre'):

        if mode == 'all':
            fre_score = textstat.flesch_reading_ease(text)
            fog_index = textstat.gunning_fog(text)
            fkg_index = textstat.flesch_kincaid_grade(text)
            dcr_score = textstat.dale_chall_readability_score(text)
            text_standard = textstat.text_standard(text, float_output=True)
            return fre_score, fog_index, fkg_index, dcr_score, text_standard

        if mode == 'fre':
            fre_score = textstat.flesch_reading_ease(text)
            return fre_score

        if mode == 'fog':
            fog_index = textstat.gunning_fog(text)
            return fog_index

        if mode == 'fkg':
            fkg_index = textstat.flesch_kincaid_grade(text)
            return fkg_index

        if mode == 'dcr':
            dcr_score = textstat.dale_chall_readability_score(text)
            return dcr_score

        if mode == 'text_std':
            text_standard = textstat.text_standard(text, float_output=True)
            return text_standard
Example #2
File: DE_main.py  Project: zzs-NLP/ACS-QG
def get_readibility(text, metric="flesch_kincaid_grade"):
    """
    Return a score which reveals a piece of text's readability level.
    Reference: https://chartbeat-labs.github.io/textacy/getting_started/quickstart.html
               https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests
    """
    if metric == "flesch_kincaid_grade":
        result = textstat.flesch_kincaid_grade(text)
    elif metric == "flesch_reading_ease":
        result = textstat.flesch_reading_ease(text)
    elif metric == "smog_index":
        result = textstat.smog_index(text)
    elif metric == "coleman_liau_index":
        result = textstat.coleman_liau_index(text)
    elif metric == "automated_readability_index":
        result = textstat.automated_readability_index(text)
    elif metric == "dale_chall_readability_score":
        result = textstat.dale_chall_readability_score(text)
    elif metric == "difficult_words":
        result = textstat.difficult_words(text)
    elif metric == "linsear_write_formula":
        result = textstat.linsear_write_formula(text)
    elif metric == "gunning_fog":
        result = textstat.gunning_fog(text)
    elif metric == "text_standard":
        result = textstat.text_standard(text)
    else:
        print("ERROR: Please select correct metric!")
        result = None
    return result
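The Flesch-Kincaid grade used above is a linear function of average sentence length and average syllables per word. Below is a minimal sketch of the published formula, computed with textstat's own counting helpers; the constant terms are from the standard Flesch-Kincaid definition, and exact agreement with textstat.flesch_kincaid_grade may vary slightly with tokenization and rounding.

import textstat

def flesch_kincaid_grade_manual(text):
    # Flesch-Kincaid grade = 0.39 * (words / sentences) + 11.8 * (syllables / words) - 15.59
    words = textstat.lexicon_count(text, removepunct=True)
    sentences = textstat.sentence_count(text)
    syllables = textstat.syllable_count(text)
    if words == 0 or sentences == 0:
        return 0.0
    return 0.39 * (words / sentences) + 11.8 * (syllables / words) - 15.59

sample = "The quick brown fox jumps over the lazy dog. It was not amused."
print(flesch_kincaid_grade_manual(sample), textstat.flesch_kincaid_grade(sample))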
Example #3
def textstat_stats(text):
    doc_length = len(text.split()) 
    flesch_ease = ts.flesch_reading_ease(text) #Flesch Reading Ease Score
    flesch_grade = ts.flesch_kincaid_grade(text) #Flesch-Kincaid Grade Level
    gfog = ts.gunning_fog(text) # FOG index, also indicates grade level
#    smog = ts.smog_index(text) # SMOG index, also indicates grade level, only useful on 30+ sentences
    auto_readability = ts.automated_readability_index(text) #approximates the grade level needed to comprehend the text.
    cl_index = ts.coleman_liau_index(text) #grade level of the text using the Coleman-Liau Formula.
    lw_formula = ts.linsear_write_formula(text) #grade level using the Linsear Write Formula.
    dcr_score = ts.dale_chall_readability_score(text) #uses a lookup table of the most commonly used 3000 English words
#    text_standard = ts.text_standard(text, float_output=False) # summary of all the grade level functions
    syll_count = ts.syllable_count(text, lang='en_US')
    syll_count_scaled = syll_count / doc_length
    lex_count = ts.lexicon_count(text, removepunct=True)
    lex_count_scaled = lex_count / doc_length
    idx = ['flesch_ease', 'flesch_grade','gfog',
           'auto_readability','cl_index','lw_formula',
           'dcr_score', 
#           'text_standard', 
           'syll_count', 'lex_count']
    return pd.Series([flesch_ease, flesch_grade, gfog, 
                      auto_readability, cl_index, lw_formula, 
                      dcr_score, 
#                      text_standard, 
                      syll_count_scaled, lex_count_scaled], index = idx)
Example #4
def get_stats(text):
    fre = textstat.flesch_reading_ease(text)
    smog = textstat.smog_index(text)
    fkg = textstat.flesch_kincaid_grade(text)
    cli = textstat.coleman_liau_index(text)
    ari = textstat.automated_readability_index(text)
    dcr = textstat.dale_chall_readability_score(text)
    diff_words = textstat.difficult_words(text)
    lwf = textstat.linsear_write_formula(text)
    gunn_fog = textstat.gunning_fog(text)
    consolidated_score = textstat.text_standard(text)

    doc_length = len(text)  # think about excluding spaces?
    quote_count = text.count('"')

    stats = {
        "flesch_reading_ease": fre,
        "smog_index": smog,
        "flesch_kincaid_grade": fkg,
        "coleman_liau_index": cli,
        "automated_readability_index": ari,
        "dale_chall_readability_score": dcr,
        "difficult_words": diff_words,
        "linsear_write_formula": lwf,
        "gunning_fog": gunn_fog,
        "consolidated_score": consolidated_score,
        "doc_length": doc_length,
        "quote_count": quote_count
    }
    return stats
Example #5
def score(full):
    st.header(textstat.flesch_reading_ease(full))
    st.write('Flesch Reading Ease Score')
    text = """90-100 Very Easy,70-79 Fairly Easy,60-69 Standard,50-59Fairly Difficult,30-49 Difficult,0-29 Very 
    Confusing """
    st.write(text, key=1)

    st.header(textstat.smog_index(full))
    st.write('Smog Index Score')
    text = "Returns the SMOG index of the given text.This is a grade formula in that a score of 9.3 means that a ninth " \
           "grader would be able to read the document.Texts of fewer than 30 sentences are statistically invalid, " \
           "because the SMOG formula was normed on 30-sentence samples. textstat requires at least 3 sentences for a " \
           "result. "
    st.write(text, key=2)

    st.header(textstat.dale_chall_readability_score(full))
    st.write('Dale Chall Readability Score')
    text = """Different from other tests, since it uses a lookup table of the most commonly used 3000 English words. 
            Thus it returns the grade level using the New Dale-Chall Formula.
            4.9 or lower	average 4th-grade student or lower
            5.0–5.9	average 5th or 6th-grade student
            6.0–6.9	average 7th or 8th-grade student
            7.0–7.9	average 9th or 10th-grade student
            8.0–8.9	average 11th or 12th-grade student
            9.0–9.9	average 13th to 15th-grade (college) student"""
    st.write(text, key=3)
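The score bands quoted in the strings above can also be applied programmatically. A minimal sketch with illustrative helper names, following the Flesch Reading Ease and New Dale-Chall bands shown above (the standard 80-89 "Easy" band, omitted from the quoted string, is included for completeness):

def flesch_ease_band(score):
    # Flesch Reading Ease bands: higher scores mean easier text.
    if score >= 90: return "Very Easy"
    if score >= 80: return "Easy"
    if score >= 70: return "Fairly Easy"
    if score >= 60: return "Standard"
    if score >= 50: return "Fairly Difficult"
    if score >= 30: return "Difficult"
    return "Very Confusing"

def dale_chall_band(score):
    # New Dale-Chall bands: higher scores mean a higher grade level.
    if score >= 9.0: return "13th to 15th grade (college)"
    if score >= 8.0: return "11th or 12th grade"
    if score >= 7.0: return "9th or 10th grade"
    if score >= 6.0: return "7th or 8th grade"
    if score >= 5.0: return "5th or 6th grade"
    return "4th grade or lower"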
def terms_and_weights(sample):
    sentences = list()
    file_path = f"data/Job Bulletins/{sample}"
    with open(file_path) as file:
        contents = file.read()
    # Compute readability on the file contents, not on the path string.
    reading_score = textstat.flesch_reading_ease(contents)
    reading_score_2 = textstat.dale_chall_readability_score(contents)
    for line in contents.splitlines():
        for l in re.split(r"\.\s|\?\s|\!\s|\n", line):
            if l:
                sentences.append(l)
    cvec = CountVectorizer(stop_words='english',
                           min_df=3,
                           max_df=0.5,
                           ngram_range=(1, 2))
    sf = cvec.fit_transform(sentences)
    transformer = TfidfTransformer()
    transformed_weights = transformer.fit_transform(sf)
    weights = np.asarray(transformed_weights.mean(axis=0)).ravel().tolist()
    weights_df = pd.DataFrame({
        'term': cvec.get_feature_names(),
        'weight': weights
    })
    weights_df = weights_df.sort_values(by='weight', ascending=False).head(10)
    myList = {
        "term": weights_df.term.tolist(),
        "weight": weights_df.weight.tolist(),
        "scores": [reading_score, reading_score_2]
    }
    return jsonify(myList)
Example #7
def calculate_stats(data_folder):
    """Calculate stat of test.json file in a folder"""
    data_folder = Path(data_folder)
    for dataset in dataset_fields:
        print(f"loading {dataset}")
        field = dataset_fields[dataset]["text"].strip()
        sentences = []
        for item in json.load(open(data_folder / dataset / "test.json")):
            sentences.append(item[field][-1] if isinstance(item[field], list) else item[field])

        text = " ".join(sentences)
        lex_count = textstat.lexicon_count(text)
        print(lex_count)
        unique_words = count_words(text)
        print(f"all unique {len(unique_words)}")

        lower_unique_words = count_words(text, casing="lower")
        print(f"lowercase unique {len(lower_unique_words)}")

        upper_unique_words = count_words(text, casing="upper")
        print(f"uppercase unique {len(upper_unique_words)}")

        print(f"ratio {len(upper_unique_words) / len(unique_words)}")

        text_standard = textstat.text_standard(text, float_output=True)
        print(f"text_standard: {text_standard}")

        dale_chall_readability_score = textstat.dale_chall_readability_score(text)
        print(f"dale_chall_readability_score: {dale_chall_readability_score}")

        flesch_kincaid_grade = textstat.flesch_kincaid_grade(text)
        print(f"flesch_kincaid_grade: {flesch_kincaid_grade}")
    def readability_measures(self, as_dict=False):
        """
        Return the BOFIR score as well as other classic readability formulas for the paragraph.
        
        Parameters
        ----------
        as_dict : boolean
            Defines if output is a dataframe or dict
            
        Returns
        -------
        d: DataFrame
            DataFrame with the BOFIR score and additional readability measures
            
        """
        flesch = self.flesch
        smog = textstat.smog_index(self.paragraph)
        dale_chall = textstat.dale_chall_readability_score(self.paragraph)
        fog = textstat.gunning_fog(self.paragraph)
        bofir_5cat = self.bofir(cat5=True)
        bofir_3cat = self.bofir(cat5=False)

        d = {
            'bofir_5cat': bofir_5cat,
            'bofir_3cat': bofir_3cat,
            'fog': fog,
            'dale_chall': dale_chall,
            'smog': smog,
            'flesch': flesch
        }

        if as_dict:
            return d
        else:
            return pd.DataFrame(d, index=['readability_score'])
Example #9
def seven_test(processed_essay):
    """
    Scores assigned to every script on the basis of some predefined formulas.
    These scores are known as readability scores.
    flesch_score,gunning_index,kincaid_grade,liau_index,automated_readability_index,dale_readability_score,difficult_word,linsear_write
    :param processed_essay:
    :return:flesch_score,gunning_index,kincaid_grade,liau_index,automated_readability_index,dale_readability_score,difficult_word,linsear_write
    """
    flesch_score = ["FS"]
    gunning_index = ["GI"]
    kincaid_grade = ["KG"]
    liau_index = ["LI"]
    automated_readability_index = ["ARI"]
    dale_readability_score = ["DLS"]
    difficult_word = ["DW"]
    linsear_write = ["LW"]
    for v in processed_essay:
        flesch_score.append(textstat.flesch_reading_ease(str(v)))
        gunning_index.append(textstat.gunning_fog(str(v)))
        kincaid_grade.append(textstat.flesch_kincaid_grade(str(v)))
        liau_index.append(textstat.coleman_liau_index(str(v)))
        automated_readability_index.append(textstat.automated_readability_index(str(v)))
        dale_readability_score.append(textstat.dale_chall_readability_score(str(v)))
        difficult_word.append(textstat.difficult_words(str(v)))
        linsear_write.append(textstat.linsear_write_formula(str(v)))
    return flesch_score,gunning_index,kincaid_grade,liau_index,automated_readability_index,dale_readability_score,difficult_word,linsear_write
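Each list returned by seven_test carries its label as the first element, so the eight parallel lists can be stitched into a tabular form. A minimal usage sketch, assuming pandas is available; the essay list is illustrative and not part of the original snippet.

import pandas as pd

essays = ["The cat sat on the mat.", "Quantum chromodynamics describes the strong interaction."]
columns = seven_test(essays)  # each list looks like ["FS", score_1, score_2, ...]
frame = pd.DataFrame({col[0]: col[1:] for col in columns})
print(frame)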
Example #10
def get_readability_score(text, metric="flesch"):
    global tknzr, DIFFICULT

    text = text.replace("’", "'")

    # https://pypi.org/project/textstat/
    if metric == "flesch":
        return textstat.flesch_reading_ease(text)
    elif metric == "smog":
        return textstat.smog_index(text)
    elif metric == "coleman_liau_index":
        return textstat.coleman_liau_index(text)
    elif metric == "automated_readability_index":
        return textstat.automated_readability_index(text)
    elif metric == "dale_chall_readability_score":
        return textstat.dale_chall_readability_score(text)
    elif metric == "difficult_words":
        nb_difficult = 0
        nb_easy = 0
        for w in set(tknzr.tokenize(text.lower())):
            if w not in EASY_WORDS and len(w) >= 6:
                nb_difficult += 1
            else:
                nb_easy += 1
        return 100 * nb_difficult / (nb_difficult + nb_easy)
        #return textstat.difficult_words(text)#/len(text.split())
    elif metric == "linsear_write_formula":
        return textstat.linsear_write_formula(text)
    elif metric == "gunning_fog":
        return textstat.gunning_fog(text)
    elif metric == "avg_word_length":
        words = tknzr.tokenize(text)
        words = [w for w in words if w not in misc_utils.PUNCT]
        if len(words) == 0: return 0
        return np.average([len(w) for w in words])
Example #11
def analyze():
    print(request)
    str_to_read = request.data.decode("utf-8").strip()

    report = {
        "flesch-reading-ease":
        textstat.flesch_reading_ease(str_to_read),
        "smog-index":
        textstat.smog_index(str_to_read),
        "flesch-kincaid-grade":
        textstat.flesch_kincaid_grade(str_to_read),
        "coleman-liau-index":
        textstat.coleman_liau_index(str_to_read),
        "automated-readability-index":
        textstat.automated_readability_index(str_to_read),
        "dale-chall-readability-score":
        textstat.dale_chall_readability_score(str_to_read),
        "difficult-words":
        textstat.difficult_words(str_to_read),
        "linsear-write-formula":
        textstat.linsear_write_formula(str_to_read),
        "gunning-fog":
        textstat.gunning_fog(str_to_read),
        "text-standard":
        textstat.text_standard(str_to_read)
    }
    return decorate_response(jsonify(report))
Example #12
def readability(queries):

    scores = {
        'Flesch': [],
        'Smog': [],
        'Flesch grade': [],
        'Coleman': [],
        'Automated': [],
        'Dale': [],
        'Difficult': [],
        'Linsear': [],
        'Gunning': [],
        'Text Standard': []
    }
    for line in queries:
        # results = readability.getmeasures(line, lang='en')
        # frescores.append(results['readability grades']['FleschReadingEase'])
        # line = 'yao family wines . yao family wines is a napa valley producer founded in 2011 by yao ming , the chinese-born , five-time nba all star . now retired from the houston rockets , yao ming is the majority owner in yao family wines , which has entered the wine market with a luxury cabernet sauvignon sourced from napa valley vineyards .'
        scores['Flesch'].append(textstat.flesch_reading_ease(line))
        scores['Smog'].append(textstat.smog_index(line))
        scores['Flesch grade'].append(textstat.flesch_kincaid_grade(line))
        scores['Coleman'].append(textstat.coleman_liau_index(line))
        scores['Automated'].append(textstat.automated_readability_index(line))
        scores['Dale'].append(textstat.dale_chall_readability_score(line))
        scores['Difficult'].append(textstat.difficult_words(line))
        scores['Linsear'].append(textstat.linsear_write_formula(line))
        scores['Gunning'].append(textstat.gunning_fog(line))
        scores['Text Standard'].append(
            textstat.text_standard(line, float_output=True))

    return scores
Example #13
def getReadabilityMetrics(test_data):
    '''
        for a given article IN TEXT FORMAT, returns its readability metrics
        Uses textstat library, please install it
    '''
    metric = {
        "flesch_reading_ease":
        textstat.flesch_reading_ease(test_data),
        "smog_index":
        textstat.smog_index(test_data),
        "flesch_kincaid_grade":
        textstat.flesch_kincaid_grade(test_data),
        "coleman_liau_index":
        textstat.coleman_liau_index(test_data),
        "automated_readability_index":
        textstat.automated_readability_index(test_data),
        "dale_chall_readability_score":
        textstat.dale_chall_readability_score(test_data),
        "difficult_words":
        textstat.difficult_words(test_data),
        "linsear_write_formula":
        textstat.linsear_write_formula(test_data),
        "gunning_fog":
        textstat.gunning_fog(test_data),
        "text_standard":
        textstat.text_standard(test_data)
    }
    return metric
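As the docstring notes, getReadabilityMetrics depends on the textstat package (pip install textstat). A minimal usage sketch; the sample article text is illustrative.

article = ("Readability formulas estimate how hard a text is to read. "
           "Shorter sentences and familiar words usually produce easier scores.")
for name, value in getReadabilityMetrics(article).items():
    print(f"{name}: {value}")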
Example #14
def readability_scores_mp(data):
    result_dict, idx, text = data

  #  flesch_reading_ease =  textstat.flesch_reading_ease(text)
    flesch_kincaid_grade =  textstat.flesch_kincaid_grade(text)
    dale_chall_readability_score =  textstat.dale_chall_readability_score(text) 

    result_dict[idx] = [flesch_kincaid_grade, dale_chall_readability_score]
Example #15
 def getReadability(df):
     import textstat
     df['ARI'] = df.headline_text.apply(
         lambda x: textstat.automated_readability_index(x))
     df['DCR'] = df.headline_text.apply(
         lambda x: textstat.dale_chall_readability_score(x))
     df['TS'] = df.headline_text.apply(
         lambda x: textstat.text_standard(x, float_output=True))
     return df
Example #16
def metrics(sentence):
    fk = round(flesch_kincaid_grade(sentence), 3)
    gf = round(gunning_fog(sentence), 3)
    dc = round(dale_chall_readability_score(sentence), 3)

    fk_label = grade_label(round(fk))
    gf_label = grade_label(round(gf))
    dc_label = grade_label(dale_chall_norm(round(dc)))

    return (fk, gf, dc, fk_label, gf_label, dc_label)
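The helpers grade_label and dale_chall_norm are not shown in this snippet. A hypothetical sketch of what they might do, assuming grade_label turns a rounded U.S. grade number into a label and dale_chall_norm maps a Dale-Chall score onto an approximate grade using the bands quoted in Example #5; these are assumptions, not the original implementations.

def grade_label(grade):
    # Hypothetical: turn a rounded U.S. grade level into a readable label.
    if grade <= 0:
        return "kindergarten"
    if grade <= 12:
        return f"grade {grade}"
    return "college"

def dale_chall_norm(score):
    # Hypothetical: map a New Dale-Chall score onto an approximate U.S. grade
    # (4.9 or lower -> grade 4, 5.0-5.9 -> grade 6, ..., 9.0 and above -> college).
    if score < 5.0:
        return 4
    if score < 6.0:
        return 6
    if score < 7.0:
        return 8
    if score < 8.0:
        return 10
    if score < 9.0:
        return 12
    return 13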
Example #17
def compute_readability_stats(text):
    """
    Compute reading statistics of the given text
    Reference: https://github.com/shivam5992/textstat

    Parameters
    ==========
    text: str, input section or abstract text
    """
    try:
        readability_dict = {
            'flesch_reading_ease':
            textstat.flesch_reading_ease(text),
            'smog':
            textstat.smog_index(text),
            'flesch_kincaid_grade':
            textstat.flesch_kincaid_grade(text),
            'coleman_liau_index':
            textstat.coleman_liau_index(text),
            'automated_readability_index':
            textstat.automated_readability_index(text),
            'dale_chall':
            textstat.dale_chall_readability_score(text),
            'difficult_words':
            textstat.difficult_words(text),
            'linsear_write':
            textstat.linsear_write_formula(text),
            'gunning_fog':
            textstat.gunning_fog(text),
            'text_standard':
            textstat.text_standard(text),
            'n_syllable':
            textstat.syllable_count(text),
            'avg_letter_per_word':
            textstat.avg_letter_per_word(text),
            'avg_sentence_length':
            textstat.avg_sentence_length(text)
        }
    except Exception:
        readability_dict = {
            'flesch_reading_ease': None,
            'smog': None,
            'flesch_kincaid_grade': None,
            'coleman_liau_index': None,
            'automated_readability_index': None,
            'dale_chall': None,
            'difficult_words': None,
            'linsear_write': None,
            'gunning_fog': None,
            'text_standard': None,
            'n_syllable': None,
            'avg_letter_per_word': None,
            'avg_sentence_length': None
        }
    return readability_dict
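A brief usage sketch for compute_readability_stats; the abstract text is illustrative.

abstract = ("We study readability metrics for scientific abstracts. "
            "Simple formulas based on sentence length and syllable counts remain strong baselines.")
stats = compute_readability_stats(abstract)
print(stats["flesch_reading_ease"], stats["text_standard"])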
Example #18
    def score(self, strText):
        self.automated_readability_index = textstat.automated_readability_index(
            strText)
        self.str_automated_readability_index = self.grade(
            self.automated_readability_index)

        self.coleman_liau_index = textstat.coleman_liau_index(strText)
        self.str_coleman_liau_index = self.grade(self.coleman_liau_index)

        self.dale_chall_readability_score = textstat.dale_chall_readability_score(
            strText)
        if self.dale_chall_readability_score >= 9.0:
            self.str_dale_chall_readability_score = ' | ' + '13th to 15th grade (college)'
        elif self.dale_chall_readability_score >= 8.0:
            self.str_dale_chall_readability_score = ' | ' + '11th to 12th grade'
        elif self.dale_chall_readability_score >= 7.0:
            self.str_dale_chall_readability_score = ' | ' + '9th to 10th grade'
        elif self.dale_chall_readability_score >= 6.0:
            self.str_dale_chall_readability_score = ' | ' + '7th to 8th grade'
        elif self.dale_chall_readability_score >= 5.0:
            self.str_dale_chall_readability_score = ' | ' + '5th to 6th grade'
        else:
            self.str_dale_chall_readability_score = ' | ' + '4th grade or lower'

        self.difficult_words = textstat.difficult_words(strText)

        self.flesch_kincaid_grade = textstat.flesch_kincaid_grade(strText)
        self.str_flesch_kincaid_grade = self.grade(self.flesch_kincaid_grade)

        self.flesch_reading_ease = textstat.flesch_reading_ease(strText)
        if self.flesch_reading_ease >= 90:
            self.str_flesch_reading_ease = ' | ' + 'Very Easy'
        elif self.flesch_reading_ease >= 80:
            self.str_flesch_reading_ease = ' | ' + 'Easy'
        elif self.flesch_reading_ease >= 70:
            self.str_flesch_reading_ease = ' | ' + 'Fairly Easy'
        elif self.flesch_reading_ease >= 60:
            self.str_flesch_reading_ease = ' | ' + 'Standard'
        elif self.flesch_reading_ease >= 50:
            self.str_flesch_reading_ease = ' | ' + 'Fairly Difficult'
        elif self.flesch_reading_ease >= 30:
            self.str_flesch_reading_ease = ' | ' + 'Difficult'
        else:
            self.str_flesch_reading_ease = ' | ' + 'Very Confusing'

        self.gunning_fog = textstat.gunning_fog(strText)
        self.str_gunning_fog = self.grade(self.gunning_fog)

        self.linsear_write_formula = textstat.linsear_write_formula(strText)
        self.str_linsear_write_formula = self.grade(self.linsear_write_formula)

        self.smog_index = textstat.smog_index(strText)
        self.str_smog_index = self.grade(self.smog_index)

        self.text_standard = textstat.text_standard(strText)
Example #19
 def generate_score(self, text):
     self.flesch_reading_grade = ts.flesch_reading_ease(text)
     self.flesch_reading_grade_consensus = readability_test_consensus(self.flesch_reading_grade, flesch_ease_grading_system)
     self.flesch_kincaid_grade = ts.flesch_kincaid_grade(text)
     self.flesch_kincaid_grade_consensus = readability_test_consensus(self.flesch_kincaid_grade, us_grade_level_system_age)
     self.dale_chall_grade = ts.dale_chall_readability_score(text)
     self.dale_chall_grade_consensus = readability_test_consensus(self.dale_chall_grade, dale_chall_system)
     self.smog_grade = ts.smog_index(text)
     self.ari_grade = ts.automated_readability_index(text)
     """  self.ari_grade_consensus = readability_test_consensus(self.ari_grade, us_grade_level_system_level) """
     self.coleman_liau_grade = ts.coleman_liau_index(text)
     pass
Example #20
    def process(self, df):

        t0 = time()
        print("\n---Generating Readability Features:---\n")

        def lexical_diversity(text):
            words = nltk.tokenize.word_tokenize(text.lower())
            word_count = len(words)
            vocab_size = len(set(words))
            # Guard against empty documents to avoid a ZeroDivisionError.
            return vocab_size / word_count if word_count else 0.0

        def get_counts(text, word_list):
            words = nltk.tokenize.word_tokenize(text.lower())
            count = 0
            for word in words:
                if word in word_list:
                    count += 1
            return count

        df['flesch_reading_ease'] = df['articleBody'].map(lambda x: textstat.flesch_reading_ease(x))
        df['smog_index'] = df['articleBody'].map(lambda x: textstat.smog_index(x))
        df['flesch_kincaid_grade'] = df['articleBody'].map(lambda x: textstat.flesch_kincaid_grade(x))
        df['coleman_liau_index'] = df['articleBody'].map(lambda x: textstat.coleman_liau_index(x))
        df['automated_readability_index'] = df['articleBody'].map(lambda x: textstat.automated_readability_index(x))
        df['dale_chall_readability_score'] = df['articleBody'].map(lambda x: textstat.dale_chall_readability_score(x))
        df['difficult_words'] = df['articleBody'].map(lambda x: textstat.difficult_words(x))
        df['linsear_write_formula'] = df['articleBody'].map(lambda x: textstat.linsear_write_formula(x))
        df['gunning_fog'] = df['articleBody'].map(lambda x: textstat.gunning_fog(x))
        df['i_me_myself'] = df['articleBody'].apply(get_counts,args = (['i', 'me', 'myself'],))
        df['punct'] = df['articleBody'].apply(get_counts,args = ([',','.', '!', '?'],))
        df['lexical_diversity'] = df['articleBody'].apply(lexical_diversity)

        feats = ['flesch_reading_ease', 'smog_index', 'flesch_kincaid_grade',
        'coleman_liau_index', 'automated_readability_index', 
        'dale_chall_readability_score', 'difficult_words', 'linsear_write_formula',
        'gunning_fog', 'i_me_myself', 'punct', 'lexical_diversity'
        ]


        outfilename_xReadable = df[feats].values

        with open('../saved_data/read.pkl', 'wb') as outfile:
            pickle.dump(feats, outfile, -1)
            pickle.dump(outfilename_xReadable, outfile, -1)

        print ('readable features saved in read.pkl')
        
        print('\n---Readability Features is complete---')
        print("Time taken {} seconds\n".format(time() - t0))
        
        return 1
def cal_readability(target, source):
    import pandas as pd
    tf_r_es = [textstat.flesch_reading_ease(t) for t in target]
    tf_k_gs = [textstat.flesch_kincaid_grade(t) for t in target]
    td_c_rs = [textstat.dale_chall_readability_score(t) for t in target]
    
    sf_r_es = [textstat.flesch_reading_ease(t) for t in source]
    sf_k_gs = [textstat.flesch_kincaid_grade(t) for t in source]
    sd_c_rs = [textstat.dale_chall_readability_score(t) for t in source]
    
    diff_r_es = [np.abs(tf_r_es[i] - sf_r_es[i]) for i in range(len(tf_r_es))]
    diff_k_gs = [np.abs(tf_k_gs[i] - sf_k_gs[i]) for i in range(len(tf_k_gs))]
    difd_c_rs = [np.abs(td_c_rs[i] - sd_c_rs[i]) for i in range(len(td_c_rs))]
    
    return {"Flesch ease mean gen": np.mean(tf_r_es), \
            "Flesch ease mean orig": np.mean(sf_r_es), \
            "Flesch ease mean diff": np.mean(diff_r_es), \
            
            "Flesch grade mean gen": np.mean(tf_k_gs), \
            "Flesch grade mean orig": np.mean(sf_k_gs), \
            "Flesch grade mean diff": np.mean(diff_k_gs), \
            
            "Dale Chall Readability V2 mean gen": np.mean(td_c_rs), \
            "Dale Chall Readability V2 mean orig": np.mean(sd_c_rs), \
            "Dale Chall Readability V2 mean diff": np.mean(difd_c_rs), \
           },\
            \
            {"Flesch ease std dev gen": np.std(tf_r_es), \
            "Flesch ease std dev orig": np.std(sf_r_es), \
            "Flesch ease std dev diff": np.std(diff_r_es), \
            
            "Flesch grade std dev gen": np.std(tf_k_gs), \
            "Flesch grade std dev orig": np.std(sf_k_gs), \
            "Flesch grade std dev diff": np.std(diff_k_gs), \
            
            "Dale Chall Readability V2 std dev gen": np.std(td_c_rs),\
            "Dale Chall Readability V2 std dev orig": np.std(sd_c_rs),\
            "Dale Chall Readability V2 std dev diff": np.std(difd_c_rs)\
           }
Example #22
 def readability_scores(self, text):
     self.ari = textstat.automated_readability_index(text)
     self.flesch_kincaid_grade = textstat.flesch_kincaid_grade(text)
     self.coleman_liau_index = textstat.coleman_liau_index(text)
     self.dale_chall_readability_score = textstat.dale_chall_readability_score(
         text)
     self.flesch_reading_ease = textstat.flesch_reading_ease(text)
     self.gunning_fog = textstat.gunning_fog(text)
     self.linsear_write_formula = textstat.linsear_write_formula(text)
     self.lix = textstat.lix(text)
     self.rix = textstat.rix(text)
     self.smog_index = textstat.smog_index(text)
     self.text_standard = textstat.text_standard(text)
Example #23
def get_readability_stats(text):
    return {
        'flesch_reading_ease': textstat.flesch_reading_ease(text),
        'smog_index': textstat.smog_index(text),
        'flesch_kincaid_grade': textstat.flesch_kincaid_grade(text),
        'coleman_liau_index': textstat.coleman_liau_index(text),
        'automated_readability_index':
        textstat.automated_readability_index(text),
        'dale_chall_readability_score':
        textstat.dale_chall_readability_score(text),
        'linsear_write_formula': textstat.linsear_write_formula(text),
        'gunning_fog': textstat.gunning_fog(text),
        'text_standard': textstat.text_standard(text, float_output=True),
    }
def lisibilty(text):

    f_lis = ([
        textstat.syllable_count(str(text), lang='en_arabic'),
        textstat.lexicon_count(str(text), removepunct=True),
        textstat.sentence_count(str(text)),
        textstat.flesch_reading_ease(str(text)),
        textstat.flesch_kincaid_grade(str(text)),
        textstat.gunning_fog(str(text)),
        textstat.smog_index(str(text)),
        textstat.automated_readability_index(str(text)),
        textstat.coleman_liau_index(str(text)),
        textstat.linsear_write_formula(str(text)),
        textstat.dale_chall_readability_score(str(text))
    ])
    return f_lis
Example #25
def analyze_vocab(text):
    return {
        'num_words': textstat.lexicon_count(text),
        'flesch_reading_ease': textstat.flesch_reading_ease(text),
        'smog_index': textstat.smog_index(text),
        'flesch_kincaid_grade': textstat.flesch_kincaid_grade(text),
        'coleman_liau_index': textstat.coleman_liau_index(text),
        'automated_readability_index':
        textstat.automated_readability_index(text),
        'dale_chall_readability_score':
        textstat.dale_chall_readability_score(text),
        'difficult_words': textstat.difficult_words(text),
        'linsear_write_formula': textstat.linsear_write_formula(text),
        'gunning_fog': textstat.gunning_fog(text),
        'text_standard': textstat.text_standard(text, float_output=True)
    }
Example #26
def vocab_check(text):
    
    #Construct dictionary
    vocab_results = {'dale_chall_readability_score': dale_chall_readability_score(text),
                     'smog_index': smog_index(text), 'gunning_fog': gunning_fog(text),
                     'flesch_reading_ease': flesch_reading_ease(text),
                     'flesch_kincaid_grade': flesch_kincaid_grade(text),
                     'linsear_write_formula': linsear_write_formula(text),
                     'coleman_liau_index': coleman_liau_index(text),
                     'automated_readability_index': automated_readability_index(text),
                     'yule_vocab_richness': yule(text),
                     'total_score': text_standard(text, float_output=True)}
                     
    diff_words, easy_word_dict = difficult_words(text)
    
    return(vocab_results, diff_words, easy_word_dict)
def add_features(row):
    '''Feature engineering via NLP.'''
    text = row.text
    doc = nlp(text)
    lemmas = list()
    entities = list()
    for token in doc:
        if token.text == ':':
            row['has_colon'] = 1
        if token.text == ';':
            row['has_semicolon'] = 1
        if token.text == '-':
            row['has_dash'] = 1
        if token.text.lower() == 'whom':
            row['whom'] = 1
        if token.text[-3:] == 'ing':
            row['num_ings'] += 1
        if token.text.lower() == 'had':
            row['has_had'] = 1
        pos = token.pos_
        row[pos] += 1
        if token.is_stop or not token.is_alpha:
            continue
        lemma = token.lemma_.strip().lower()
        if lemma:
            lemmas.append(lemma)
    for ent in doc.ents:
        entities.append(ent.text)
    lemmas = ' '.join(lemmas)
    blob = TextBlob(text)
    row['subjectivity'] = blob.sentiment.subjectivity
    row['polarity'] = blob.sentiment.polarity
    # spaCy labels coordinating conjunctions 'CCONJ' (older releases used 'CONJ').
    row['starts_conj'] = int(doc[0].pos_ in ('CONJ', 'CCONJ'))
    # Check the final token, not the first; spaCy tags prepositions as 'ADP'.
    row['ends_prep'] = int(doc[-1].pos_ == 'ADP')
    row['entities'] = entities
    row['lemmas'] = lemmas
    row['raw_text_length'] = len(text)
    row['num_words'] = len(doc)
    row['avg_word_len'] = row.raw_text_length / row.num_words
    row['vector_avg'] = np.mean(nlp(lemmas).vector)
    row['num_ings'] /= row['num_words']
    row['rhyme_frequency'] = rhyme_frequency(row['text'])
    row['dale_chall'] = textstat.dale_chall_readability_score(row['text'])
    row['FleischReadingEase'] = textstat.flesch_reading_ease(row['text'])
    row['lexicon'] = textstat.lexicon_count(row['text'])
    row['word_diversity'] = row.lexicon / row.num_words
    return row
Example #28
def textstat_stats(text):
    difficulty = textstat.flesch_reading_ease(text)
    grade_difficulty = textstat.flesch_kincaid_grade(text)
    gfog = textstat.gunning_fog(text)
    smog = textstat.smog_index(text)
    ari = textstat.automated_readability_index(text)
    cli = textstat.coleman_liau_index(text)
    lwf = textstat.linsear_write_formula(text)
    dcrs = textstat.dale_chall_readability_score(text)
    idx = [
        'difficulty', 'grade_difficulty', 'gfog', 'smog', 'ari', 'cli', 'lwf',
        'dcrs'
    ]

    return pd.Series(
        [difficulty, grade_difficulty, gfog, smog, ari, cli, lwf, dcrs],
        index=idx)
Example #29
 def get_readability_features(self):
     sent_tokens = text_tokenizer(self.raw_text,
                                  replace_url_flag=True,
                                  tokenize_sent_flag=True)
     sentences = [' '.join(sent) + '\n' for sent in sent_tokens]
     sentences = ''.join(sentences)
     self.syllable_count = textstat.syllable_count(sentences)
     self.flesch_reading_ease = textstat.flesch_reading_ease(sentences)
     self.flesch_kincaid_grade = textstat.flesch_kincaid_grade(sentences)
     self.fog_scale = textstat.gunning_fog(sentences)
     self.smog = textstat.smog_index(sentences)
     self.automated_readability = textstat.automated_readability_index(
         sentences)
     self.coleman_liau = textstat.coleman_liau_index(sentences)
     self.linsear_write = textstat.linsear_write_formula(sentences)
     self.dale_chall_readability = textstat.dale_chall_readability_score(
         sentences)
     self.text_standard = textstat.text_standard(sentences)
Example #30
 def score_text(self, test_data):
     score = {}
     score['flesch_reading_ease'] = textstat.flesch_reading_ease(test_data)
     score['smog_index'] = textstat.smog_index(test_data)
     score['flesch_kincaid_grade'] = textstat.flesch_kincaid_grade(
         test_data)
     score['coleman_liau_index'] = textstat.coleman_liau_index(test_data)
     score[
         'automated_readability_index'] = textstat.automated_readability_index(
             test_data)
     score[
         'dale_chall_readability_score'] = textstat.dale_chall_readability_score(
             test_data)
     score['difficult_words'] = textstat.difficult_words(test_data)
     score['linsear_write_formula'] = textstat.linsear_write_formula(
         test_data)
     score['gunning_fog'] = textstat.gunning_fog(test_data)
     score['text_standard'] = textstat.text_standard(test_data)
     return score
Example #31
File: test.py  Project: shivam5992/textstat
def test_dale_chall_readability_score():
    score = textstat.dale_chall_readability_score(long_test)

    assert score == 6.87
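The value asserted above comes from the New Dale-Chall formula, which combines the percentage of words outside the familiar-word list with average sentence length. A minimal sketch of that formula using textstat's counting helpers; the constants are from the published formula, while textstat's own word list and tokenization determine the exact value it returns.

import textstat

def dale_chall_manual(text):
    # New Dale-Chall: 0.1579 * (% difficult words) + 0.0496 * (words / sentences),
    # plus 3.6365 when more than 5% of the words are unfamiliar.
    words = textstat.lexicon_count(text, removepunct=True)
    sentences = textstat.sentence_count(text)
    difficult = textstat.difficult_words(text)
    if words == 0 or sentences == 0:
        return 0.0
    pct_difficult = 100 * difficult / words
    score = 0.1579 * pct_difficult + 0.0496 * (words / sentences)
    if pct_difficult > 5:
        score += 3.6365
    return round(score, 2)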