Example #1
def write(request):
    profile = request.user.get_profile()
    # try:
    #     message = request.session['message']
    # except:
    #     message = ""
    if request.method == 'POST':
        form = EntryForm(request.POST)
        if form.is_valid() and form.cleaned_data['text'] != "":
            entry = Entry()
            entry.text = form.cleaned_data['text']
            entry.creator = UserProfile.objects.get(user=profile.user)
            entry.create_time = datetime.datetime.now()
            entry.save()
            if word_count(entry.text) >= profile.word_goal:
                time_delta = datetime.timedelta(hours=profile.hours_per_goal)
                last_post = profile.flag_time - time_delta
                profile.flag_time = entry.create_time + time_delta
                profile.flag = True
                # check consecutive days
                if datetime.datetime.now() - last_post < datetime.timedelta(hours=24):
                    profile.consecutive_days += 1
                else:
                    profile.consecutive_days = 1
                profile.save()
                return redirect('/entries/')
            else:
                check_github_two(profile)
                return redirect('/entries/')
        else:
            check_github_two(profile)
            return redirect('/entries/')
    else:
        form = EntryForm()
    return render_to_response("write.html", {'form':form, 'profile':profile}, context_instance=RequestContext(request))
Example #2
def main():
    # print(functions.hailStoneR(10))
    # print(functions.factorialL(10))
    # functions.chooseLargest([1, 2, 3, 4, 5], [2, 2, 9, 0, 9])
    # print(functions.chooseLargest_comp([1, 2, 3, 4, 5], [2, 2, 9, 0, 9]))
    # print(functions.chooseLargest_lambda([1, 2, 3, 4, 5], [2, 2, 9, 0, 9]))
    # print(functions.redact())
    # print(functions.sum_it())
    # deck = functions.createDeck()
    # print(functions.shuffle(deck))
    print(functions.word_count())
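Note that `functions.word_count` is called with no argument here, while the later examples pass it a text string. A minimal sketch of the text-taking variant, assuming plain whitespace tokenization:

def word_count(text):
    # Naive whitespace split; the real helper may strip punctuation first.
    return len(text.split())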
Example #3
def print_report(name, text):
    # create a .txt file in which to save the analysis report
    name = name + ".txt"
    report = open(name, "w")
    print("writing the report...\n")
    report.write("ANALYSIS REPORT\n")
    report.write("_______________\n")
    report.write("\n")
    report.write("number of sentences: {}\n".format(
        functions.sentence_count(text)))
    report.write("number of words: {}\n".format(functions.word_count(text)))
    report.write("number of characters(no spaces): {}\n".format(
        functions.char_count_nospace(text)))
    report.write("\n")

    report.write("most common words distribution:\n\n\t")
    commonw = functions.words_distribution(text)
    report.write('\n\t'.join('{} {}'.format(x[0], x[1]) for x in commonw))

    report.write("\n\n")

    report.write("most common words' length distribution:\n\n\t")
    commonwl = functions.words_length_distribution(text)
    report.write('\n\t'.join('{} {}'.format(x[0], x[1]) for x in commonwl))

    report.write("\n\n")

    report.write("deviation standard: {}\n".format(
        functions.standard_dev(text)))
    report.write("lexical density: {}\n\n".format(
        functions.lexical_density(text)))

    report.write("Automated Readability Index (ARI): {}\n".format(
        functions.ARI(text)))

    report.write("\ncompare the score to the following guide.\n\n")

    report.write(
        tabulate(
            [["SCORE", "AGE", "GRADE LEVEL"], ["1", "5-6", "Kindergarten"],
             ["2", "6-7", "First/Second Grade"], ["3", "7-9", "Third Grade"],
             ["4", "9-10", "Fourth Grade"], ["5", "10-11", "Fifth Grade"],
             ["6", "11-12", "Sixth Grade"], ["7", "12-13", "Seventh Grade"],
             ["8", "13-14", "Eighth Grade"], ["9", "14-15", "Ninth Grade"],
             ["10", "15-16", "Tenth Grade"], ["11", "16-17", "Eleventh Grade"],
             ["12", "17-18", "Twelfth grade"],
             ["13", "18-24", "College student"], ["14", "24++", "Professor"]],
            headers="firstrow",
            tablefmt="psql"))

    report.write("\n\nEND.")
    report.close()
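For reference, the Automated Readability Index that the report prints is conventionally computed as 4.71 * (characters/words) + 0.5 * (words/sentences) - 21.43. A sketch of what `functions.ARI` might look like under that assumption (the counting helpers are simplified stand-ins):

import math

def ARI(text):
    # Conventional ARI formula; the counting below is a simplified stand-in
    # for whatever the real functions module does.
    words = max(1, len(text.split()))
    sentences = max(1, text.count('.') + text.count('!') + text.count('?'))
    chars = sum(1 for c in text if not c.isspace())
    return math.ceil(4.71 * (chars / words) + 0.5 * (words / sentences) - 21.43)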
Example #4
def list(request):
    profile = request.user.get_profile()
    time_delta = datetime.timedelta(hours=profile.hours_per_goal)
    last_post = profile.flag_time - time_delta
    if datetime.datetime.now() - last_post > datetime.timedelta(hours=24):
        profile.consecutive_days = 0
        profile.save()
    entries = Entry.objects.filter(creator=profile.user).order_by('-create_time')
    words_written = 0

    for entry in entries:
        words_written += word_count(entry.text)
    return render_to_response("list.html", {'entries':entries, 'profile':profile, 'words_written':words_written})
Example #5
def no_commits(request):
    profile = request.user.get_profile()
    message = "We don't see any commits. Write something or commit again!"
    if request.method == 'POST':
        form = EntryForm(request.POST)
        if form.is_valid():
            entry = Entry()
            entry.text = form.cleaned_data['text']
            entry.creator = UserProfile.objects.get(user=profile.user)
            entry.create_time = datetime.datetime.now()
            entry.save()
            if word_count(entry.text) > profile.word_goal:
                time_delta = datetime.timedelta(hours=profile.hours_per_goal)
                profile.flag_time = entry.create_time + time_delta
                profile.flag = True
                profile.save()
            return redirect('/entries/')
    else:
        form = EntryForm()
    return render_to_response("write.html", {'form':form, 'profile':profile, 'message':message}, context_instance=RequestContext(request))
Example #6
# NOTE: this snippet begins mid-call; the sidebar widget that assigns
# `option` is truncated in the source.
                               value=session_state.a,
                               key=9)
dtotal = pd.read_csv('cleaned_df.csv')

submit = st.sidebar.button('Search', key=1)
if submit:
    session_state.a = option
try:
    dtotal = dtotal[dtotal['title'].astype(str).str.contains(option)]
except Exception:  # no title match or bad pattern: keep the unfiltered frame
    pass

total_length = len(dtotal)
dtotal2 = dtotal['description']
dtotal1 = functions.clean(dtotal2)
dtotal1 = functions.word_count(dtotal1)
c_let3 = functions.cleanC(dtotal2)
c_p3 = functions.C_plus(c_let3)
c_s3 = functions.C_sharp(c_let3)
test3a = Counter(c_p3) + Counter(c_s3)

ctotal = Counter(dtotal1) + Counter(test3a)
total = sum(ctotal.values())
Ctotaldict = [(i, ctotal[i] / total * 100.0) for i in ctotal]

total_result = pd.DataFrame(Ctotaldict, columns=['Tech', 'Percentage'])

total_resulty = pd.DataFrame(Ctotaldict, columns=['Tech', 'Percentage'])
total_resulty = total_resulty.set_index('Tech', drop=True)
total_result_chart = total_result.sort_values('Percentage',
                                              ascending=False).head(10)
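The `Counter` arithmetic above merges the keyword tallies and converts them to percentage shares. A self-contained illustration of the same pattern:

from collections import Counter

langs = Counter({'python': 6, 'java': 2})
extras = Counter({'c++': 2})
merged = langs + extras                         # Counter addition sums counts
total = sum(merged.values())
shares = [(k, merged[k] / total * 100.0) for k in merged]
print(shares)   # [('python', 60.0), ('java', 20.0), ('c++', 20.0)]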
Example #7
def callback(eventObject):
    print(combo.get())
    if combo.get() == 'tokenization':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        output = functions.token(textWidget.get(1.0, tk.END))
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=10)
        display.insert(
            tk.END,
            'DESCRIPTION: Tokenization is the process of breaking a text document down into tokens.\n\n'
        )
        for x in output:
            display.insert(tk.END, '- ' + x + '\n')

    elif combo.get() == 'tokenization (stop words)':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        output = functions.del_stopwords(textWidget.get(1.0, tk.END))
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=10)
        display.insert(
            tk.END,
            'DESCRIPTION: Tokenization is the process of breaking a text document down into tokens. '
            'Stop words - usually the most common words in a language - are filtered out before or after processing of a text.\n\n'
        )
        for x in output:
            display.insert(tk.END, '- ' + x + '\n')

    elif combo.get() == 'stemming':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        output = functions.stemming(textWidget.get(1.0, tk.END))
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=10)
        display.insert(
            tk.END,
            'DESCRIPTION: Stemming reduces inflected words to their word stem, base, or root form.\n\n'
        )
        for x in output:
            display.insert(tk.END, '- ' + x + '\n')

    elif combo.get() == 'lemmatisation':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        output = functions.lemmas(textWidget.get(1.0, tk.END))
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=10)
        display.insert(
            tk.END,
            'DESCRIPTION: Lemmatisation is the algorithmic process of determining the lemma of a word based on its intended meaning.\n\n'
        )
        for x in output:
            display.insert(tk.END, '- ' + x + '\n')

    elif combo.get() == 'counter':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        words = functions.word_count(textWidget.get(1.0, tk.END))
        sentences = functions.sentence_count(textWidget.get(1.0, tk.END))
        ch_count = functions.char_count_spaces(textWidget.get(1.0, tk.END))
        ch_count_nsp = functions.char_count_nospace(textWidget.get(
            1.0, tk.END))
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=10)
        display.insert(tk.END, 'Number of words: ' + str(words) + '\n')
        display.insert(tk.END, 'Number of sentences: ' + str(sentences) + '\n')
        display.insert(
            tk.END,
            'Number of characters (spaces included): ' + str(ch_count) + '\n')
        display.insert(
            tk.END, 'Number of characters (spaces excluded): ' +
            str(ch_count_nsp) + '\n')

    elif combo.get() == 'words length distribution':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        output = functions.words_length_distribution(
            textWidget.get(1.0, tk.END))
        media = functions.arithmetic_mean(textWidget.get(1.0, tk.END))
        dev_standard = functions.standard_dev(textWidget.get(1.0, tk.END))
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=2)
        display.insert(tk.END, 'most common 10:\n')
        for x in output:
            display.insert(tk.END, '- ' + str(x[0]) + ':' + str(x[1]) + '\n')
        display.insert(tk.END, 'arithmetic mean : ' + str(media) + '\n')
        display.insert(tk.END, 'standard deviation : ' + str(dev_standard))

    elif combo.get() == 'words distribution':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        output = functions.words_distribution(textWidget.get(1.0, tk.END))
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=2)
        display.insert(tk.END, 'most common 10:\n')
        for x in output:
            display.insert(tk.END, '- ' + str(x[0]) + ':' + str(x[1]) + '\n')

    elif combo.get() == 'POS tagging':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        output = functions.pos_tagging(textWidget.get(1.0, tk.END))
        print(output)
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=10)
        display.insert(
            tk.END,
            'DESCRIPTION: Part-of-speech tagging is the process of marking up a word in a text (corpus) as corresponding to a particular part of speech.\n\n'
        )
        for x in output:
            display.insert(tk.END, '- ' + str(x[0]) + ':' + str(x[1]) + '\n')

    elif combo.get() == 'lexical density':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        output = functions.lexical_density(textWidget.get(1.0, tk.END))
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=10)
        display.insert(
            tk.END,
            'DESCRIPTION: Lexical density estimates the linguistic complexity in a written or spoken composition from the functional words (grammatical units) and content words (lexical units, lexemes).\n\n'
        )
        display.insert(tk.END, 'Lexical density is: ' + str(output) + '\n')

    elif combo.get() == 'ARI':
        option_label = tk.Label(window,
                                text=combo.get(),
                                font=("Helvetica", 12))
        option_label.grid(row=5, column=0)
        output = functions.ARI(textWidget.get(1.0, tk.END))
        display = tk.Text(window, width=80, height=10)
        display.grid(row=6, column=0, padx=10)
        display.insert(
            tk.END,
            'DESCRIPTION: The Automated Readability Index (ARI) is a readability test for English texts, designed to gauge the understandability of a text.\n\n'
        )
        display.insert(tk.END,
                       'Automated Readability Index is: ' + str(output) + '\n')
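For context, `callback` is presumably bound to a `ttk.Combobox` selection event. A minimal wiring sketch (the widget names match the snippet; the layout and option list are assumptions drawn from the branches above):

import tkinter as tk
from tkinter import ttk

window = tk.Tk()
textWidget = tk.Text(window, width=80, height=10)
textWidget.grid(row=1, column=0, padx=10)

combo = ttk.Combobox(window, state='readonly', values=[
    'tokenization', 'tokenization (stop words)', 'stemming', 'lemmatisation',
    'counter', 'words length distribution', 'words distribution',
    'POS tagging', 'lexical density', 'ARI',
])
combo.grid(row=2, column=0)
combo.bind('<<ComboboxSelected>>', callback)  # fires callback(eventObject)

window.mainloop()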