Example No. 1
def sort_article_list(select_date):
    '''Filter the articles by the selected date.
    '''
    articles = db.get_articles()
    sorted_articles = []
    for article in articles:
        if article['data_create'] == select_date:
            sorted_articles.append(article)
    update_article_list(sorted_articles)
Example No. 2
def request_user():
    '''Ask the user to confirm deleting the data.
    '''

    articles = db.get_articles()
    answer = messagebox.askyesno(title="Вопрос", message="Удалить данные?")
    if answer:
        articles.clear()
        db.write_articles(articles)
        demo()
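All of the examples here read and write through a small db helper that the listing itself never shows. A minimal sketch of what such a module might look like, assuming the articles are kept as a list of dicts in a JSON file; the file name and the JSON backing are assumptions, not taken from the source project:

# db.py - hypothetical backing module; the JSON file and its name are assumptions
import json
import os

DB_FILE = 'articles.json'


def get_articles():
    # Return the stored list of article dicts, or an empty list on first run.
    if not os.path.exists(DB_FILE):
        return []
    with open(DB_FILE, 'r', encoding='utf-8') as f:
        return json.load(f)


def write_articles(articles):
    # Persist the whole list; default=str turns datetime objects into strings.
    with open(DB_FILE, 'w', encoding='utf-8') as f:
        json.dump(articles, f, ensure_ascii=False, default=str)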
Example No. 3
def result_income():
    '''Sum up all our income items.
    '''
    income_list = []
    articles = db.get_articles()
    for article in articles:
        if article['type_item'] == '+':
            income_list.append(article['cost'])
    summ_income = sum(income_list)

    return summ_income
Example No. 4
def result_expence():
    '''Sum up all our expense items.
    '''
    expence_list = []
    articles = db.get_articles()
    for article in articles:
        if article['type_item'] == '-':
            expence_list.append(article['cost'])
    summ_expence = sum(expence_list)

    return summ_expence
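Taken together, the two helpers above give the current balance; a one-line usage sketch (the variable name is ours, not from the listing):

# Usage sketch: the overall balance is total income minus total expenses.
balance = result_income() - result_expence()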
Example No. 5
def del_article():
    '''Delete the selected income or expense items
        from the list window and from the stored data.
    '''
    articles = db.get_articles()
    # curselection() returns the indices of the rows selected in the Listbox
    select = list(text_result.curselection())
    # delete from the end so the remaining indices stay valid
    select.reverse()
    for i in select:
        text_result.delete(i)
        articles.pop(i)

    db.write_articles(articles)
Example No. 6
def sort_article_range_date(start_date, end_date):
    '''Filter the articles whose creation date falls within the given period.
    '''
    articles = db.get_articles()
    sorted_articles = []
    date_1 = min(start_date, end_date)
    date_2 = max(start_date, end_date)

    # walk the period day by day, including both end dates
    while date_1 <= date_2:
        for article in articles:
            if article['data_create'] == date_1:
                sorted_articles.append(article)
        date_1 += datetime.timedelta(days=1)

    update_article_list(sorted_articles)
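Walking the period day by day only matches articles whose data_create holds a plain date with no time part. A sketch of an equivalent filter that compares each date directly against the bounds, using the same fields and helpers as above:

def sort_article_range_date_simple(start_date, end_date):
    # Keep every article whose creation date lies between the bounds, inclusive.
    date_1, date_2 = min(start_date, end_date), max(start_date, end_date)
    articles = db.get_articles()
    sorted_articles = [a for a in articles
                       if date_1 <= a['data_create'] <= date_2]
    update_article_list(sorted_articles)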
Example No. 7
def update_article_list(articles=None):
    '''Display the records in the list window;
        if no list is passed, show everything stored in the db.
    '''
    text_result.delete(0, END)
    if articles is None:
        articles = db.get_articles()
    index = 0
    for article in articles:
        item_value = article['type_value']
        # show a shortened category name in the row text
        s = '{cost} руб. {type_item} {0} {data_create}'.format(
            get_short_string(item_value), **article)
        text_result.insert(END, s)
        # colour expense rows red and income rows green
        if article['type_item'] == '-':
            text_result.itemconfig(index, bg='red')
        else:
            text_result.itemconfig(index, bg='green')
        index += 1
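get_short_string is called here but never defined in the listing; a minimal sketch of such a helper, assuming it just shortens long category names (the 10-character limit is an assumption):

def get_short_string(value, max_len=10):
    # Hypothetical helper: truncate long category names for the list display.
    return value if len(value) <= max_len else value[:max_len] + '...'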
Example No. 8
def add_type_rashod():
    '''Append an expense record to the stored article list.
    '''
    articles = db.get_articles()
    # catch an invalid (non-numeric) input
    try:
        result_cost = int(ent_rashod_dohod.get())

    except ValueError:
        mb.showerror('Ошибка', 'Должно быть введено число!')
        return
    new_article = {
        'cost': result_cost,
        'type_item': '-',
        'type_value': variable.get(),
        'data_create': datetime.datetime.today()
    }
    #widgets.ent_rashod_dohod.delete(0, END)
    articles.append(new_article)
    db.write_articles(articles)
    update_article_list()
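The income counterpart is not part of the listing; a sketch of what it might look like, mirroring add_type_rashod with '+' as the item type (the name add_type_dohod is a guess):

def add_type_dohod():
    # Hypothetical income counterpart of add_type_rashod (not in the listing).
    articles = db.get_articles()
    try:
        result_cost = int(ent_rashod_dohod.get())
    except ValueError:
        mb.showerror('Ошибка', 'Должно быть введено число!')
        return
    new_article = {
        'cost': result_cost,
        'type_item': '+',
        'type_value': variable.get(),
        'data_create': datetime.datetime.today()
    }
    articles.append(new_article)
    db.write_articles(articles)
    update_article_list()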
Example No. 9
import numpy as np
from sklearn.feature_extraction.text import (
    TfidfVectorizer, CountVectorizer, TfidfTransformer, ENGLISH_STOP_WORDS)
from sklearn.pipeline import Pipeline
from nltk.stem.snowball import SnowballStemmer
from scipy.sparse import hstack
from sklearn.metrics import confusion_matrix, classification_report, auc, roc_curve
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import itertools
import pandas as pd
import rethinkdb as r
import re

articles = get_articles(lambda join: {
    'body': join['left']['body'],
    'title': join['left']['title'],
    'credible': join['right']['isReliable']
})

df = pd.DataFrame.from_records(articles)

with open('./tl_stopwords.txt', 'r') as f:
    TL_STOPWORDS = f.read().splitlines()

STOP_WORDS = ENGLISH_STOP_WORDS.union(TL_STOPWORDS)

body_tfidf = TfidfVectorizer(
    token_pattern=r'(?ui)\b\w*[a-z]{2}\w*\b',
    stop_words=STOP_WORDS,
    ngram_range=(1, 2),
    max_df=0.85,
Example No. 10
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


articles = get_articles(
    lambda join: {
        'body': join['left']['body'],
        'title': join['left']['title'],
        # 'src_social_score': join['right']['socialScore'],
        # 'src_has_impt_pages': r.branch(
        #     join['right']['contactUsUrl'],
        #     1,
        #     join['right']['aboutUsUrl'],
        #     1,
        #     0
        # ),
        'src_has_about': 1 if join['right']['aboutUsUrl'] else 0,
        'src_has_contact': 1 if join['right']['contactUsUrl'] else 0,
        # 'src_wot_reputation': join['right']['wotReputation'],
        # 'src_country_rank': join['right']['countryRank'],
        'src_world_rank': join['right']['worldRank'],
        'src_domain_has_number': 1 if join['right']['domainHasNumber'] else 0,
        'src_domain_is_blog': 1 if join['right']['isBlogDomain'] else 0,
        # 'src_domain_is_suspicious': 1 if join['right']['isDomainSuspicious'] else 0,
        # 'src_domain_creation_date': join['right']['domainCreationDate'],
        'src_id': join['right']['id'],
        'credible': join['right']['isReliable']
    })
train_sources = get_sources_sample(20)

train_articles = [a for a in articles if a['src_id'] not in train_sources]
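The snippet stops right after the split, so what follows is only a sketch: the held-out list is simply the complement of train_articles above, and the classifier choice (LogisticRegression on the article bodies) is an assumption, not something shown in the source.

# Sketch only: the pipeline and classifier below are assumptions.
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer

held_out_articles = [a for a in articles if a['src_id'] in train_sources]

text_clf = Pipeline([
    ('tfidf', TfidfVectorizer(ngram_range=(1, 2), max_df=0.85)),
    ('clf', LogisticRegression(max_iter=1000)),
])
text_clf.fit([a['body'] for a in train_articles],
             [a['credible'] for a in train_articles])
print(text_clf.score([a['body'] for a in held_out_articles],
                     [a['credible'] for a in held_out_articles]))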