示例#1
0
def make_statistic(topK, is_test=False):
    """Build the daily sentiment-report email and return (subject, html)."""
    # Resolve the start-time parameter from today's date.
    get_start_time(utils.current_date())
    # Pull every feedback record: saved as an xlsx attachment and as the raw
    # title/content text file later used for keyword extraction.
    get_info()
    # Emit today's feature tables/charts and collect the daily counts.
    stuck, danmu, crash, total = out_put_today_statistic(utils.current_date())
    # Table of problems filed against the newest app version.
    new_version_html, new_version_problems = utils.output_new_version_table(
        NEW_VERSION)
    # Merge the new-version problem ids into the global problem-id list.
    add_new_version_problems_into_problems_list(new_version_problems)
    # When any crash feedback exists, force the "crash" keyword in as well.
    extra_keyword = u'闪退' if crash > 0 else ''
    # Recommended content for every extracted keyword.
    key_word_table, key_words = get_all_key_word_content(topK, extra_keyword)
    # Email subject line.
    sub = 'iOS组-舆情平台日报'
    # Render the email body html.
    html = utils.make_email_html(
        sub + utils.current_date().strftime(" %Y.%m.%d"),
        key_words,
        key_word_table,
        str(total),
        str(crash),
        str(stuck),
        str(danmu),
        new_version_table=new_version_html)
    # Pre-create tomorrow's extra-summary content file.
    utils.create_next_day_additions_file()
    return sub, html
示例#2
0
def get_info():
    page = 1
    max_page = 100
    f = open(utils.get_file_path(date=utils.current_date()), 'w')
    dic = utils.get_output_total_info_dic()
    while page <= max_page:
        print str(page)
        url = "http://yuqing.dz11.com/Home/Nav/getUserFeedbackList?channel=ios&startTime=" + start_time + "%2000%3A00%3A00&endTime=" + end_time + "%2023%3A59%3A59&pageNum=" \
              + str(page) + "&pageSize=20"
        print "当前请求URL: " + url
        try:
            request = urllib2.Request(url)
            response = urllib2.urlopen(request)
            result = json.loads(response.read().decode('utf-8'))
            max_page = int(result['data']['total']) / 20 + 1
            page += 1
            for record in result['data']['records']:
                utils.handle_total_info_data(dic, record)
                f.write(record['title'].encode('utf-8') + ' ')
                f.write(record['content'].encode('utf-8') + '\n')
        except urllib2.URLError, e:
            if hasattr(e, "code"):
                print e.code
            if hasattr(e, "reason"):
                print e.reason
示例#3
0
class Link(db.Model):
    """A shortened link: the original URL, its hash, creation day and TTL."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # URL the user submitted.
    entered_link = db.Column(db.String(2048))
    # Short hash identifying the link.
    generated_hash = db.Column(db.String(50))
    # Pass the callable, NOT its result: `current_date()` would be evaluated
    # once at class-definition time and freeze that single date as the
    # default for every row inserted afterwards.
    generated_day = db.Column(db.Date(),
                              default=current_date,
                              nullable=False)
    # Days the generated hash stays valid.
    hash_lifetime = db.Column(db.Integer(), default=90, nullable=False)
def gen_data_pool(dataset_name, dataset_dir, path, test_size=0.2, val_size=0.25, pool_size=30):
    """Create `pool_size` independent train/val/test splits of a dataset and
    pickle them as one pool.

    Returns (pool_dict, pickle_file_path).
    """
    now = datetime.datetime.now()
    date = current_date(now)
    pool_name = dataset_name + '_split_' + str(pool_size) + '_' + str(date)
    pool = {'pool_name': pool_name, 'data': {}}

    for split_idx in range(pool_size):
        print("Generate dataset split %sth" % str(split_idx + 1))
        dataset = MyDataset(dataset_dir, test_size, val_size)

        (train_files, train_labels, train_label_names,
         val_files, val_labels, val_label_names,
         test_files, test_labels, test_label_names,
         class_names) = dataset.get_data()

        # Per-subset distribution reports, in the original train/val/test order.
        reports = {}
        for subset, names in (('train', train_label_names),
                              ('val', val_label_names),
                              ('test', test_label_names)):
            reports[subset] = dataset.data_split_report(names, subset)

        split_record = {
            'data_name': dataset_name + '_' + str(split_idx) + '_' + date,
            'train_files': train_files,
            'train_labels': train_labels,
            'train_label_names': train_label_names,
            'train_report': reports['train'],
            'test_files': test_files,
            'test_labels': test_labels,
            'test_label_names': test_label_names,
            'test_report': reports['test'],
            'val_files': val_files,
            'val_labels': val_labels,
            'val_label_names': val_label_names,
            'val_report': reports['val'],
            'class_names': class_names,
        }

        pool['data'][str(split_idx)] = split_record
        print('Appended split %sth to pool' % str(split_idx + 1))
        print('____________________________________')

    # Pickle the whole pool under <path>/<pool_name>.
    out_path = os.path.join(path, pool_name)
    filepath = dump_pickle(pool, out_path)
    return pool, filepath
def validate_reasonable_age(fams, indis):
    """Flag individuals older than 150 years.

    Living individuals (no DEAT record) are aged against the current date.
    `fams` is unused here but kept for the shared validator signature.
    Returns a list of (individual_id, message) tuples.
    """
    max_lifetime = 150
    violations = []

    for iid, record in indis.items():
        if record['BIRT'] is None:
            continue  # without a birth date the age cannot be computed

        born = utils.parse_date(record['BIRT'])
        died = utils.current_date()
        if record['DEAT'] is not None:
            died = utils.parse_date(record['DEAT'])

        if utils.get_age(born, died) > max_lifetime:
            violations.append(
                (iid,
                 f'Individual id={iid} is older than {max_lifetime} years'))

    return violations
示例#6
0
 def __init__(self, url, destination):
     """Set up a download task in its idle, not-yet-started state."""
     # Source and target of the transfer.
     self.url = url
     self.destination = destination
     # Metadata filled in once the server responds.
     self.file_name = "<Requesting...>"
     self.type = None
     self.total_size = 1
     self.resumable = False
     # Live transfer state.
     self.current_size = 0
     self.progress = 0
     self.speed = "0 KB/s"
     self.status = 'idle'
     # Bookkeeping and request plumbing.
     self.added = current_date()
     self.headers = {}
     self.timeout = 60
     self.request = None
     self.pointer = None
     self.scheduler = None
     # NOTE(review): disabled settings overrides kept from the original:
     # SettingsManager.SETTINGS["Network"]["Traffic"]["limit_numeric"] = 400
     # SettingsManager.SETTINGS["System"]["Downloads"]["Retries"] = 10
示例#7
0
def get_all_key_word_content(keyword_count, additon_key_word=''):
    """Extract the top keywords from today's feedback text and build the
    recommended-content table for each one.

    Returns (key_word_table, comma_joined_keywords_utf8).
    """
    # Top-K keywords via jieba over today's raw feedback file.
    tags = extract_tags.get_topK_words(
        utils.get_file_path(date=utils.current_date()), keyword_count)

    # A caller-supplied extra keyword (e.g. "crash") goes first in the list.
    if len(additon_key_word) > 0:
        tags.insert(0, additon_key_word)

    keywords = ",".join(tags).encode('utf-8')

    titles, contents, device, version, ids = [], [], [], [], []
    for tag in tags:
        title, content, dev, ver, index_id = get_key_words_content(
            utils.convert_to_utf8(tag))
        # Keywords that matched no content are dropped from the table.
        if content is None or len(content) == 0:
            continue
        titles.append(title)
        contents.append(content)
        device.append(dev)
        version.append(ver)
        ids.append(index_id)

    labels = [
        utils.TITLE_NAME, utils.CONTENT_NAME, utils.DEVICE_NAME,
        utils.VERSION_NAME
    ]
    columns = dict(zip(labels, (titles, contents, device, version)))
    key_word_table = utils.output_keyword_table(columns, labels, ids)
    return key_word_table, keywords
示例#8
0
def index():
    """Render the race calendar for the year given in the 'year' query arg."""
    requested_year = request.args.get('year')
    races_for_year = season.race_calendar(requested_year)
    return render_template('calender.html',
                           races=races_for_year,
                           today=utils.current_date())
示例#9
0
def totoal_info_excel_file_name():
    """Return (full_path, file_name) for today's .xlsx spreadsheet.

    NOTE: the "totoal" typo is part of the public name; callers rely on it.
    """
    suffix = '.xlsx'
    today = utils.current_date()
    full_path = utils.get_file_path(suffix, today)
    base_name = utils.get_file_prefix_name(suffix, today)
    return full_path, base_name
示例#10
0
    new_version_html, new_version_problems = utils.output_new_version_table(
        NEW_VERSION)
    # 将新版本的问题id添加到问题id list中
    add_new_version_problems_into_problems_list(new_version_problems)
    # 添加额外关键词
    additon_keyword = ''
    # 如果有闪退的反馈,增加闪退关键词
    if crash > 0:
        additon_keyword = u'闪退'
    # 获取所有关键词的推荐内容
    key_word_table, key_words = get_all_key_word_content(topK, additon_keyword)
    # 邮件主题
    sub = 'iOS组-舆情平台日报'
    # 邮件的内容html文件
    html = utils.make_email_html(sub +
                                 utils.current_date().strftime(" %Y.%m.%d"),
                                 key_words,
                                 key_word_table,
                                 str(total),
                                 str(crash),
                                 str(stuck),
                                 str(danmu),
                                 new_version_table=new_version_html)
    # 生成明日的额外概要内容文件
    utils.create_next_day_additions_file()
    return sub, html


# Ad-hoc entry point: resolve today's start time, then build the
# recommended-content table for the top 8 keywords.
if __name__ == '__main__':
    get_start_time(utils.current_date())
    get_all_key_word_content(8)
def do_task():
    """Refresh the 60-minute K-line data for every tracked stock."""
    for symbol in STOCK_LIST:
        # The current date is re-read per stock, matching the original timing.
        data_fetcher.handle_sixty_minute_k(utils.current_date(), symbol,
                                           HOUR_HIST_DOT)
 def check(date_str, is_indi, oid, date_type):
     """Record an error for `oid` when `date_str` lies after today.

     Appends to the enclosing scope's `ret_data`; `is_indi` is unused here.
     """
     parsed = utils.parse_date(date_str)
     if parsed > utils.current_date():
         ret_data.append((oid, f'{date_type} {date_str} occurs in the future'))