def get_globo_episodes(channel, show, page):
    """Return one page of episodes for a Globo show.

    channel: unused here; kept for a uniform listing interface.
    show: show id (coerced with int()).
    page: 1-based page index over the show's available broadcast days.

    Returns (videos, next_page); next_page is None when there are no
    more days to page through.
    """
    page_size = 1  # one broadcast day per listing page
    videos = []
    properties = ('id', 'title', 'plot', 'duration', 'date')
    prop_data = ('id', 'title', 'description', 'duration', 'exhibited')
    days = get_page(GLOBOPLAT_DAYS % int(show))['days']
    for day_index in range((page - 1) * page_size, page * page_size):
        # The API paginates videos within a day; probe up to 4 sub-pages.
        for page_num in range(1, 5):
            try:
                data = get_page(GLOBOPLAY_VIDEOS % (int(show), days[day_index], page_num))
                for item in data['videos']:
                    video = util.struct(dict(zip(properties, [item.get(p) for p in prop_data])))
                    # update attrs
                    video.date = util.time_format(video.date, '%Y-%m-%d')
                    # 'HH:MM:SS' (or 'MM:SS') string -> total seconds
                    video.duration = sum(int(part) * 60 ** pos
                                         for pos, part in enumerate(reversed(video.duration.split(':'))))
                    video.thumb = EPSTHUMB_URL % video.id
                    videos.append(video)
            except Exception:
                # Best-effort scraping: a missing sub-page or malformed item
                # ends this day's pagination instead of crashing the listing.
                # (Was a bare except; a dead `page_num = page_num + 1` no-op
                # after the loop was also removed.)
                break
    page = (page + 1 if (page * page_size) < len(days) else None)
    return videos, page
def get_globo_episodes(channel, show, page):
    """Return the episodes aired on the page-th available day of a Globo show.

    Iterates the day's video sub-pages until a short (final) page is seen.
    Returns (videos, next_page); next_page is None on the last day.
    """
    videos = []
    properties = ('id', 'title', 'plot', 'duration', 'date')
    prop_data = ('id', 'title', 'description', 'duration', 'exhibited')
    days = get_page(GLOBOPLAT_DAYS % int(show))['days']
    video_page_size = 10  # API page size; a shorter page means "last page"
    size_videos = 10
    page_num = 1
    while size_videos >= video_page_size:
        try:
            data = get_page(GLOBOPLAY_VIDEOS % (int(show), days[page - 1], page_num))
            size_videos = len(data['videos'])
            for item in data['videos']:
                video = util.struct(
                    dict(zip(properties, [item.get(p) for p in prop_data])))
                # update attrs
                video.date = util.time_format(video.date, '%Y-%m-%d')
                # 'HH:MM:SS' (or 'MM:SS') string -> total seconds
                video.duration = sum(int(part) * 60 ** pos
                                     for pos, part in enumerate(reversed(video.duration.split(':'))))
                video.thumb = EPSTHUMB_URL % video.id
                videos.append(video)
            page_num += 1
        except Exception:
            # Best-effort: stop paging on any fetch/parse error instead of
            # crashing the listing (was a bare except, which also trapped
            # SystemExit/KeyboardInterrupt).
            break
    page = (page + 1 if page < len(days) else None)
    return videos, page
def get_gplay_episodes(channel, show, page):
    """List Globosat Play episodes (or a single show record) for *show*.

    ``show`` is a "<id>/<kind>" string; for kind == 'shows' a single show
    record is fetched, otherwise a paginated episode list.
    Returns (videos, next_page); next_page is None when the response has
    no 'next' field.
    """
    videos = []
    properties = ('id', 'title', 'plot', 'duration', 'date', 'mpaa', 'thumb')
    prop_data = ('id_globo_videos', 'title', 'description',
                 'duration_in_milliseconds', 'exhibition_date',
                 'content_rating', 'thumb_image')
    (show_id, show_kind) = show.split('/')
    if show_kind != 'shows':
        # Episode listings additionally carry episode/season/show metadata.
        properties += ('episode', 'season', 'tvshowtitle')
        prop_data += ('number', 'season', 'program')
    headers = {'Authorization': GLOBOSAT_API_AUTHORIZATION}
    apiurl = GLOBOSAT_API_SHOWS if show_kind == 'shows' else GLOBOSAT_API_EPISODES
    data = get_page(apiurl % (int(show_id), page), headers=headers)
    # The shows endpoint returns a single object rather than a result list.
    results = data['results'] if 'results' in data else [data]
    for item in results:
        video = util.struct(
            dict(zip(properties, [item.get(p) for p in prop_data])))
        # update attrs
        video.date = util.time_format(video.date[:10], '%Y-%m-%d')
        video.mpaa = util.getMPAAFromCI(video.mpaa)
        # NOTE(review): for the 'shows' kind, 'tvshowtitle'/'season' were not
        # zipped into the struct above; this assumes util.struct yields a
        # falsy/None attribute for missing keys rather than raising —
        # confirm against util.struct.
        video.tvshowtitle = video.tvshowtitle[
            'title'] if video.tvshowtitle else None
        video.season = video.season['number'] if video.season else None
        # milliseconds -> whole seconds
        video.duration = int(video.duration / 1000)
        videos.append(video)
    page = (page + 1 if 'next' in data else None)
    return videos, page
def onSpendTimer(self, evt):
    """Timer tick handler: advance the elapsed-time counter by one second
    and refresh its on-screen label."""
    self.spendTime += 1
    self.textSpendTime.SetLabel(util.time_format(self.spendTime))
def _get_video_info(self, video_id):
    """Fetch a video's metadata, backfilling 'date' and 'duration' when the
    INFO_URL response omits them."""
    info = scraper.get_page(INFO_URL % video_id)['videos'][0]
    if 'date' not in info:
        # The INFO_URL metadata response carries no original air date.
        info['date'] = util.time_format()
    if 'duration' not in info:
        # Sum each child's first-resource duration (ms -> s); fall back to
        # the video's own entry when it has no children.
        parts = info.get('children') or [info]
        info['duration'] = sum(child['resources'][0]['duration'] / 1000
                               for child in parts)
    return info
def file_list():
    """Return the directory listing for the requested parent path as a
    Successful_response payload."""
    query: FileParams = get_params(request, FileParams, params.parent_path, NN=True)
    entries = FileModel.get_file_list(**filter_null_dict(query))
    items = [
        {
            params.file_name: entry.file_name,
            params.is_dir: entry.is_dir,
            params.update_date: time_format(entry.update_date),
            params.size: size_pack(entry.size),
        }
        for entry in entries
    ]
    payload = {"items": items, params.parent_path: query.parent_path}
    return Successful_response(data=payload)
def get_gplay_episodes(channel, show, page):
    """Return one page of Globosat Play episodes for *show*.

    Returns (videos, next_page); next_page is None when the API reports
    no further page.
    """
    videos = []
    properties = ('id', 'title', 'plot', 'duration', 'date', 'episode',
                  'season', 'mpaa', 'tvshowtitle', 'thumb')
    prop_data = ('id_globo_videos', 'title', 'description',
                 'duration_in_milliseconds', 'exhibition_date', 'number',
                 'season', 'content_rating', 'program', 'thumb_image')
    headers = {'Authorization': GLOBOSAT_API_AUTHORIZATION}
    data = get_page(GLOBOSAT_API_EPISODES % (int(show), page), headers=headers)
    for item in data['results']:
        video = util.struct(dict(zip(properties, [item.get(p) for p in prop_data])))
        # update attrs
        video.date = util.time_format(video.date[:10], '%Y-%m-%d')
        video.mpaa = util.getMPAAFromCI(video.mpaa)
        # Guard a missing 'program' payload the same way 'season' already is,
        # instead of crashing on None.
        video.tvshowtitle = video.tvshowtitle['title'] if video.tvshowtitle else None
        video.season = video.season['number'] if video.season else None
        # milliseconds -> whole seconds
        video.duration = int(video.duration / 1000)
        videos.append(video)
    # .get() tolerates responses that omit the 'next' field entirely.
    page = (page + 1 if data.get('next') else None)
    return videos, page
def get_gplay_episodes(channel, show, page):
    """Return one page of Globosat episodes for channel/show from the
    Portuguese-keyed JSON feed.

    Returns (videos, next_page); next_page is None on the last page.
    """
    videos = []
    properties = ('id', 'title', 'plot', 'duration', 'date', 'episode',
                  'season', 'mpaa', 'tvshowtitle')
    prop_data = ('id', 'titulo', 'descricao', 'duracao_original',
                 'data_exibicao', 'episodio', 'temporada',
                 'classificacao_indicativa', 'programa')
    data = get_page(GLOBOSAT_EPS_JSON % ('%s/%s' % (channel, show), page))
    for item in data['resultado']:
        video = util.struct(dict(zip(properties, [item.get(p) for p in prop_data])))
        # update attrs
        video.date = util.time_format(video.date[:10], '%Y-%m-%d')
        video.mpaa = util.getMPAAFromCI(video.mpaa)
        # 'programa' may be absent from an item (item.get -> None); don't
        # crash subscripting None.
        video.tvshowtitle = video.tvshowtitle['titulo'] if video.tvshowtitle else None
        # milliseconds -> whole seconds
        video.duration = int(video.duration / 1000)
        video.thumb = EPSTHUMB_URL % video.id
        videos.append(video)
    page = (page + 1 if page < data['total_paginas'] else None)
    return videos, page
def get_gplay_episodes(channel, show, page):
    """Fetch one page of episodes for channel/show from the Globosat JSON
    feed and return (videos, next_page)."""
    field_map = (
        ('id', 'id'),
        ('title', 'titulo'),
        ('plot', 'descricao'),
        ('duration', 'duracao_original'),
        ('date', 'data_exibicao'),
    )
    data = get_page(GLOBOSAT_EPS_JSON % ('%s/%s' % (channel, show), page))
    videos = []
    for entry in data['resultado']:
        video = util.struct({attr: entry.get(key) for attr, key in field_map})
        video.date = util.time_format(video.date[:10], '%Y-%m-%d')
        video.duration = int(video.duration / 1000)  # milliseconds -> seconds
        video.thumb = EPSTHUMB_URL % video.id
        videos.append(video)
    next_page = page + 1 if page < data['total_paginas'] else None
    return videos, next_page
def get_gplay_episodes(channel, show, page):
    """Return (videos, next_page) for one page of a Globosat show's episodes."""
    attrs = ('id', 'title', 'plot', 'duration', 'date')
    keys = ('id', 'titulo', 'descricao', 'duracao_original', 'data_exibicao')
    url = GLOBOSAT_EPS_JSON % ('%s/%s' % (channel, show), page)
    data = get_page(url)

    def build(raw):
        # Map the Portuguese feed keys onto our attribute names.
        v = util.struct(dict(zip(attrs, (raw.get(k) for k in keys))))
        v.date = util.time_format(v.date[:10], '%Y-%m-%d')
        v.duration = int(v.duration / 1000)  # milliseconds -> seconds
        v.thumb = EPSTHUMB_URL % v.id
        return v

    videos = [build(raw) for raw in data['resultado']]
    has_more = page < data['total_paginas']
    return videos, (page + 1 if has_more else None)
def get_globo_episodes(channel, show, page):
    """Return one page of Globo TV episodes for *show*.

    Returns (videos, next_page); next_page is None once the feed returns
    fewer than a full page of items.
    """
    page_size = 10  # feed page size; a short page signals the last one
    videos = []
    properties = ('id', 'title', 'plot', 'duration', 'date')
    prop_data = ('id', 'titulo', 'descricao', 'duracao', 'exibicao')
    data = get_page(GLOBOTV_EPS_JSON % (show, page))
    for item in data:
        try:
            video = util.struct(dict(zip(properties, [item.get(p) for p in prop_data])))
            # update attrs
            video.date = util.time_format(video.date, '%d/%m/%Y')
            # 'HH:MM:SS' (or 'MM:SS') string -> total seconds
            video.duration = sum(int(part) * 60 ** pos
                                 for pos, part in enumerate(reversed(video.duration.split(':'))))
            video.thumb = EPSTHUMB_URL % video.id
            videos.append(video)
        except Exception:
            # Best-effort parsing: a malformed item ends the listing early,
            # as before, but no longer via a bare except that also trapped
            # SystemExit/KeyboardInterrupt.
            break
    page = (page + 1 if len(data) == page_size else None)
    return videos, page
def get_globo_episodes(channel, show, page):
    """Return one page of Globo TV episodes for *show* from the JSON feed.

    Returns (videos, next_page); next_page is None when the feed yields a
    short (final) page.
    """
    page_size = 10  # feed page size; fewer items means the last page
    videos = []
    properties = ('id', 'title', 'plot', 'duration', 'date')
    prop_data = ('id', 'titulo', 'descricao', 'duracao', 'exibicao')
    data = get_page(GLOBOTV_EPS_JSON % (show, page))
    for item in data:
        try:
            video = util.struct(
                dict(zip(properties, [item.get(p) for p in prop_data])))
            # update attrs
            video.date = util.time_format(video.date, '%d/%m/%Y')
            # 'HH:MM:SS' (or 'MM:SS') string -> total seconds
            video.duration = sum(int(part) * 60 ** pos
                                 for pos, part in enumerate(reversed(video.duration.split(':'))))
            video.thumb = EPSTHUMB_URL % video.id
            videos.append(video)
        except Exception:
            # Keep the original best-effort behavior (stop on the first bad
            # item) while narrowing the former bare except.
            break
    page = (page + 1 if len(data) == page_size else None)
    return videos, page
def OnGetItemText(self, idx, col):
    """Virtual-list callback: return the text for cell (idx, col).

    Column 0 shows the record id, column 1 its formatted time; any other
    column yields None, as before.
    """
    row = self.data[idx]
    if col == 0:
        return unicode(row['id'])
    if col == 1:
        return util.time_format(row['time'])
set_default_language() #initial user.InitUserInfo() #user info mode = util.config.get('APP', 'level', 'easy') user = util.config.get('APP', 'user', '') time = 0 cur_puzzle = [] if app.bRecordLastPuzzle and app.lastPuzzle: _id = app.lastPuzzle['id'] time = app.lastPuzzle['time'] puzzle = util.str2puzzle(app.lastPuzzle['puzzleDefault']) cur_puzzle = util.str2puzzle(app.lastPuzzle['puzzleCurrent']) logger.info('RecordLastPuzzle! id=%d, time=%s', _id, util.time_format(time)) logger.info('puzzle=%s', app.lastPuzzle['puzzleDefault']) logger.info('cur puzzle=%s', app.lastPuzzle['puzzleCurrent']) else: #puzzle loader _id, puzzle = get_puzzle_loader().pick(mode) #anim anim.InitAnimManager() #frame frame = MainFrame(None, 'Sudoku Boxer') frame.setDefault(_id, puzzle, cur_puzzle) if user: frame._setUser(user) if time:
def visualize(voteup, ans_time, qid, title):
    """Build and render an answer-statistics chart for a Zhihu question.

    voteup: per-answer upvote counts; ans_time: matching answer timestamps;
    qid: question id (used for the title link and output filename);
    title: chart title.
    Renders '<qid>-回答统计分析.html' as a side effect and returns the Bar
    chart (with the Scatter overlaid).
    """
    # Sort all answers by time and get each answer's year-month-day date.
    d, v = data_sort(ans_time, voteup)
    dates = []
    for i in d:
        dates.append(time_format(i))
    # Take the top-n most-upvoted answers as chart mark points.
    n = 5
    max_voteup = []
    max_dates = []
    z = sorted(zip(v, dates), reverse=True)
    for i, j in z[:n]:
        max_voteup.append(i)
        max_dates.append(j)
    max_data = []
    for i in range(n):
        markpoint = {}
        markpoint['coord'] = [max_dates[i], max_voteup[i]]
        markpoint['name'] = str(i + 1)
        max_data.append(markpoint)
    # Group-by statistics per day.
    # NOTE: the Chinese dict keys ('日期' = date, '赞数' = upvotes) are
    # runtime column names and must stay as-is.
    data = {'日期': dates, '赞数': v}
    df = pd.DataFrame(data)
    groupnum = df.groupby(['日期']).size()
    voteupsum = df['赞数'].groupby(df['日期']).sum()
    voteupmax = df['赞数'].groupby(df['日期']).max()
    voteupmean = df['赞数'].groupby(df['日期']).mean()
    # Iterating a Series yields values by default; the index (the dates)
    # must be pulled out explicitly.
    x = list(groupnum.index)
    y1 = list(groupnum)
    y2 = list(voteupsum)
    y3 = list(voteupmax)
    y4 = list(map(int, voteupmean))
    # Fill in the missing calendar dates (presumably zero-padding the
    # series to a contiguous date range — confirm against data_full).
    x0, y1 = data_full(x, y1)
    _, y2 = data_full(x, y2)
    _, y3 = data_full(x, y3)
    _, y4 = data_full(x, y4)
    # Relative day-number list for the secondary x axis.
    x1 = list(range(len(x0)))
    bar = (
        Bar(init_opts=opts.InitOpts(width="700px", height="350px"))
        .add_xaxis(x0)
        .add_yaxis('回答数', y1)
        .extend_axis(xaxis_data=x1, xaxis=opts.AxisOpts(
            axistick_opts=opts.AxisTickOpts(is_align_with_label=True),
            axisline_opts=opts.AxisLineOpts(
                is_on_zero=False, linestyle_opts=opts.LineStyleOpts()
            ),
        ))
        .extend_axis(yaxis=opts.AxisOpts(axislabel_opts=opts.LabelOpts()))
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            # Title links back to the question page.
            title_opts=opts.TitleOpts(title=title, title_link=f'https://www.zhihu.com/question/{qid}'),
            xaxis_opts=opts.AxisOpts(
                axistick_opts=opts.AxisTickOpts(is_align_with_label=True),
                axisline_opts=opts.AxisLineOpts(
                    is_on_zero=False, linestyle_opts=opts.LineStyleOpts()
                ),
            ),
            yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts()),
            tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="cross"),
            legend_opts=opts.LegendOpts(pos_left="right"))
    )
    # Scatter overlay on the secondary y axis: daily total / max / mean
    # upvotes, with the top-n answers pinned as mark points.
    scatter = (
        Scatter()
        .add_xaxis(x0)
        .add_yaxis("当日总赞数", y2, yaxis_index=1)
        .add_yaxis("当日最高赞数", y3,
                   yaxis_index=1, markpoint_opts=opts.MarkPointOpts(data=max_data, symbol='pin', symbol_size=50, label_opts=opts.LabelOpts(color='#fff')))
        .add_yaxis("当日平均赞数", y4, yaxis_index=1)
    )
    bar.overlap(scatter)
    filename = f'{qid}-回答统计分析.html'
    bar.render(filename)
    print(f'{qid}-图表创建成功')
    return bar