def get_fixed_media_read_count(media_id):
    """Sum the normalized daily `uclick_count` values recorded for a media.

    The media id is turned into its canonical site URL and matched against
    DailyMedia rows in the 'statistics' database.
    """
    from statistics.models import DailyMedia

    url = media_utils.gen_site_url(media_id)
    # Query and reduce inside the switch_db scope so the alternate
    # connection is in effect while the (lazy) queryset is evaluated.
    with context_managers.switch_db(DailyMedia, 'statistics') as DailyMedia:
        daily_stats = DailyMedia.objects(site_url=url)
        total = sum(
            normalize_count(stat.uclick_count)
            for stat in daily_stats
            if stat.uclick_count
        )
    return total
def get_unread_comments_count(comment_media_infos):
    """Aggregate unread-comment counts for each media across all synced languages.

    Groups the given media infos by language, queries the comments service
    once per language, and sums the returned counts per media key.

    Returns:
        (ok, reason, mapping) where mapping is {media_key: unread_count}.
        - every language failed  -> (False, '', {})
        - some languages failed  -> (True, 'some language maybe down', partial mapping)
        - all succeeded          -> (True, '', mapping)
    """
    # Group infos by each language they are synced to.
    # (Previously this loop rebound the `comment_media_infos` parameter;
    # a distinct local name avoids that shadowing.)
    lang_infos = {}
    for info in comment_media_infos:
        for lang in info.synced_langs:
            lang_infos.setdefault(lang, []).append(info)

    media_comments_counts_mapping = {}
    error_flag = False
    for lang, infos in lang_infos.items():
        # site_url -> last comment tag seen for this language ('' when none).
        media_infos = {
            media_utils.gen_site_url(info.media_id):
                info.last_comment_tag_mapping.get(lang) or ''
            for info in infos
        }
        status, reason, data = comments_request_generator.wrap_func(
            lang, 'get_unread_comments_count', media_infos)
        if status:
            for key, count in data.items():
                media_comments_counts_mapping[key] = (
                    media_comments_counts_mapping.get(key, 0) + count)
        else:
            # Remember the failure but keep collecting other languages.
            error_flag = True

    if error_flag and not media_comments_counts_mapping:
        return False, '', {}
    if error_flag:
        return True, 'some language maybe down', media_comments_counts_mapping
    return True, '', media_comments_counts_mapping
def get_rt_media_showable_data(media_id):
    """Return (follow_count, read_count) for a media, combining daily stats
    with real-time per-article and per-media stats from the 'statistics' DB.
    """
    from articles.models import Article
    from statistics.models import (RealTimeMedia, DailyMedia, RealTimeArticle)
    from articles.consts import STATUS_PUBLISHED, STATUS_OFFLINE, STATUS_ADMIN_OFFLINE

    site_url = media_utils.gen_site_url(media_id)

    candidates = Article.objects(
        media_id=str(media_id),
        status__in=[STATUS_PUBLISHED, STATUS_OFFLINE, STATUS_ADMIN_OFFLINE],
    ).only('language', 'online_seq_id')
    # groupby requires its input sorted by the grouping key (language);
    # articles without an online_seq_id are dropped before grouping.
    by_language = sorted(
        (article for article in candidates if article.online_seq_id),
        key=lambda article: article.language,
    )

    rt_read_count = 0
    for lang, grouped in groupby(by_language, key=lambda article: article.language):
        seq_ids = [article.online_seq_id for article in grouped]
        with context_managers.switch_db(RealTimeArticle, 'statistics') as RealTimeArticle:
            rt_stats = RealTimeArticle.objects(
                online_seq_id__in=seq_ids, lang=lang).only('read_count')
            rt_read_count += sum(normalize_count(stat.read_count) for stat in rt_stats)

    with context_managers.switch_db(RealTimeMedia, 'statistics') as RealTimeMedia:
        rt_medias = RealTimeMedia.objects(site_url=site_url)
        rt_follow_count = sum(
            normalize_count(rt_media.follow_count if rt_media else 0)
            for rt_media in rt_medias
        )

    with context_managers.switch_db(DailyMedia, 'statistics') as DailyMedia:
        daily_stats = DailyMedia.objects(site_url=site_url)
        follow_count = sum(
            normalize_count(stat.mp_fans_count)
            for stat in daily_stats
            if stat.mp_fans_count
        )
        read_count = sum(
            normalize_count(stat.uclick_count)
            for stat in daily_stats
            if stat.uclick_count
        )

    return follow_count + rt_follow_count, read_count + rt_read_count
def _get_data(self):
    """Build the serializable info payload from the wrapped query_set article."""
    article = self.query_set
    # Missing porn_score defaults to 4 — TODO confirm meaning of this sentinel.
    porn_score = article.porn_score
    if porn_score is None:
        porn_score = 4
    return {
        'id': str(article.id),
        'title': article.title,
        'content': article.content,
        'top_images': self.top_images,
        'related_images': self.related_images,
        'youtube_video_ids': article.youtube_video_ids,
        'source_url': article.source_url,
        'category': article.category,
        'published_at': self.published_time,
        'site_url': media_utils.gen_site_url(article.media_id),
        'site_name': article.get_media().title,
        'porn_score': porn_score,
    }
def site_url(self):
    """Return the canonical site URL derived from this object's id."""
    url = media_utils.gen_site_url(self.id)
    return url