def get_recent_sites(request):
    """Render the "recently submitted feed sources" page."""
    user = get_login_user(request)

    # subscriptions of the current user drive the subscribe-button state
    sub_feeds = []
    if user is not None:
        sub_feeds = get_user_subscribe_feeds(user.oauth_id, user_level=user.level)

    recent_sites = Site.objects.filter(status='active').order_by('-id')[:100]

    context = {
        'user': user,
        'sites': recent_sites,
        'user_sub_feeds': sub_feeds,
    }
    return render(request, 'explore/recent_sites.html', context=context)
def get_recent_articles(request):
    """Render the "recently updated articles" page.

    TODO: optimize query performance.
    """
    user = get_login_user(request)

    latest_ids = get_sites_lastids()
    recent_articles = Article.objects.filter(uindex__in=latest_ids)

    # subscriptions of the current user drive the subscribe-button state
    sub_feeds = []
    if user is not None:
        sub_feeds = get_user_subscribe_feeds(user.oauth_id, user_level=user.level)

    context = {
        'articles': recent_articles,
        'user': user,
        'user_sub_feeds': sub_feeds,
    }
    return render(request, 'explore/recent_articles.html', context=context)
def get_my_feeds(request):
    """Return the "my feeds" panel: current subscriptions plus recommendations.

    Visitors track subscriptions client-side (posted as JSON lists); logged-in
    users have them stored server-side.

    :param request: POST with optional JSON lists ``sub_feeds`` / ``unsub_feeds``
        (used only for anonymous visitors).
    :return: rendered ``myfeeds.html``.
    """
    sub_feeds = json.loads(request.POST.get('sub_feeds') or '[]')
    unsub_feeds = json.loads(request.POST.get('unsub_feeds') or '[]')

    user = get_login_user(request)
    # [reached?, limit] — consumed by the template to warn the user
    reach_sub_limit = [False, 0]

    if user is None:
        visitor_sub_feeds = get_visitor_subscribe_feeds(
            tuple(sub_feeds), tuple(unsub_feeds))
        sub_sites = Site.objects.filter(
            status='active', pk__in=visitor_sub_feeds).order_by('-star')
        recom_sites = Site.objects.filter(status='active', star__gte=20).\
            exclude(pk__in=visitor_sub_feeds).order_by('-star')

        # BUGFIX: ">=" instead of "==" — an over-limit state (e.g. after the
        # limit setting was lowered) must still be reported as reached
        if len(visitor_sub_feeds) >= settings.VISITOR_SUBS_LIMIT:
            reach_sub_limit = [True, settings.VISITOR_SUBS_LIMIT]
    else:
        user_sub_feeds = get_user_subscribe_feeds(user.oauth_id, user_level=user.level)
        sub_sites = Site.objects.filter(
            status='active', pk__in=user_sub_feeds).order_by('-star')
        recom_sites = Site.objects.filter(status='active', star__gte=20).\
            exclude(pk__in=user_sub_feeds).order_by('-star')

        # only low-level users are capped; same ">=" fix as above
        if user.level < 10 and len(user_sub_feeds) >= settings.USER_SUBS_LIMIT:
            reach_sub_limit = [True, settings.USER_SUBS_LIMIT]

    context = dict()
    context['sub_sites'] = sub_sites
    context['recom_sites'] = recom_sites
    context['user'] = user
    context['reach_sub_limit'] = reach_sub_limit

    return render(request, 'myfeeds.html', context=context)
def get_recommend_articles(request):
    """Recommend related articles for a given article; logged-in users only.

    TODO: optimize performance.

    :param request: POST with ``id`` — the ``uindex`` of the source article.
    :return: rendered recommendation fragment, ``{}`` when nothing matched,
        or 403 for anonymous users / missing id.
    """
    # BUGFIX: tolerate a missing/empty "id" instead of raising an
    # uncaught KeyError (HTTP 500)
    uindex = int(request.POST.get('id') or 0)
    user = get_login_user(request)

    if uindex and user:
        recommend_articles = []
        relative_articles = list(get_similar_article(uindex).keys())
        user_sub_feeds = get_user_subscribe_feeds(user.oauth_id, user_level=user.level)

        for relative_uindex in relative_articles:
            # BUGFIX: narrowed from a bare `except:`; only a missing
            # article is an expected, skippable condition here
            try:
                article = Article.objects.get(uindex=relative_uindex, status='active')
            except Article.DoesNotExist:
                continue

            # only recommend articles from feeds the user is NOT subscribed to
            if article.site_id not in user_sub_feeds:
                recommend_articles.append(article)

            # cap the recommendation list at 3 items
            if len(recommend_articles) >= 3:
                break

        if recommend_articles:
            logger.info(f'推荐数据条数:`{len(recommend_articles)}`{user.oauth_name}')
            context = dict()
            context['recommend_articles'] = recommend_articles
            return render(request, 'recommend/relative_article.html', context=context)
        else:
            return JsonResponse({})
    return HttpResponseForbidden("No Recommend Data")
def user_subscribe_feed(request):
    """Subscribe the logged-in user to a feed source.

    :param request: POST with ``site_id``.
    :return: JSON ``{"code": 0|1, "msg": ...}`` on success / quota reached,
        or 403 for anonymous users / unknown sites.
    """
    site_id = request.POST.get('site_id', '').strip()[:32]
    user = get_login_user(request)

    # BUGFIX: .filter(...).first() instead of .get(...) — an unknown or
    # inactive site must fall through to the "Param Error" response below,
    # not raise an uncaught Site.DoesNotExist (HTTP 500)
    site = Site.objects.filter(pk=site_id, status='active').first()

    if user and site:
        # enforce the quota for low-level users first
        if user.level < 10:
            subscribed = get_user_subscribe_feeds(
                user.oauth_id, from_user=False, user_level=user.level)
            # ">=" (not "==") so an over-limit state is still rejected
            if len(subscribed) >= settings.USER_SUBS_LIMIT:
                logger.warning(f"已达到订阅上限:`{user.oauth_name}")
                return JsonResponse({
                    "code": 1,
                    "msg": f"已达到 {settings.USER_SUBS_LIMIT} 个订阅数,请先取消一部分!"
                })

        add_user_sub_feeds(user.oauth_id, [site_id, ])

        # refresh the site's content asynchronously
        django_rq.enqueue(update_sites_async, [site.pk, ],
                          result_ttl=1, ttl=3600, failure_ttl=3600)

        logger.warning(f"登陆用户订阅动作:`{user.oauth_name}`{site_id}")
        return JsonResponse({"code": 0, "msg": '订阅成功 ^o^'})
    return HttpResponseForbidden("Param Error")
def index(request):
    """Render the home page, adapted for PC or mobile clients.

    :param request: incoming HTTP request.
    :return: rendered ``index.html`` (PC) or ``mobile/index.html``.
    """
    # PC / mobile detection from the User-Agent header
    ua = parse(request.META.get('HTTP_USER_AGENT', ''))
    is_pc = ua.is_pc

    user = get_login_user(request)

    # which feeds drive the default article list depends on login state
    if user is None:
        sub_feeds = get_visitor_subscribe_feeds('', '')
    else:
        sub_feeds = get_user_subscribe_feeds(user.oauth_id, user_level=user.level)

    preload = 1 if is_pc else 10
    articles = Article.objects.filter(
        status='active', site_id__in=sub_feeds).order_by('-id')[:preload]

    context = {
        'articles': articles,
        'user': user,
        'github_oauth_key': settings.GITHUB_OAUTH_KEY,
    }

    # record where this visit came from
    add_referer_stats(request.META.get('HTTP_REFERER', ''))

    template = 'index.html' if is_pc else 'mobile/index.html'
    return render(request, template, context)
def user_mark_read_all(request):
    """Mark a batch of articles as read; with no ``ids`` mark everything read.

    :param request: POST with an optional JSON list ``ids``.
    :return: refreshed last-week article list, ``{}`` when nothing was
        marked, or 404 for anonymous users.
    """
    ids = json.loads(request.POST.get('ids') or '[]')
    user = get_login_user(request)

    if not user:
        return HttpResponseNotFound("Param Error")

    if not ids:
        # no explicit ids: collect every recent article of every subscribed feed
        feeds = get_user_subscribe_feeds(user.oauth_id, user_level=user.level)
        ids = set()
        for feed_id in feeds:
            ids.update(get_recent_site_articles(feed_id))

    set_user_read_articles(user.oauth_id, ids)

    return get_lastweek_articles(request) if ids else JsonResponse({})
def in_site_search(request):
    """In-site search over feed sources and/or articles via Whoosh indexes.

    :param request: POST with ``keyword`` and ``scope`` in
        ``('all', 'feed', 'article')``.
    :return: rendered search results page for the requested scope,
        403 on a bad scope, 404 on an empty keyword.
    """
    user = get_login_user(request)

    keyword = request.POST.get('keyword', '').strip()
    scope = request.POST.get('scope', 'all')

    logger.warning(f"搜索关键字:`{keyword}")

    # tokenize CJK text before querying the indexes
    keyword = split_cn_words(keyword, join=True)
    logger.info(f"转换后的关键字:`{keyword}")

    if scope not in ('all', 'feed', 'article'):
        return HttpResponseForbidden('Param Error')
    if not keyword:
        return HttpResponseNotFound("Empty Keyword")

    storage = FileStorage(settings.WHOOSH_IDX_DIR)
    rel_sites, rel_articles = None, None

    # search feed sources
    if scope in ('feed', 'all'):
        idx = storage.open_index(indexname="site", schema=whoosh_site_schema)
        qp = MultifieldParser(['cname', 'author', 'brief'], schema=whoosh_site_schema)
        query = qp.parse(keyword)
        sites = []
        with idx.searcher() as s:
            results = s.search(query, limit=50)
            for ret in results:
                sites.append(ret['id'])
        rel_sites = Site.objects.filter(status='active', pk__in=sites).order_by('-star')

    # search articles.  BUGFIX: this was `elif scope == 'article':`, so the
    # "all" scope never produced article results even though its template
    # receives `rel_articles`
    if scope in ('article', 'all'):
        idx = storage.open_index(indexname="article", schema=whoosh_article_schema)
        qp = MultifieldParser(['title', 'author', 'content'], schema=whoosh_article_schema)
        query = qp.parse(keyword)
        articles = []
        with idx.searcher() as s:
            # mask out articles older than 7 days (uindex is a ms timestamp prefix)
            old_mask = TermRange("uindex", None, str(current_ts() - 7 * 86400 * 1000))
            results = s.search(query, mask=old_mask, limit=50)
            for ret in results:
                articles.append(ret['uindex'])
        rel_articles = Article.objects.filter(
            is_recent=True, status='active', uindex__in=articles).iterator()

    # the current user's subscriptions drive the subscribe-button state
    user_sub_feeds = []
    if user:
        user_sub_feeds = get_user_subscribe_feeds(user.oauth_id, user_level=user.level)

    context = dict()
    context['user'] = user
    context['user_sub_feeds'] = user_sub_feeds
    context['rel_sites'] = rel_sites
    context['rel_articles'] = rel_articles
    context['keyword'] = keyword

    if scope == 'all':
        return render(request, 'search/search.html', context=context)
    elif scope == 'feed':
        return render(request, 'search/search_feeds.html', context=context)
    elif scope == 'article':
        return render(request, 'search/search_articles.html', context=context)
def get_site_update_view(request):
    """Global updated-sites view: 100 sites for visitors, 200 for logged-in users.

    :param request: POST with optional JSON lists ``sub_feeds`` /
        ``unsub_feeds`` (visitors), ``page`` / ``page_size``, and
        ``onlyunread`` (``yes``/``no``).
    :return: rendered site-view fragment, or 404 on a bad page number /
        no subscriptions.
    """
    sub_feeds = json.loads(request.POST.get('sub_feeds') or '[]')
    unsub_feeds = json.loads(request.POST.get('unsub_feeds') or '[]')

    page_size = int(request.POST.get('page_size', 10))
    page = int(request.POST.get('page', 1))
    onlyunread = request.POST.get('onlyunread', 'no') == 'yes'

    user = get_login_user(request)

    if user is None:
        my_feeds = get_visitor_subscribe_feeds(tuple(sub_feeds), tuple(unsub_feeds))
    else:
        my_feeds = get_user_subscribe_feeds(user.oauth_id, user_level=user.level)

    # optionally keep only the feeds that have unread content
    if user and onlyunread:
        my_feeds = get_user_unread_sites(user.oauth_id, my_feeds)

    # most recently updated feeds first
    my_feeds = sorted(my_feeds, key=lambda t: get_site_last_id(t), reverse=True)

    if my_feeds:
        # BUGFIX: Paginator(...) does not validate the page number — it is
        # .page(page) that raises on an invalid page, so both calls must be
        # inside the try or a bad page number becomes an uncaught HTTP 500
        try:
            paginator_obj = Paginator(my_feeds, page_size)
            pg = paginator_obj.page(page)
        except Exception:
            logger.warning(
                f"分页参数错误:`{page}`{page_size}`{sub_feeds}`{unsub_feeds}")
            return HttpResponseNotFound("Page Number Error")
        num_pages = paginator_obj.num_pages

        sites = Site.objects.filter(
            pk__in=pg.object_list, status='active').order_by('-star')[:50]

        # annotate each site with its recent-update stats for the template
        for site in sites:
            recent_articles = get_recent_site_articles(site.pk)
            site.update_count = len(recent_articles)
            site.update_ids = json.dumps(list(recent_articles))
            site.update_time = get_site_last_id(site.pk)
            if user:
                site.unread_count = get_user_unread_count(
                    user.oauth_id, recent_articles)

        context = dict()
        context['pg'] = pg
        context['sites'] = sites
        context['num_pages'] = num_pages
        context['user'] = user

        return render(request, 'left/site_view.html', context=context)
    return HttpResponseNotFound("No Feeds Subscribed")