def get(self):
    """Admin score tool: optionally set one share's suggestscore, then
    recompute and persist a ranking score for EVERY share and dump a
    plain-HTML table of the re-sorted list.

    URL format (after the 7-char route prefix): ``add!<share_id>!<score>``.
    NOTE(review): this rewrites every Share document on each request —
    expensive; presumably an internal/admin-only endpoint.
    """
    # Strip the route prefix; the remainder encodes the action.
    action = self.request.path[7:]
    share_res = Share.find()
    if 'add' in action:
        # action looks like "add!<share_id>!<suggestscore>"
        do, share_id, suggestscore = action.split('!')
        share = Share.by_sid(share_id)
        share.suggestscore = float(suggestscore)
        share.save()
    # Recompute the composite score for all shares and persist each one.
    for share in share_res:
        share.score = 0.001 * share.hitnum + share.likenum - \
            share.dislikenum + 0.5 * share.commentnum - \
            share.status + share.suggestscore + 0.5 * share.id
        share.save()
    # self.write_json({'objs': list(Share.find().sort('score',
    # DESCENDING))})
    share_res = Share.find().sort('score', DESCENDING)
    display = ''
    display += '<p>score sugg hit like dis comment status title id</p>'
    for share in share_res:
        display += '<p>%s %s %s %s %s %s %s %s %s</p>' % (
            share.score, share.suggestscore, share.hitnum, share.likenum,
            share.dislikenum, share.commentnum, share.status, share.title,
            share.id)
    self.write(display)
def get(self):
    """Admin-only site statistics endpoint.

    Writes a JSON object of aggregate counts (shares, tagged shares,
    users, likes, hits).  Non-admins get an empty JSON response.
    """
    user_id = self.current_user["user_id"]
    if user_id not in admin_ids:
        # Not an admin: respond with the default (empty) JSON body.
        self.write_json()
        return
    share_num = Share.find().count()
    share_state1_num = Share.find({'status': {'$gte': 1}}).count()
    # "with tag" is computed as total minus the untagged count.
    share_with_tag_num = share_num - Share.find({'tags': []}).count()
    share_state1_with_tag_num = share_state1_num - \
        Share.find({'status': {'$gte': 1}, 'tags': []}).count()
    user_num = User.find().count()
    user_rss_num = User.find({'user_rss': {'$ne': ''}}).count()
    like_num = Like.find().count()  # todo
    hit_num = Hit.find().count()  # todo
    # Hits in the last 5 days — rough "daily active" proxy.
    hit_num_in_5_day = Hit.find({'hittime': {'$gt': time.time()-86400*5}}).count()
    self.res = {
        'share_num': share_num,
        'share_state1_num': share_state1_num,
        'share_with_tag_num': share_with_tag_num,
        'share_state1_with_tag_num': share_state1_with_tag_num,
        'user_num': user_num,
        'user_rss_num': user_rss_num,
        'like_num': like_num,
        'hit_num': hit_num,
        'hit_num_in_5_day': hit_num_in_5_day,
    }
    self.write_json()
def get(self, action):
    """Duplicate of the score-admin handler, taking ``action`` from the
    route.

    NOTE(review): the ``action`` parameter is immediately overwritten
    from ``self.request.path`` below, so the routed capture group is
    effectively unused — verify which source is intended.
    """
    action = self.request.path[7:]
    share_res = Share.find()
    if 'add' in action:
        # action looks like "add!<share_id>!<suggestscore>"
        do, share_id, suggestscore = action.split('!')
        share = Share.by_sid(share_id)
        share.suggestscore = float(suggestscore)
        share.save()
    # Recompute and persist the composite score for every share.
    for share in share_res:
        share.score = 0.001 * share.hitnum + share.likenum - \
            share.dislikenum + 0.5 * share.commentnum - \
            share.status + share.suggestscore + 0.5 * share.id
        share.save()
    # self.write_json({'objs': list(Share.find().sort('score',
    # DESCENDING))})
    share_res = Share.find().sort('score', DESCENDING)
    display = ''
    display += '<p>score sugg hit like dis comment status title id</p>'
    for share in share_res:
        display += '<p>%s %s %s %s %s %s %s %s %s</p>' % (
            share.score, share.suggestscore, share.hitnum, share.likenum,
            share.dislikenum, share.commentnum, share.status, share.title,
            share.id)
    self.write(display)
def get(self, node='home'):
    """Render the node page with the newest visible shares, 11 per page.

    ``page`` comes from the query string (1-based).  Only shares with
    ``status >= 1`` are listed.
    """
    page = self.get_argument("page", 1)
    share_res = Share.find({'status': {'$gte': 1}}).sort(
        '_id', DESCENDING).limit(11).skip((int(page) - 1) * 11)
    # Fix: floor division — plain `/` yields a float page count on
    # Python 3 (ceil(count / 11) expressed as (count + 10) // 11).
    pagesum = (share_res.count() + 10) // 11
    shares = []
    for share in share_res:
        if share.id in (48, 47):  # hard-coded hidden shares
            continue
        user = User.by_sid(share.user_id)
        share.name = user.user_name
        share.published = time.strftime(
            '%Y-%m-%d %H:%M:%S', time.localtime(share.published))
        share.domain = user.user_domain
        share.markdown = cutter(markdown2.markdown(share.markdown))
        share.title = share.title.split('_')[0]
        shares.append(share)
    self.render(
        "node.html",
        shares=shares,
        pagesum=pagesum,
        page=page,
        node=node,
    )
def todo_get_suggest(share, current_user):
    """Score every share's similarity to ``share`` and return the five
    with the lowest score.

    Scoring mixes id/author distance, engagement counts, a small random
    jitter, shared tags and matching sharetype; shares the user already
    visited are penalized.

    NOTE(review): ascending sort + ``[:5]`` keeps the LOWEST-scored
    posts — confirm whether ``reverse=True`` was intended.
    NOTE(review): ``Hit.find(filter, projection)`` passes the user_id
    dict as the second (projection-style) argument — verify the ORM
    treats it as part of the filter.
    """
    posts = Share.find()
    suggest = []
    for post in posts:
        post.score = 100 + post.id - post.user_id + post.commentnum * 3
        post.score += post.likenum * 4 + post.hitnum * 0.01
        post.score += randint(1, 999) * 0.001  # tie-breaking jitter
        common_tags = [i for i in post.tags.split(
            ' ') if i in share.tags.split(' ')]
        post.score += len(common_tags)
        if post.sharetype == share.sharetype:
            post.score += 1  # todo
        # Fix: initialize so anonymous users don't hit a NameError below.
        is_hitted = False
        if current_user:
            is_hitted = Hit.find(
                {'share_id': share.id},
                {'user_id': int(current_user["user_id"])},
            ).count() > 0
        if is_hitted:
            post.score -= 50
        suggest.append(post)
    suggest.sort(key=lambda obj: obj.get('score'))
    suggest = suggest[:5]
    # Fix: the original computed the list but never returned it.
    return suggest
def todo_get_suggest(share, current_user):
    """Duplicate suggestion scorer: rank all shares against ``share``
    and return the five lowest-scored ones.

    NOTE(review): ascending sort + ``[:5]`` keeps the LOWEST scores —
    confirm whether ``reverse=True`` was intended.
    NOTE(review): the second dict passed to ``Hit.find`` looks like a
    projection argument, not a filter — verify against the ORM.
    """
    posts = Share.find()
    suggest = []
    for post in posts:
        post.score = 100 + post.id - post.user_id + post.commentnum * 3
        post.score += post.likenum * 4 + post.hitnum * 0.01
        post.score += randint(1, 999) * 0.001  # tie-breaking jitter
        common_tags = [
            i for i in post.tags.split(' ') if i in share.tags.split(' ')
        ]
        post.score += len(common_tags)
        if post.sharetype == share.sharetype:
            post.score += 1  # todo
        # Fix: default so anonymous users don't raise NameError below.
        is_hitted = False
        if current_user:
            is_hitted = Hit.find(
                {'share_id': share.id},
                {'user_id': int(current_user["user_id"])},
            ).count() > 0
        if is_hitted:
            post.score -= 50
        suggest.append(post)
    suggest.sort(key=lambda obj: obj.get('score'))
    suggest = suggest[:5]
    # Fix: the original dropped the result on the floor.
    return suggest
def get(self, node):
    """Render a sharetype node page (10 shares/page) plus the 20 newest
    members in the sidebar."""
    page = self.get_argument("page", 1)
    share_res = Share.find({'sharetype': node}).sort(
        '_id', DESCENDING).limit(10).skip((int(page) - 1) * 10)
    # Fix: floor division — `/` produces a float page count on Python 3.
    pagesum = (share_res.count() + 9) // 10
    shares = []
    for share in share_res:
        user = User.by_sid(share.user_id)
        share.name = user.user_name
        share.published = time.strftime(
            '%Y-%m-%d %H:%M:%S', time.localtime(share.published))
        share.domain = user.user_domain
        # Plain-text teaser: strip tags from rendered markdown, first 100 chars.
        share.markdown = filter_tags(
            markdown2.markdown(share.markdown))[:100]
        share.gravatar = get_avatar(user.user_email, 16)
        shares.append(share)
    members = User.find().sort('_id', DESCENDING).limit(20)
    members_dict = []
    for member in members:
        member.gravatar = get_avatar(member.user_email, 25)
        members_dict.append(member)
    node_about = options.node_about[node]
    self.render(
        "node.html", shares=shares, members=members_dict,
        pagesum=pagesum, page=page, node=node, node_about=node_about)
def get(self):
    """Admin share list.  ``?delete=<id>`` removes a share (soft for
    admins, hard for superadmins when ``?s`` is set); ``?s`` switches to
    the superadmin view of non-status-1 shares."""
    user_id = self.current_user['user_id']
    if not admin.is_admin(user_id):
        # Visitors who aren't admins see the application page instead.
        self.render('admin/join_admin.html')
        return
    target_id = self.get_argument("delete", None)
    super_mode = self.get_argument("s", None)
    if target_id:
        if not super_mode:
            admin.delete_share(target_id)
        elif admin.is_superadmin(user_id):
            admin.delete_share_by_s(target_id)
    if super_mode:
        self.render('admin/super_share.html',
                    shares=Share.find({'status': {'$ne': 1}}))
    else:
        self.render('admin/share.html', shares=Share.find())
def get(self):
    """Render the Atom feed: every share with author info and rendered
    markdown content."""
    entries = []
    for entry in Share.find():
        author = User.by_sid(entry.user_id)
        entry.name = author.user_name
        entry.domain = author.user_domain
        # Feed template expects datetime objects, not epoch floats.
        entry.published = datetime.datetime.fromtimestamp(entry.published)
        entry.updated = datetime.datetime.fromtimestamp(entry.updated)
        entry.content = markdown2.markdown(entry.markdown)
        entries.append(entry)
    self.set_header("Content-Type", "application/atom+xml")
    self.render("feed.xml", shares=entries)
def get(self, node):
    """Render a sharetype node page, 11 shares per page, with 400-char
    plain-text teasers."""
    page = self.get_argument("page", 1)
    share_res = Share.find({"sharetype": node}).sort(
        "_id", DESCENDING).limit(11).skip((int(page) - 1) * 11)
    # Fix: floor division — `/` produces a float page count on Python 3.
    pagesum = (share_res.count() + 10) // 11
    shares = []
    for share in share_res:
        user = User.by_sid(share.user_id)
        share.name = user.user_name
        share.published = time.strftime(
            "%Y-%m-%d %H:%M:%S", time.localtime(share.published))
        share.domain = user.user_domain
        share.markdown = filter_tags(markdown2.markdown(share.markdown))[:400]
        share.gravatar = get_avatar(user.user_email, 16)
        shares.append(share)
    node_about = options.node_about[node]
    self.render("node.html", shares=shares, pagesum=pagesum,
                page=page, node=node, node_about=node_about)
def get_share_by_slug(slug):
    """Resolve a share by slug, numeric id, or the special slug
    ``random`` (a random visible share).

    Side effects on a found share: increments ``hitnum``, normalizes
    string ``tags`` into a list, saves, and strips the Mongo ``_id``.
    Returns the share or a falsy value when nothing matches.
    """
    # Special slug: pick a random share with status >= 1.
    if slug == 'random':
        cond = {}
        cond['status'] = {'$gte': 1}
        # shares = Share.find(cond, {'_id': 0})
        shares = Share.find(cond)
        # NOTE(review): raises IndexError if no share matches — confirm
        # at least one visible share always exists.
        share = random.choice(list(shares))
    elif slug.isdigit():
        share = Share.by_sid(slug)
    else:
        share = Share.by_slug(slug)
    if share:
        share.hitnum += 1
        # Legacy documents store tags as a space-separated string.
        if isinstance(share.tags, str):
            share.tags = share.tags.split()
        share.save()
        share.pop('_id')
    return share
def get(self, node):
    """Render a sharetype node page, 11 shares per page, with
    ``cutter``-trimmed markdown previews."""
    page = self.get_argument("page", 1)
    share_res = Share.find({'sharetype': node}).sort(
        '_id', DESCENDING).limit(11).skip((int(page) - 1) * 11)
    # Fix: floor division — `/` produces a float page count on Python 3.
    pagesum = (share_res.count() + 10) // 11
    shares = []
    for share in share_res:
        user = User.by_sid(share.user_id)
        share.name = user.user_name
        share.published = time.strftime(
            '%Y-%m-%d %H:%M:%S', time.localtime(share.published))
        share.domain = user.user_domain
        share.markdown = cutter(markdown2.markdown(share.markdown))
        shares.append(share)
    node_info = options.node_about[node]
    self.render(
        "node.html", shares=shares, pagesum=pagesum,
        page=page, node=node, node_info=node_info)
def get(self):
    """Admin share list filtered by ``status``; supports deletion via
    ``?delete=<id>``.

    ``?status=100`` is a magic value meaning "status > 3"; ``?s`` marks
    superadmin mode (hard delete + superadmin template).
    """
    user_id = self.current_user['user_id']
    delete_share_id = self.get_argument("delete", None)
    status = self.get_argument("status", 0)
    status = int(status)
    s = self.get_argument("s", None)  # superadmin flag
    if admin.is_admin(user_id):
        if delete_share_id and not s:
            admin.delete_share(delete_share_id)
        if delete_share_id and s and admin.is_superadmin(user_id):
            admin.delete_share_by_s(delete_share_id)
        # shares = Share.find({'status': {'$ne': 1}}).sort('_id', -1)
        if status == 100:
            # Magic value: show everything above status 3.
            status = {'$gt': 3}
        shares = Share.find({'status': status}).sort('_id', -1)
        if s:
            self.render('admin/super_share.html', shares=shares)
        else:
            self.render('admin/share.html', shares=shares)
    else:
        self.render('admin/join_admin.html')
def get(self, node='home'):
    """Render the node page with status-0 shares ordered by score, 11
    per page."""
    page = self.get_argument("page", 1)
    share_res = Share.find({
        'status': 0
    }).sort('score', DESCENDING).limit(11).skip((int(page) - 1) * 11)
    # Fix: floor division — `/` produces a float page count on Python 3.
    pagesum = (share_res.count() + 10) // 11
    shares = []
    for share in share_res:
        user = User.by_sid(share.user_id)
        share.name = user.user_name
        share.published = time.strftime('%Y-%m-%d %H:%M:%S',
                                        time.localtime(share.published))
        share.domain = user.user_domain
        share.markdown = cutter(markdown2.markdown(share.markdown))
        shares.append(share)
    self.render(
        "node.html",
        shares=shares,
        pagesum=pagesum,
        page=page,
        node=node,
    )
def check():
    """One-off maintenance pass: auto-tag every visible share that has
    no tags yet, register each tag in the Tag collection, then print the
    remaining untagged count.

    NOTE(review): mutates the database in place — intended as a script,
    not a request handler.
    """
    # share_num = Share.find().count()
    # share_with_tag_num = share_num - Share.find({'tags': []}).count()
    for i in adb.Share_Col.find().sort('_id', 1):
        if i['status'] < 1:
            # Skip hidden/unpublished shares.
            continue
        # if i['tags'] == []:
        if i['tags']:
            # Already tagged — nothing to do.
            continue
        # print(i['id'], i['title'])
        print(i['user_id'])
        # adb.Share_Col.update().sort('_id', 1):
        tags = get_tags(i)
        adb.Share_Col.update({'_id': i['_id']}, {'$set': {'tags': tags}})
        for tag in tags:
            doc = {'name': tag, 'share_ids': i['id']}
            Tag.new(doc)
    share_without_tag_num = Share.find({'tags': []}).count()
    print(share_without_tag_num)
def get(self, node='home'):
    """Render the node page with status-0 shares by score, 11 per page,
    using 400-char plain-text teasers and small avatars."""
    page = self.get_argument("page", 1)
    share_res = Share.find({'status': 0}).sort(
        'score', DESCENDING).limit(11).skip((int(page) - 1) * 11)
    # Fix: floor division — `/` produces a float page count on Python 3.
    pagesum = (share_res.count() + 10) // 11
    shares = []
    for share in share_res:
        user = User.by_sid(share.user_id)
        share.name = user.user_name
        share.published = time.strftime(
            '%Y-%m-%d %H:%M:%S', time.localtime(share.published))
        share.domain = user.user_domain
        share.markdown = filter_tags(
            markdown2.markdown(share.markdown))[:400]
        share.gravatar = get_avatar(user.user_email, 16)
        shares.append(share)
    self.render(
        "node.html",
        shares=shares,
        pagesum=pagesum,
        page=page,
        node=node,
    )
def get(self, slug):
    """Share detail page: resolve by id or slug, record the hit, build
    like/comment state, compute "suggested" shares, and render; unknown
    slugs are redirected to legacy blog URLs or /404.
    """
    share = None
    if slug.isdigit():
        share = Share.by_sid(slug)
    else:
        share = Share.by_slug(slug)
    if share:
        # Count the visit and render the markdown in place.
        share.hitnum += 1
        share.save()
        share.markdown = markdown2.markdown(share.markdown)
        user = User.by_sid(share.user_id)
        share.user_name = user.user_name
        share.user_domain = user.user_domain
        # Replace the space-separated tag string with HTML tag links.
        tags = ''
        if share.tags:
            tags += 'tags:'
            for i in share.tags.split(' '):
                tags += '<a href="/tag/%s">%s</a> ' % (i, i)
        share.tags = tags
        user_id = int(
            self.current_user["user_id"]) if self.current_user else None
        like = Like.find_one(
            {'share_id': share.id, 'user_id': user_id})
        # Odd counts mean "currently liked" (toggling increments).
        share.is_liking = bool(like.likenum % 2) if like else None
        share.is_disliking = bool(like.dislikenum % 2) if like else None
        comments = []
        comment_res = Comment.find({'share_id': share.id})
        for comment in comment_res:
            user = User.by_sid(comment.user_id)
            comment.name = user.user_name
            comment.domain = user.user_domain
            comment.gravatar = get_avatar(user.user_email, 50)
            comments.append(comment)
        if user_id:
            # NOTE(review): the second dict looks like a projection, not
            # part of the filter — verify against the ORM's find().
            hit = Hit.find(
                {'share_id': share.id},
                {'user_id': int(self.current_user["user_id"])},
            )
            if hit.count() == 0:
                # NOTE(review): `hit = Hit` binds the CLASS and then
                # mutates it — looks like it should be `Hit()`; confirm.
                hit = Hit
                hit['share_id'] = share.id
                hit['user_id'] = int(self.current_user["user_id"])
                hit.save()
        else:
            # Anonymous visitor: remember the visit in a cookie.
            if not self.get_cookie(share.id):
                self.set_cookie(str(share.id), "1")
        # Score every share for the "suggested" sidebar.
        posts = Share.find()
        suggest = []
        for post in posts:
            post.score = 100 + post.id - post.user_id + post.commentnum * 3
            post.score += post.likenum * 4 + post.hitnum * 0.01
            post.score += randint(1, 999) * 0.001
            common_tags = [i for i in post.tags.split(
                ' ') if i in share.tags.split(' ')]
            post.score += len(common_tags)
            if post.sharetype == share.sharetype:
                post.score += 5
            if self.current_user:
                is_hitted = Hit.find(
                    {'share_id': share._id},
                    {'user_id': int(self.current_user["user_id"])},
                ).count() > 0
            else:
                is_hitted = self.get_cookie(share.id)
            if is_hitted:
                # Penalize shares the visitor has already seen.
                post.score -= 50
            suggest.append(post)
        # NOTE(review): ascending sort keeps the five LOWEST scores.
        suggest.sort(key=lambda obj: obj.get('score'))
        suggest = suggest[:5]
        self.render(
            "sharee.html", share=share, comments=comments, suggest=suggest)
    else:
        # Unknown slug: try mapping to the legacy blog, else 404.
        old = 'http://blog.anwensf.com/'
        for i in options.old_links:
            if slug in i:
                # NOTE(review): falls through to the /404 redirect even
                # after a match — a second redirect; confirm intent.
                self.redirect('%s%s' % (old, i), permanent=True)
                break
        return self.redirect("/404")
def get(self):
    """Serve the Atom feed template over every share, unmodified."""
    self.set_header("Content-Type", "application/atom+xml")
    self.render("feed.xml", shares=Share.find())
def get(self):
    """JSON article-list API: paginated shares sorted by ``suggested``,
    optionally filtered by a tag or the caller's saved tags, with read
    state and optional metadata (update count, tag hierarchy).
    """
    token = self.request.headers.get('Authorization', '')
    page = self.get_argument("page", 1)
    per_page = self.get_argument("per_page", 10)
    tag = self.get_argument('tag', '')
    filter_type = self.get_argument("filter_type", '')  # my_tags / my_likes
    last_suggested = self.get_argument("last_suggested", 0)
    read_status = self.get_argument('read_status', 1)
    meta_info = self.get_argument("meta_info", 1)
    read_status = int(read_status)
    per_page = int(per_page)
    page = int(page)
    if not last_suggested:
        last_suggested = 0
    # Client sends milliseconds; convert to seconds (+1s skew guard).
    last_suggested = float(last_suggested) / 1000 + 1
    user = self.get_user_dict(token)
    cond = {}
    tags = None
    if user and filter_type == 'my_tags':
        d_user = User.by_sid(user['user_id'])
        if d_user:
            tags = d_user['user_tags']
    # Filter by the user's tag set, or a single explicit tag.
    if tags:
        cond['tags'] = {"$in": tags}
    elif tag:
        cond['tags'] = tag
    # Different recommendation levels per user (currently identical).
    # if user and user['user_id'] in wx_admin_ids:
    if user and user['user_id'] == 1:
        cond['status'] = {'$gte': 1}
    else:
        cond['status'] = {'$gte': 1}
    # Already-read share ids for this user (~20ms).
    l_hitted_share_id = []
    if user and read_status:
        hits = Hit.find({'user_id': user['user_id']}, {
            '_id': 0,
            'share_id': 1
        })
        l_hitted_share_id = [i['share_id'] for i in hits]
    # Projection whitelist: only these fields are returned.
    filter_d = {}
    filter_d['_id'] = 0
    filter_d['id'] = 1
    filter_d['images'] = 1
    filter_d['title'] = 1
    filter_d['user_id'] = 1
    filter_d['tags'] = 1
    filter_d['published'] = 1
    filter_d['post_img'] = 1
    shares = Share.find(cond, filter_d).sort('suggested', -1).limit(per_page).skip(
        (page - 1) * per_page)
    new_shares = []
    for share in shares:
        user = User.by_sid(share.user_id)
        # share = dict(share)
        share['type'] = 1
        # if share.post_img:
        # if hasattr(share, 'post_img'):
        if share.get('post_img'):
            # Type 2 = has a post image; serve the 260px variant.
            share['type'] = 2
            share['images'] = [
                IMG_BASE + share['post_img'].replace('_1200.jpg', '_260.jpg')
            ]
            share.pop('post_img')
        else:
            share['images'] = []
        share['author'] = user.user_name
        # API reports milliseconds.
        share['published'] = int(share['published'] * 1000)  # share.published
        if read_status:
            share['read'] = bool(share['id'] in l_hitted_share_id)
        if 0:
            # Disabled: author avatar selection by email domain.
            if user.user_email.endswith('@wechat'):
                share['user_img'] = options.site_url + \
                    get_avatar_by_wechat(user._id)
            if user.user_email.endswith('@anwensf.com'):
                share['user_img'] = options.site_url + \
                    get_avatar_by_feed(user.id)
            else:
                share['user_img'] = options.site_url + \
                    get_avatar(user.user_email, 100)
        new_shares.append(share)
    if meta_info:
        meta = {}
        if last_suggested:
            # How many articles are newer than the client's last sync.
            cond_update = copy.deepcopy(cond)
            cond_update['suggested'] = {'$gt': last_suggested}
            number_of_update = Share.find(cond_update, {
                '_id': 0,
                'id': 1
            }).count()
            meta['number_of_update'] = number_of_update
        if tag:
            # Sub-tag article counts / tag hierarchy.
            d_tags = get_tags()
            d_tags_parents = get_tags_parents()  # get_tags_parent
            if tag in d_tags:
                sub_tags = []
                for name in d_tags[tag]:
                    info = {}
                    info['name'] = name
                    # num = Share.find({'tags': name}, {'_id': 0}).count()
                    # num_recent = Share.find(
                    #     {'tags': name, 'published': {'$gt': time.time() - 86400 * 30}}, {'_id': 0}).count()
                    # info['num'] = num
                    # info['num_recent'] = num_recent
                    sub_tags.append(info)
                meta['sub_tags'] = sub_tags
            meta['parent_tags'] = []
            if tag in d_tags_parents:
                # meta['parent_tags'].append(d_tags_parent[tag])
                meta['parent_tags'] = d_tags_parents[tag]  # hypernym
        number = Share.find(cond, {'_id': 0}).count()  # 'id': 1
        meta['number'] = number
    # if filter_type == 'my_tags':
    #     meta['tags'] = tags
    self.res = {'articles': new_shares}
    # NOTE(review): `meta` is only bound when meta_info is truthy —
    # ?meta_info=0 would raise NameError here; confirm intended.
    self.meta = meta
    return self.write_json()
def get(self, node='home'):
    """Node page with a configurable display level (``status=gte_1``
    style query param) and two layouts: the compact ``node_alot``
    template when per_page >= 20, otherwise the regular ``node`` one.
    """
    page = self.get_argument("page", 1)
    per_page = self.get_argument("per_page", 11)
    status = self.get_argument("status", 'gte_1')
    per_page = int(per_page)
    # Display-level control: "gte_1" -> {'$gte': 1}.
    # NOTE(review): assert on request input is stripped under -O and
    # yields a 500 on bad input — consider validating instead.
    assert '_' in status
    st_type, st_num = status.split('_')
    status = {'${}'.format(st_type): int(st_num)}  # {'$gte': 1}
    # When node is not 'home', don't apply the display-level filter.
    conds = {'status': status}
    if node not in 'home'.split():
        conds['sharetype'] = node
        # if node not in 'rss'.split():
        conds['status'] = {'$gte': 0}
    # sort type
    # 'score', DESCENDING
    share_res = Share.find(conds).sort('_id', DESCENDING).limit(per_page).skip(
        (int(page) - 1) * per_page)
    pagesum = int((share_res.count() + per_page - 1) / per_page)
    shares = []
    if per_page >= 20:
        # Alternative compact UI for large pages.
        for share in share_res:
            user = User.by_sid(share.user_id)
            share.name = user.user_name
            share.published = time.strftime(
                '%Y-%m-%d %H:%M:%S', time.localtime(share.published))
            share.domain = user.user_domain
            md = share.markdown
            md = md.replace('>\n', '> ')
            share.markdown = cutter(markdown2.markdown(md))
            share.title = share.title.split('_')[0]
            shares.append(share)
        del user
        tpl_name = 'node_alot'
    else:
        for share in share_res:
            # if share.id in (48, 47):  # temporarily hidden
            #     continue
            # Fetch author info — one query per share!!!
            user = User.by_sid(share.user_id)
            share.name = user.user_name
            share.domain = user.user_domain
            share.published = time.strftime(
                '%Y-%m-%d %H:%M:%S', time.localtime(share.published))
            md = share.markdown
            md = md.replace('>\n', '> ')
            share.markdown = cutter(markdown2.markdown(md))
            share.title = share.title.split('_')[0]
            shares.append(share)
        # Manual cleanup of loop temporaries (micro memory hygiene).
        del user
        del share
        del md
        tpl_name = 'node'
    self.render(
        "{}.html".format(tpl_name),
        shares=shares,
        pagesum=pagesum,
        page=page,
        per_page=per_page,
        node=node,
    )
    del shares, share_res
    del tpl_name, pagesum, page, per_page, node, status
    return
def get(self, slug):
    """Share view page: resolve the share, render its markdown (with an
    inline snapshot preview for link shares), attach like/collect state
    and comments, then render and record the hit.
    """
    share = get_share_by_slug(slug)
    if not share:
        return self.write_error(404)
    # for web
    user = User.by_sid(share.user_id)
    share.author_name = user.user_name
    share.author_domain = user.user_domain
    share.tags = format_tags(share)
    share.title = share.title.split('_')[0]
    if share.markdown:
        md = share.markdown
        md = md.replace('>\n', '> ')
        share.content = markdown2.markdown(md)
    # Link-type shares get an inline preview of the original page.
    if share.sharetype == 'rss':
        assert share.link
    if share.link:
        if share.sharetype == 'rss':
            pass
        else:
            # Webcache should add index
            doc = Webcache.find_one({'url': share.link}, {'_id': 0})
            # Skip pages that forbid reposting.
            # NOTE(review): the `or` makes this condition true whenever
            # only ONE of the two forbid-markers is present — likely
            # `and` was intended; confirm before changing.
            if doc and doc['markdown'] and ('禁止转载' not in doc['markdown'] or '禁止任何形式的转载' not in doc['markdown']):
                doc['markdown'] = doc['markdown'].replace('本文授权转自', '')
                md = share['markdown']
                md += '\n\n--预览(快照)(以后会默认折叠)--\n\n' + doc['markdown']
                md += '\n\n[阅读原文]({})'.format(doc['url'])
                # Rewrite relative image paths against the source domain.
                parsed_uri = urlparse(share.link)
                domain = '{uri.scheme}://{uri.netloc}/'.format(
                    uri=parsed_uri)
                md = md.replace('![image](/', '![image]({}/'.format(domain))
                md = md.replace('\n* \n', '\n\n')
                md = md.replace('\n*\n', '\n\n')
                md = md.replace('>\n', '> ')
                # md = md.replace('>\n\n', '')  # ???
                # Collapse runs of blank lines.
                while '\n\n\n' in md:
                    md = md.replace('\n\n\n', '\n\n')
                share.content = markdown2.markdown(md)
    user_id = self.current_user["user_id"] if self.current_user else None
    # user_id
    like = Like.find_one({
        'entity_id': share.id,
        'user_id': user_id,
        'entity_type': 'share'
    })
    collect = Collect.find_one({
        'entity_id': share.id,
        'user_id': user_id,
        'entity_type': 'share'
    })
    share.is_liking = bool(like.likenum) if like else False
    share.is_disliking = bool(like.dislikenum) if like else False
    share.is_collecting = bool(collect.collectnum) if collect else False
    # logger.info('user_id: {}'.format(user_id))
    # logger.info('share.is_liking: {}'.format(share.is_liking))
    suggest = []
    # is_liking = db.getCollection('Like_Col').find({'entity_id':1,'entity_type':'share','user_id':1})
    if user_id:
        # Which comments this user has liked/disliked.
        likes = Like.find({'user_id': user_id, 'entity_type': 'comment'})
        likes = list(likes)
        like_commentids = [
            alike.entity_id for alike in likes if alike.likenum > 0
        ]
        dislike_commentids = [
            alike.entity_id for alike in likes if alike.dislikenum > 0
        ]
    else:
        like_commentids = []
        dislike_commentids = []
    comments = get_comments(share, like_commentids, dislike_commentids)
    share.viewpoints = Viewpoint.find({'share_id': share.id})
    # Anonymous visitors: remember the visit in a cookie.
    if not user_id and not self.get_cookie(share.id):
        self.set_cookie(str(share.id), "1")
    self.render("share_view.html",
                share=share,
                comments=comments,
                suggest=suggest)
    add_hit_stat(user_id, share)
    return
    # --- Dead code: unreachable after the return above (old suggestion
    # scorer, kept as reference) ---
    suggest = []
    posts = Share.find()
    for post in posts:
        post.score = 100 + post.id - post.user_id
        # post.score += post.likenum * 4 + post.hitnum * 0.01 + post.commentnum * 3
        post.score += randint(1, 999) * 0.001
        common_tags = [
            i for i in post.tags.split(' ') if i in share.tags.split(' ')
        ]
        post.score += len(common_tags)
        if post.sharetype == share.sharetype:
            post.score += 1  # todo
        if self.current_user:
            is_hitted = Hit.find(
                {
                    'share_id': share.id
                },
                {
                    'user_id': int(self.current_user["user_id"])
                },
            ).count() > 0
        else:
            is_hitted = self.get_cookie(share.id)
        if is_hitted:
            post.score -= 50
        suggest.append(post)
    suggest.sort(key=lambda obj: obj.get('score'))
    suggest = suggest[:5]
def get(self, name):
    """User home page: profile info plus the user's 100 newest shares
    annotated with the visitor's like/dislike/collect state.

    Fixes: removed debug ``print(_likes[0])`` / ``print(shares[0])``
    which raised IndexError whenever the result set was empty.
    """
    user = User.find_one({'user_domain': name})
    user.user_say = markdown2.markdown(user.user_say)
    user.user_jointime = time.strftime('%Y-%m-%d %H:%M:%S',
                                       time.localtime(user.user_jointime))
    # NOTE(review): counts User rows keyed by user_id — verify this is
    # really the "likes received" figure the template expects.
    likenum = User.find({'user_id': user._id}).count()
    user.gravatar = get_avatar(user.user_email, 100)
    shares = Share.find({
        'user_id': user.id
    }, {
        'markdown': 0,
        'summary': 0
    }).sort('_id', -1).limit(100)
    likes = set()
    dislikes = set()
    collects = set()
    if self.current_user:
        # Preload the visitor's reactions once instead of per share.
        user_id = self.current_user["user_id"]
        _likes = Like.find({
            'entity_type': 'share',
            'user_id': user_id
        }, {
            '_id': 0,
            'entity_id': 1,
            'likenum': 1,
            'dislikenum': 1
        })
        _likes = list(_likes)
        likes = set(i.entity_id for i in _likes if i.likenum > 0)
        dislikes = set(i.entity_id for i in _likes if i.dislikenum > 0)
        collects = Collect.find(
            {
                'entity_type': 'share',
                'user_id': user_id
            }, {
                '_id': 0,
                'entity_id': 1,
                'collectnum': 1
            })
        collects = set(i.entity_id for i in collects if i.collectnum > 0)
    l_share = []
    for share in shares:
        # `x in empty_set` is False, so the old `likes and ...` guard
        # was redundant — plain membership is equivalent.
        share.is_liking = share.id in likes
        share.is_disliking = share.id in dislikes
        share.is_collecting = share.id in collects
        l_share.append(share)
    self.render('userhome.html',
                user=user,
                shares=l_share,
                is_login=bool(self.current_user),
                likenum=likenum)
def get(self):
    """JSON article API: shares sorted by ``suggested`` with plain-text
    summaries extracted from the rendered HTML, read state, avatar URLs,
    and tag-hierarchy metadata.
    """
    page = self.get_argument("page", 1)
    per_page = self.get_argument("per_page", 10)
    filter_type = self.get_argument("filter_type", '')  # my_tags
    tag = self.get_argument('tag', '')
    meta_info = self.get_argument("meta_info", 1)
    last_suggested = self.get_argument("last_suggested", 0)
    read_status = self.get_argument('read_status', 1)
    token = self.request.headers.get('Authorization', '')
    # has_vote = self.get_argument("has_vote", None)
    # vote_open = self.get_argument("vote_open", None)
    read_status = int(read_status)
    per_page = int(per_page)
    page = int(page)
    # Client sends milliseconds; convert to seconds (+1s skew guard).
    last_suggested = float(last_suggested) / 1000 + 1
    user = self.get_user_dict(token)
    cond = {}
    # Filter by the caller's saved tags when requested.
    tags = None
    if user and filter_type == 'my_tags':
        d_user = User.by_sid(user['user_id'])
        if d_user:
            tags = d_user['user_tags']
    # Filter by tag set or single tag.
    if tags:
        cond['tags'] = {"$in": tags}
    elif tag:
        cond['tags'] = tag
    # Recommendation level per user (both branches identical today).
    if user and user['user_id'] in wx_admin_ids:
        cond['status'] = {'$gte': 1}
    else:
        cond['status'] = {'$gte': 1}
    l_hitted_share_id = []
    if user and read_status:
        hits = Hit.find({'user_id': user['user_id']})
        l_hitted_share_id = [i['share_id'] for i in hits]
    # if vote_open:
    #     if not vote_open.isdigit():
    #         return self.write_error(422)
    #     cond['vote_open'] = int(vote_open)
    # if has_vote:
    #     cond['vote_title'] = {'$ne': ''}
    number = Share.find(cond, {'_id': 0}).count()
    # sort: _id
    if last_suggested:
        cond_update = copy.deepcopy(cond)
        cond_update['suggested'] = {'$gt': last_suggested}
        number_of_update = Share.find(cond_update, {'_id': 0}).sort(
            'suggested', -1).count()
        logger.info('number_of_update 1: {}'.format(number_of_update))
    num_shares = Share.find(cond, {'_id': 0, 'id': 1}).count()
    shares = Share.find(cond, {'_id': 0}).sort(
        'suggested', -1).limit(per_page).skip((page - 1) * per_page)
    # shares = [fix_share(share) for share in shares]
    new_shares = []
    for share in shares:
        share = fix_share(share)
        user = User.by_sid(share.user_id)
        share = dict(share)
        share['user_name'] = user.user_name
        share['markdown'] = ''
        if read_status:
            share['read'] = bool(share['id'] in l_hitted_share_id)
        soup = BeautifulSoup(share['content'], "lxml")
        # kill all script and style elements
        for script in soup(["script", "style"]):
            script.extract()  # rip it out
        # get text
        text = soup.get_text()
        # break into lines and remove leading and trailing space on each
        lines = (line.strip() for line in text.splitlines())
        # break multi-headlines into a line each
        chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
        # drop blank lines
        text = '\n'.join(chunk for chunk in chunks if chunk)
        # print(text)
        share['summary'] = text[:150]
        share['content'] = ''
        # Avatar source depends on the author's email domain.
        # NOTE(review): the first branch is followed by `if`, not
        # `elif`, so @wechat authors fall through to the trailing else
        # of the SECOND branch — confirm whether elif was intended.
        if user.user_email.endswith('@wechat'):
            share['user_img'] = options.site_url + \
                get_avatar_by_wechat(user._id)
        if user.user_email.endswith('@anwensf.com'):
            share['user_img'] = options.site_url + \
                get_avatar_by_feed(user.id)
        else:
            share['user_img'] = options.site_url + \
                get_avatar(user.user_email, 100)
        new_shares.append(share)
    # if tag:
    #     shares = [share for share in shares if tag in share['tags']]
    meta = {}
    meta['page'] = page
    meta['articleNumber'] = num_shares
    # NOTE(review): number_of_update is only bound when last_suggested
    # is truthy — meta_info=1&last_suggested=0 would NameError here if
    # the two flags ever diverge; confirm.
    if meta_info and last_suggested:
        meta['number_of_update'] = number_of_update
    if meta_info and tag:
        d_tags = get_tags()
        # d_tags_parent = get_tags_parent()
        d_tags_parents = get_tags_parents()
        if tag in d_tags:
            # Per-sub-tag article counts (total and last 30 days).
            sub_tags = []
            for name in d_tags[tag]:
                num = Share.find({'tags': name}, {'_id': 0}).count()
                num_recent = Share.find(
                    {'tags': name, 'published': {'$gt': time.time() - 86400 * 30}}, {'_id': 0}).count()
                info = {}
                info['name'] = name
                info['num'] = num
                info['num_recent'] = num_recent
                sub_tags.append(info)
            meta['sub_tags'] = sub_tags
        meta['parent_tags'] = []
        if tag in d_tags_parents:  # hypernym
            # meta['parent_tags'].append(d_tags_parent[tag])
            meta['parent_tags'] = d_tags_parents[tag]
    logger.info('last_suggested time: {}'.format(last_suggested))
    if new_shares:
        logger.info('new_shares[0] time: {}'.format(new_shares[0]['title']))
        logger.info('new_shares[0] published time: {}'.format(
            new_shares[0]['published']))
        logger.info('new_shares[0] suggested time: {}'.format(
            new_shares[0]['suggested']))
    self.res = {'articles': list(new_shares)}
    self.meta = meta
    # number=len(self.res)
    # number=number
    return self.write_json()
def get(self):
    """JSON share-list API with token-or-cookie auth, optional my-tags /
    tag / vote filters, and tag-hierarchy metadata.

    NOTE(review): contains bare ``print`` debug statements — consider
    switching them to ``logger`` calls.
    """
    page = self.get_argument("page", 1)
    per_page = self.get_argument("per_page", 10)
    meta_info = self.get_argument("meta_info", None)
    my_tags = self.get_argument("my_tags", None)
    tag = self.get_argument('tag', '')
    per_page = int(per_page)
    page = int(page)
    user = None
    tags = None
    # Auth: an "token <value>" Authorization header, else the cookie.
    token = self.request.headers.get('Authorization', '')
    if token:
        key, token = token.split()
        if key == 'token' and token:
            user_json = self.get_secure_cookie('user', token)
            if user_json:
                user = json_decode(user_json)
    else:
        user_json = self.get_secure_cookie("user")
        if user_json:
            user = json_decode(user_json)
    print(user)
    print(my_tags)
    if user and my_tags:
        d_user = User.by_sid(user['user_id'])
        print(d_user, 1111)
        if d_user:
            print(d_user['user_tags'])
            tags = d_user['user_tags']
    vote_open = self.get_argument("vote_open", None)
    has_vote = self.get_argument("has_vote", None)
    cond = {}
    if tags:
        print('1111111111111', tags)
        cond['tags'] = {"$in": tags}
    elif tag:
        cond['tags'] = tag
    if user:
        logger.info('user_id: {}'.format(user['user_id']))
    # Recommendation level per user (both branches identical today).
    if user and user['user_id'] in wx_admin_ids:
        cond['status'] = {'$gte': 1}
    else:
        cond['status'] = {'$gte': 1}
    if vote_open:
        if not vote_open.isdigit():
            return self.write_error(422)
        cond['vote_open'] = int(vote_open)
    if has_vote:
        cond['vote_title'] = {'$ne': ''}
    number = Share.find(cond, {'_id': 0}).count()
    shares = Share.find(cond, {'_id': 0}).sort(
        '_id', -1).limit(per_page).skip((page - 1) * per_page)
    shares = [fix_share(share) for share in shares]
    # if tag:
    #     shares = [share for share in shares if tag in share['tags']]
    meta = {}
    if meta_info and tag:
        d_tags = get_tags()
        # d_tags_parent = get_tags_parent()
        d_tags_parents = get_tags_parents()
        if tag in d_tags:
            # Per-sub-tag article counts (total and last 30 days).
            sub_tags = []
            print(d_tags[tag])
            for name in d_tags[tag]:
                num = Share.find({'tags': name}, {'_id': 0}).count()
                num_recent = Share.find(
                    {'tags': name, 'published': {'$gt': time.time()-86400*30}}, {'_id': 0}).count()
                info = {}
                info['name'] = name
                info['num'] = num
                info['num_recent'] = num_recent
                sub_tags.append(info)
            meta['sub_tags'] = sub_tags
        meta['parent_tags'] = []
        if tag in d_tags_parents:  # hypernym
            # meta['parent_tags'].append(d_tags_parent[tag])
            meta['parent_tags'] = d_tags_parents[tag]
    self.res = list(shares)
    self.meta = meta
    print(meta)
    # number=len(self.res)
    return self.write_json(number=number)
def get(self, slug):
    """Render one share's detail page.

    `slug` may be a numeric share id or a text slug. Side effects: bumps
    the share's hit counter, records a Hit (or a cookie for anonymous
    visitors), and computes a small "suggested posts" list. Unknown slugs
    are redirected to an old-blog URL when matched, else to /404.
    """
    share = None
    if slug.isdigit():
        # Numeric slug -> lookup by id, otherwise by slug string.
        share = Share.by_sid(slug)
    else:
        share = Share.by_slug(slug)
    if share:
        # Every page view counts as a hit, even repeat views.
        share.hitnum += 1
        share.save()
        if share.markdown:
            share.content = markdown2.markdown(share.markdown)
        user = User.by_sid(share.user_id)
        share.user_name = user.user_name
        share.user_domain = user.user_domain
        # Rewrite the space-separated tag string into HTML links.
        tags = ''
        if share.tags:
            tags += 'tags:'
            for i in share.tags.split(' '):
                tags += '<a href="/tag/%s">%s</a> ' % (i, i)
        share.tags = tags
        user_id = int(
            self.current_user["user_id"]) if self.current_user else None
        like = Like.find_one(
            {'share_id': share.id, 'user_id': user_id})
        # like/dislike counters are toggles: an odd count means "currently on".
        share.is_liking = bool(like.likenum % 2) if like else None
        share.is_disliking = bool(like.dislikenum % 2) if like else None
        comments = []
        comment_res = Comment.find({'share_id': share.id})
        for comment in comment_res:
            # Decorate each comment with its author's display fields.
            user = User.by_sid(comment.user_id)
            comment.name = user.user_name
            comment.domain = user.user_domain
            comment.gravatar = get_avatar(user.user_email, 50)
            comments.append(comment)
        if user_id:
            hit = Hit.find(
                {'share_id': share.id},
                {'user_id': int(self.current_user["user_id"])},
            )
            if hit.count() == 0:
                # NOTE(review): this rebinds `hit` to the Hit CLASS and
                # mutates/saves it; presumably the model layer tolerates
                # this, but it looks like it should be Hit.new({...}) or an
                # instance — confirm against the model framework.
                hit = Hit
                hit['share_id'] = share.id
                hit['user_id'] = int(self.current_user["user_id"])
                hit.save()
        else:
            # Anonymous visitors: mark the view with a per-share cookie.
            # NOTE(review): get_cookie is passed the int share.id while
            # set_cookie gets str(share.id) — verify get_cookie coerces,
            # otherwise the check never matches.
            if not self.get_cookie(share.id):
                self.set_cookie(str(share.id), "1")
        # Score every share to build the "suggested" sidebar:
        # engagement (comments/likes/hits) + small random jitter +
        # tag/sharetype overlap with the current share, penalizing
        # posts the visitor already viewed.
        posts = Share.find()
        suggest = []
        for post in posts:
            post.score = 100 + post.id - post.user_id + post.commentnum * 3
            post.score += post.likenum * 4 + post.hitnum * 0.01
            post.score += randint(1, 999) * 0.001
            common_tags = [i for i in post.tags.split(
                ' ') if i in share.tags.split(' ')]
            # list(set(b1) & set(b2))
            post.score += len(common_tags)
            if post.sharetype == share.sharetype:
                post.score += 5
            if self.current_user:
                is_hitted = Hit.find(
                    {'share_id': share._id},
                    {'user_id': int(self.current_user["user_id"])},
                ).count() > 0
            else:
                is_hitted = self.get_cookie(share.id)
            if is_hitted:
                post.score -= 50
            suggest.append(post)
        # NOTE(review): this sorts ASCENDING and then takes the first 5,
        # i.e. the LOWEST-scoring posts are suggested — looks like it
        # wants reverse=True; confirm intent before changing.
        suggest.sort(key=lambda obj: obj.get('score'))
        suggest = suggest[:5]
        self.render(
            "sharee.html", share=share, comments=comments, suggest=suggest)
    else:
        # Unknown slug: try to redirect to the matching post on the old
        # blog, else fall through to /404.
        old = 'http://blog.anwensf.com/'
        for i in options.old_links:
            if slug in i:
                # NOTE(review): after this `break` the /404 redirect below
                # still executes (second redirect on one request) — likely
                # a missing `return`; confirm.
                self.redirect('%s%s' % (old, i), permanent=True)
                break
        return self.redirect("/404")
def add_from_file(rss_url, rss_hostname, rss_name): # rss_file = 'content/gen/qdaily_2019-04-20 15:07:12.xml' n = Share.find().count() print(n) print(rss_name) feeds = feedparser.parse(rss_url) for post in feeds.entries[::-1]: # authors # itunes_episodetype full # itunes_episode # itunes_explicit # itunes_title # itunes_duration # published link subtitle id image title tags # links title_detail author_detail summary_detail guidislink published_parsed summary content author # subtitle_detail # title title_detail # published published_parsed # summary summary_detail # author # link links guidislink # authors # 'itunes_title', 'itunes_episode' # 'author_detail', 'id', 'itunes_duration' # <itunes:duration>6957</itunes:duration> # TODO # 修正内容 目前暂时不支持 # <enclosure type="audio/mpeg" url="https://kernelpanic.fm/55/audio.mp3"/> # <media:content url="https://cdn.flipboard.com/telegraph.co.uk/1356d637c7438f6fcffda0d5de177b6058904de6/original.jpg" medium="image" type="image/jpeg" width="480" height="300" /> # media_content # print(post.keys()) if hasattr(post, 'summary'): summary = post.summary assert post.summary == post.description else: summary = '' # 部分rss没有content if hasattr(post, 'content'): content = post.content[0]['value'] else: if hasattr(post, 'summary'): content = post.summary else: print('no content', rss_url, rss_hostname, rss_name) continue if content.startswith('<![CDATA[') and content.endswith(']]>'): # m = rgx.search(content) # content = m.group(1) content = content[9:-3] if summary.startswith('<![CDATA[') and summary.endswith(']]>'): summary = summary[9:-3] if hasattr(post, 'published'): if 'GMT' == post.published[-3:]: published = datetime.strptime(post.published, "%a, %d %b %Y %H:%M:%S GMT") elif ',' in post.published: if post.published.endswith('2019'): pass # May 19, 2019 published = datetime.strptime(post.published, "%b %d, %Y") else: published = datetime.strptime(post.published, "%a, %d %b %Y %H:%M:%S %z") # Thu, 18 Apr 2019 19:32:58 +0800 elif '/' in 
post.published: published = datetime.strptime(post.published, "%Y/%m/%d %H:%M:%S %z") elif 'Z' == post.published[-1]: post.published = post.published.replace('.000Z', 'Z') published = datetime.strptime(post.published, "%Y-%m-%dT%H:%M:%SZ") # <pubDate>15 Jun 2019 06:30:00 EST</pubDate> elif 'EST' in post.published: post.published = post.published[:-4] published = datetime.strptime(post.published, "%d %b %Y %H:%M:%S") elif 'T' in post.published: # 2019-05-24T15:05:50-04:00 post.published = post.published[:-6] # tz = post.published[-6:].replace(':', '') published = datetime.strptime(post.published, "%Y-%m-%dT%H:%M:%S") # published = published.replace(tzinfo=FixedOffset(tz)) elif post.published.count(' ') == 1: published = datetime.strptime(post.published, "%Y-%m-%d %H:%M:%S") else: published = datetime.strptime(post.published, "%Y-%m-%d %H:%M:%S %z") published = published.timestamp() else: if random.random() > 0.9: print('no published time') published = time.time() title = post.title link = post.link author = '' if hasattr(post, 'source'): source_title = post.source.title # print(source_title) print(rss_name, source_title) if rss_name == '虎嗅': pass author = source_title else: assert rss_name in source_title # assert rss_name == source_title source = rss_name if hasattr(post, 'category_title'): category = post.category_title assert ' ' not in category assert ',' not in category tags = [category] elif hasattr(post, 'tags'): tags = post.tags # print(tags) # assert len(tags) == 1 # tags = tags[0]['term'] tags = ','.join([t['term'] for t in tags]) category = '' if '-' in tags: print(tags) tags = tags.replace(' ', '-') tags = tags.split(',') for tag in tags: if ' ' in tag: print(tag) else: # print('no category') category = '' tags = [] sharetype = 'rss' try: markdown = html2text.html2text(content) except Exception as e: print('error in html-to-markdown: {}'.format(e)) continue assert link res = { 'title': title, 'link': link, 'source': source, 'category': category, 
'content': content, 'summary': summary, 'sharetype': sharetype, 'tags': tags, 'markdown': markdown, 'published': published, 'updated': time.time(), } # print(post.keys()) if hasattr(post, 'author'): # TODO print('author: ', post.author) res['author'] = post.author else: res['author'] = author # 去重方案 # - 标题重复 found = Share.find({'title': title}) if found.count(): if found.count() > 1: print('!! repeated article title: {}'.format(title)) elif found.count() == 1: # continue share = Share.by_sid(found[0].id) if share and summary and not share.link and link: print(res['link']) print('title {} updated'.format(title)) share.update(res) share.save() else: print('title {} adding'.format(title)) email = '{}@anwensf.com'.format(rss_hostname) auser = User.by_email(email) assert auser share = Share user_id = auser.id res['user_id'] = user_id # just use 1 as default # continue assert res['link'] share = share.new(res) user = User.by_sid(user_id) user.user_leaf += 10 user.save() for i in tags: doc = {'name': i, 'share_ids': share.id} Tag.new(doc)