def blogs_from_tag(self, tagname, vfrom=0, vlim=20):
    """Return one page of this user's Blog objects for *tagname*.

    Args:
        tagname: tag to filter by; 'default' means the full blog list.
        vfrom: start offset of the page.
        vlim: maximum number of blogs to return.

    Returns:
        (blogs, total) -- *blogs* is the requested page of Blog
        instances, *total* is the number of blogs under the tag before
        paging (matches the early-return below).
    """
    from article.blog import Blog
    if not tagname:
        return [], 0
    if tagname == 'default':
        toview = self.blogids
    else:
        tag_blogs = self.lib.tag_lib.sub_list(tagname)
        toview = tag_blogs.load_all()
        if toview is None:
            toview = []
        # tag entries are [blog_id, release_time]; newest first
        toview = sorted(toview, key=itemgetter(1), reverse=True)
    len_toview = len(toview)
    if vfrom >= len_toview:
        return [], len_toview
    if vfrom + vlim > len_toview:
        vlim = len_toview - vfrom
    if tagname == 'default':
        # BUG FIX: apply vfrom/vlim here too -- the old code loaded the
        # entire id list and ignored the paging bounds it just computed.
        toview = Blog.by_ids(toview[vfrom:(vfrom + vlim)], auto_cache=False)
    else:
        toview = [tuple(each) for each in toview[vfrom:(vfrom + vlim)]]
        tmp = set(toview)
        toview = Blog.by_ids([each[0] for each in toview], auto_cache=False)
        toview.sort()
        tmp2 = set((each._id, each.release_time) for each in toview)
        # drop tag entries whose blog no longer exists
        tag_blogs.pull(*tuple(tmp - tmp2))
    for each in toview:
        each.set_cache('author', self)
        each.set_cache('env', self)
    # BUG FIX: report the total count (like the early return and the
    # sibling blogs_info_view_by), not the size of this page.
    return toview, len_toview
def blogs_from_tag(self, tagname, vfrom=0, vlim=20):
    """Return one page of this user's Blog objects for *tagname*.

    Args:
        tagname: tag to filter by; 'default' means the full blog list.
        vfrom: start offset of the page.
        vlim: maximum number of blogs to return.

    Returns:
        (blogs, total) -- *blogs* is the requested page of Blog
        instances, *total* is the number of blogs under the tag before
        paging (matches the early-return below).
    """
    from article.blog import Blog
    if not tagname:
        return [], 0
    if tagname == 'default':
        toview = self.blogids
    else:
        tag_blogs = self.lib.tag_lib.sub_list(tagname)
        toview = tag_blogs.load_all()
        if toview is None:
            toview = []
        # tag entries are [blog_id, release_time]; newest first
        toview = sorted(toview, key=itemgetter(1), reverse=True)
    len_toview = len(toview)
    if vfrom >= len_toview:
        return [], len_toview
    if vfrom + vlim > len_toview:
        vlim = len_toview - vfrom
    if tagname == 'default':
        # BUG FIX: apply vfrom/vlim here too -- the old code loaded the
        # entire id list and ignored the paging bounds it just computed.
        toview = Blog.by_ids(toview[vfrom:(vfrom + vlim)], auto_cache=False)
    else:
        toview = [tuple(each) for each in toview[vfrom:(vfrom + vlim)]]
        tmp = set(toview)
        toview = Blog.by_ids([each[0] for each in toview], auto_cache=False)
        toview.sort()
        tmp2 = set((each._id, each.release_time) for each in toview)
        # drop tag entries whose blog no longer exists
        tag_blogs.pull(*tuple(tmp - tmp2))
    for each in toview:
        each.set_cache('author', self)
        each.set_cache('env', self)
    # BUG FIX: report the total count (like the early return and the
    # sibling blogs_info_view_by), not the size of this page.
    return toview, len_toview
def fun_get_comment_by_position(article_id=None, pos=0, article_type="blog",
                                page_cap=10, load_one='no', load_before='no',
                                before_pos=0):
    ''' get comment by position, return [1|0, 'info', first_post, last_pos, len] '''
    ''' info contains a dict{ 'comment':{'0': comment(0) }, 'ref_commet':{'1':comment(0) } } '''
    # NOTE(review): this chunk appears truncated -- only the parent
    # article lookup/validation is visible; the comment paging logic
    # promised by the docstring is not shown here.
    try:
        # resolve the parent object by its declared type
        if article_type == "blog":
            AF_Object = Blog(_id=article_id)
        elif article_type == "group-topic":
            AF_Object = Topic(_id=article_id)
        elif article_type == 'group-notice':
            AF_Object = Bulletin(_id=article_id)
        elif article_type == 'group-feedback':
            AF_Object = Feedback(_id=article_id)
        elif article_type == "group-doc":
            # group docs are stored as Blog objects
            AF_Object = Blog(_id=article_id)
        else:
            return [1, '不支持当前类型!']
    except Exception, e:
        # constructor raises when the id does not exist
        logging.error(traceback.format_exc())
        logging.error('%s not exist, id %s' % (article_type, article_id))
        return [1, '文章不存在!']
def get(self):
    """Render the RSS feed built from the ten most recent blogs."""
    feed_page = RSSRecentBlogPage(self)
    recent = Blog.by_ids(recent_blogs.get_slice(-10))
    feed_page['items'] = [blog.rss_info for blog in sorted(recent)]
    feed_page.init_page()
    feed_page.render()
def post(self):
    """Delete one of the current user's draft articles.

    Expects 'article_id' and 'article_type' POST arguments; responds
    with a JSON dict {'kind': 0|1, 'info': message} (kind 0 = success).
    """
    user = self.current_user
    article_id = is_value(self.get_argument("article_id", None))
    article_type = is_value(self.get_argument("article_type", None))
    result = {'kind': 1, 'info': ''}
    # reject missing or unsupported types up front
    if article_id is None or article_type is None or (article_type not in ['blog', 'comment', 'feedback', 'topic']):
        result['info'] = '参数错误!'
        self.write(json_encode(result))
        return
    if article_type == "blog":
        try:
            obj = Blog(_id=article_id)
            # only the author may delete their own blog draft
            if obj.author_id != user._id:
                result['info'] = '无权删除!'
                return self.write(json_encode(result))
        except Exception, e:
            # blog id not found: fall through and report kind=1
            logging.error(traceback.format_exc())
            logging.error('Blog not exist, id %s' % article_id)
        else:
            # lookup succeeded and ownership verified: remove the draft
            # NOTE(review): indentation reconstructed as try/except/else;
            # non-'blog' types currently fall through without deleting --
            # confirm against the original file.
            user.drafts_lib.delete_obj(article_id)
            result['kind'] = 0
    return self.write(json_encode(result))
def get(self):
    """Render the registration page, showing the 20 most recent blogs."""
    from global_info import recent_blogs
    from article.blog import Blog
    reg_page = RegisterPage(self)
    recent = Blog.by_ids(recent_blogs.get_slice(-20))
    reg_page["blog_list"] = [blog.obj_info_view_by() for blog in recent]
    return reg_page.render()
def own_data(self):
    """Collect all article objects this user owns: non-empty drafts
    plus every published blog."""
    drafts = list_generator([(key, val[0]) for key, val in self.drafts_lib.items()])
    data = [obj for obj in drafts if obj is not None]
    data.extend(Blog.by_ids(self.blog_list.load_all()))
    return data
def fun_get_doc_list(group=None, page=1, page_cap=10, tag='default'):
    """Fetch one page of a group's documents.

    Returns (res_list, sum_count, page): res_list holds dicts with
    doc_id/doc_title/doc_time in ascending order within the page;
    ([], 0, page) when the tag is unknown or the page is out of range.
    NOTE: Python 2 code -- '/' below is integer division.
    """
    if tag == "default":
        tmp_list = group.blog_list.load_all()
    else:
        tmp_list = group.tag_lib[tag]
    if tmp_list is None:
        return ([], 0, page)
    sum_count = len(tmp_list)
    # ceil(sum_count / page_cap) via the old and/or ternary idiom
    sum_page = (sum_count % page_cap and sum_count / page_cap + 1) or sum_count / page_cap
    if page > sum_page:
        return ([], 0, page)
    tmp_list.reverse()  # newest first for paging
    min_index, max_index = get_index_list_by_page(tmp_list, page=page, page_cap=page_cap)
    doc_list_instance = Blog.get_instances('_id', tmp_list[min_index:max_index])
    res_list = []
    for item in doc_list_instance:
        res_list.append({'doc_id': item._id,
                         'doc_title': item.name,
                         'doc_time': item.release_time})
    res_list.reverse()  # restore ascending order within the page
    return (res_list, sum_count, page)
def get(self):
    """Render the registration page, showing the 20 most recent blogs."""
    from global_info import recent_blogs
    from article.blog import Blog
    reg_page = RegisterPage(self)
    recent = Blog.by_ids(recent_blogs.get_slice(-20))
    reg_page['blog_list'] = [blog.obj_info_view_by() for blog in recent]
    return reg_page.render()
def fun_get_doc_list(group=None, page=1, page_cap=10, tag='default'):
    """Fetch one page of a group's documents.

    Returns (res_list, sum_count, page): res_list holds dicts with
    doc_id/doc_title/doc_time in ascending order within the page;
    ([], 0, page) when the tag is unknown or the page is out of range.
    NOTE: Python 2 code -- '/' below is integer division.
    """
    if tag == "default":
        tmp_list = group.blog_list.load_all()
    else:
        tmp_list = group.tag_lib[tag]
    if tmp_list is None:
        return ([], 0, page)
    sum_count = len(tmp_list)
    # ceil(sum_count / page_cap) via the old and/or ternary idiom
    sum_page = (sum_count % page_cap and sum_count / page_cap + 1) or sum_count / page_cap
    if page > sum_page:
        return ([], 0, page)
    tmp_list.reverse()  # newest first for paging
    min_index, max_index = get_index_list_by_page(tmp_list, page=page, page_cap=page_cap)
    doc_list_instance = Blog.get_instances('_id', tmp_list[min_index:max_index])
    res_list = []
    for item in doc_list_instance:
        res_list.append({'doc_id': item._id,
                         'doc_title': item.name,
                         'doc_time': item.release_time})
    res_list.reverse()  # restore ascending order within the page
    return (res_list, sum_count, page)
def fun_get_blog_list_by_tag(user, tag='default', page=1, page_cap=10):
    """Fetch one page of a user's blogs filtered by *tag*.

    Returns (blog_dicts, sum_count); ([], 0) when the tag is unknown or
    the page is out of range.  Each dict carries id/title/time/summary
    and a stripped preview body ending with a "read more" link.
    NOTE: Python 2 code -- '/' below is integer division.
    """
    page = int(page)
    if tag == "default":
        tmp_list = user.blog_list.load_all()
    else:
        tmp_list = user.tag_lib[tag]
    if tmp_list is None:
        return ([], 0)
    sum_count = len(tmp_list)
    # ceil(sum_count / page_cap) via the old and/or ternary idiom
    sum_page = (sum_count % page_cap and sum_count / page_cap + 1) or sum_count / page_cap
    if page > sum_page:
        return ([], 0)
    tmp_list.reverse()  # newest first for paging
    min_index, max_index = get_index_list_by_page(tmp_list, page=page, page_cap=page_cap)
    blog_con = Blog.get_instances('_id', tmp_list[min_index:max_index])
    tmp_con = []
    for iii in blog_con:
        tmp_con.append({'id': iii._id,
                        'title': iii.name,
                        'time': iii.release_time,
                        'summary': iii.abstract,
                        'view_body': strip_tags(iii.view_body) + '<a target="_blank" class="blog_detail" href="/blog/' + str(iii._id) + '">...</a>'})
    tmp_con.reverse()  # restore ascending order within the page
    return (tmp_con, sum_count)
def get(self):
    """Render the Atom feed built from the ten most recent blogs."""
    feed_page = AtomRecentBlogPage(self)
    recent = sorted(Blog.by_ids(recent_blogs.get_slice(-10)))
    feed_page.add_entries(*[blog.atom_info for blog in recent])
    feed_page.init_page()
    feed_page.render()
def fun_get_feed_by_id(user=None, obj_id=0, page_cap=20):
    """Load the next *page_cap* recommended-feed blogs after *obj_id*.

    NOTE(review): this chunk appears truncated -- tmp_blog_list /
    last_id / is_all are built but no final return is visible here.
    """
    # find by the post id
    AFW_Group = BasicGroup(_id=AFWConfig.afewords_group_id)
    feed_id_list = AFW_Group.recommended_list.load_all()
    feed_id_list.reverse()  # newest first
    if obj_id == '0' or obj_id == 0:
        index = -1  # start from the head of the feed
    else:
        index = index_at_list(feed_id_list, convert_id(obj_id))
    if index is None:
        return [1, '操作出错!']
    load_list_id = feed_id_list[index + 1:index + page_cap + 1]
    # a short page means we reached the end of the feed
    if len(load_list_id) < page_cap:
        is_all = 'yes'
    else:
        is_all = 'no'
    if load_list_id == []:
        last_id = 0
    else:
        last_id = load_list_id[len(load_list_id) - 1]
    tmp_blog_con = Blog.get_instances('_id', load_list_id)
    tmp_blog_list = []
    for one_blog in tmp_blog_con:
        try:
            tmp_user = User(_id=one_blog.author_id)
        except Exception, e:
            # author gone: skip this entry rather than fail the page
            log_error('User is not exist, User ID: ' + one_blog.author_id)
            continue
        else:
            tmp_avatar = tmp_user.avatar
            tmp_blog_list.append({'blog_id': str(one_blog._id),
                                  'title': one_blog.name,
                                  'author_id': str(tmp_user._id),
                                  'view_body': strip_tags(one_blog.view_body) + '<a target="_blank" class="blog_detail" href="/blog/' + str(one_blog._id) + '">...</a>',
                                  'summary': one_blog.abstract,
                                  'author_avatar': tmp_avatar.thumb_name,
                                  'author_name': tmp_user.name})
def own_data(self):
    """Collect all article objects this user owns: non-empty drafts
    plus every published blog."""
    drafts = list_generator([(key, val[0]) for key, val in self.drafts_lib.items()])
    data = [obj for obj in drafts if obj is not None]
    data.extend(Blog.by_ids(self.blog_list.load_all()))
    return data
def get(self):
    """Render the feed page: an id list of older feed entries plus the
    rendered info for the ten most recent blogs."""
    handler_page = AFFeedPage(self)
    usr = self.current_user
    try:
        blogs = sorted(Blog.by_ids(recent_blogs.get_slice(-10)))
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallows
        # SystemExit/KeyboardInterrupt; keep the best-effort fallback.
        blogs = []
    handler_page["id_list"] = [str(each) + "##blog"
                               for each in (recent_blogs.load_all()[100:-10][::-1])
                               if each]
    handler_page["feed_list"] = [each.obj_info_view_by("basic_info", usr=usr, env=None)
                                 for each in blogs]
    handler_page.render()
def fun_do_like(user, kind="blog", obj_id='0', want="like"):
    """Like/unlike (*want*) an object of type *kind* for *user*.

    NOTE(review): this chunk appears truncated -- only the object
    lookup/validation is visible; the actual like/unlike handling is
    not shown here.
    """
    try:
        if kind == "blog":
            obj = Blog(_id=obj_id)
        else:
            # only blogs are supported so far
            return [1, '暂不支持此类型!']
    except Exception, e:
        # constructor raises when the id does not exist
        logging.error('Blog not exist, id %s ' % str(obj_id))
        return [1, '参数错误,对象不存在!']
def fun_update_article(user, group=None, group_id='-1', article_id = 0,
                       article_type='blog', title='', summary='', body='',
                       permission='public', keys=[], classes=[],
                       father_id='-1', father_type='blog', do="post",
                       ref_comments='', isedit='false'):
    """Create or update an article for *user*; delegates blog saving to
    fun_update_blog.

    NOTE(review): only the 'blog' branch is visible in this chunk.
    WARNING: mutable defaults (keys=[], classes=[]) are shared across
    calls; left unchanged here in case callers rely on identity.
    """
    if keys != []:
        keys = keys_to_list(keys)
    if article_type == "blog":
        # write blog start
        try:
            AF_Object = Blog(_id=article_id)
            # an existing blog may only be edited by its author
            if str(AF_Object.author_id) != str(user._id):
                return [1, '无权限操作他人的文章!']
        except Exception, e:
            # no such blog: create a fresh draft owned by this user
            AF_Object = Blog()
            AF_Object.author = user
            AF_Object.env = user
            article_id = AF_Object._id
            (user.drafts_lib).add_obj(AF_Object)
        return fun_update_blog(user=user, blog=AF_Object, title=title,
                               summary=summary, body=body, keys=keys,
                               permission=permission, classes=classes, do=do)
def get(self):
    """Render the feed page: an id list of older feed entries plus the
    rendered info for the ten most recent blogs."""
    handler_page = AFFeedPage(self)
    usr = self.current_user
    try:
        blogs = sorted(Blog.by_ids(recent_blogs.get_slice(-10)))
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallows
        # SystemExit/KeyboardInterrupt; keep the best-effort fallback.
        blogs = []
    handler_page['id_list'] = [str(each) + '##blog'
                               for each in (recent_blogs.load_all()[100:-10][::-1])
                               if each]
    handler_page['feed_list'] = [each.obj_info_view_by('basic_info', usr=usr, env=None)
                                 for each in blogs]
    handler_page.render()
def fun_get_blog_list_by_tag(user, tag='default', page=1, page_cap=10):
    """Fetch one page of a user's blogs filtered by *tag*.

    Returns (blog_dicts, sum_count); ([], 0) when the tag is unknown or
    the page is out of range.  Each dict carries id/title/time/summary
    and a stripped preview body ending with a "read more" link.
    NOTE: Python 2 code -- '/' below is integer division.
    """
    page = int(page)
    if tag == "default":
        tmp_list = user.blog_list.load_all()
    else:
        tmp_list = user.tag_lib[tag]
    if tmp_list is None:
        return ([], 0)
    sum_count = len(tmp_list)
    # ceil(sum_count / page_cap) via the old and/or ternary idiom
    sum_page = (sum_count % page_cap and sum_count / page_cap + 1) or sum_count / page_cap
    if page > sum_page:
        return ([], 0)
    tmp_list.reverse()  # newest first for paging
    min_index, max_index = get_index_list_by_page(tmp_list, page=page, page_cap=page_cap)
    blog_con = Blog.get_instances('_id', tmp_list[min_index:max_index])
    tmp_con = []
    for iii in blog_con:
        tmp_con.append({'id': iii._id,
                        'title': iii.name,
                        'time': iii.release_time,
                        'summary': iii.abstract,
                        'view_body': strip_tags(iii.view_body) + '<a target="_blank" class="blog_detail" href="/blog/' + str(iii._id) + '">...</a>'})
    tmp_con.reverse()  # restore ascending order within the page
    return (tmp_con, sum_count)
def fun_get_feed_by_id(user=None, obj_id=0, page_cap=20):
    """Load the next *page_cap* recommended-feed blogs after *obj_id*.

    NOTE(review): this chunk appears truncated -- tmp_blog_list /
    last_id / is_all are built but no final return is visible here.
    """
    # find by the post id
    AFW_Group = BasicGroup(_id=AFWConfig.afewords_group_id)
    feed_id_list = AFW_Group.recommended_list.load_all()
    feed_id_list.reverse()  # newest first
    if obj_id == '0' or obj_id == 0:
        index = -1  # start from the head of the feed
    else:
        index = index_at_list(feed_id_list, convert_id(obj_id))
    if index is None:
        return [1, '操作出错!']
    load_list_id = feed_id_list[index+1: index+page_cap+1]
    # a short page means we reached the end of the feed
    if len(load_list_id) < page_cap:
        is_all = 'yes'
    else:
        is_all = 'no'
    if load_list_id == []:
        last_id = 0
    else:
        last_id = load_list_id[len(load_list_id)-1]
    tmp_blog_con = Blog.get_instances('_id', load_list_id)
    tmp_blog_list = []
    for one_blog in tmp_blog_con:
        try:
            tmp_user = User(_id=one_blog.author_id)
        except Exception, e:
            # author gone: skip this entry rather than fail the page
            log_error('User is not exist, User ID: ' + one_blog.author_id)
            continue
        else:
            tmp_avatar = tmp_user.avatar
            tmp_blog_list.append({'blog_id': str(one_blog._id),
                                  'title': one_blog.name,
                                  'author_id': str(tmp_user._id),
                                  'view_body': strip_tags(one_blog.view_body) + '<a target="_blank" class="blog_detail" href="/blog/' + str(one_blog._id) + '">...</a>',
                                  'summary': one_blog.abstract,
                                  'author_avatar': tmp_avatar.thumb_name,
                                  'author_name': tmp_user.name})
def get(self, bid):
    """Show a single blog post; 'preview=yes' renders without counting
    the view."""
    from article.blog import Blog
    viewer = self.current_user
    stats = self.userstat
    blog = Blog.by_id(bid)
    if blog is None:
        return self.send_error(404)
    is_preview = self.get_esc_arg('preview', 'no') == 'yes'
    page = BlogPage(self)
    page['ispreview'] = is_preview
    page['article'] = blog.obj_info_view_by('view_info', usr=viewer, env=viewer)
    # count the view only for real visits the stat tracker accepts
    if not is_preview and stats.view_article(blog):
        blog.statistics.view_count += 1
    page['islike'] = False if viewer is None else viewer.is_like(blog)
    page['title'] = page['article']['title'] + u' - 子曰博文'
    page['meta_keywords'] = page['article']['keywords']
    page['description'] = page['article']['title']
    page.render()
def get(self, bid):
    """Show a single blog post; 'preview=yes' renders without counting
    the view."""
    from article.blog import Blog
    viewer = self.current_user
    stats = self.userstat
    blog = Blog.by_id(bid)
    if blog is None:
        return self.send_error(404)
    is_preview = self.get_esc_arg("preview", "no") == "yes"
    page = BlogPage(self)
    page["ispreview"] = is_preview
    page["article"] = blog.obj_info_view_by("view_info", usr=viewer, env=viewer)
    # count the view only for real visits the stat tracker accepts
    if not is_preview and stats.view_article(blog):
        blog.statistics.view_count += 1
    page["islike"] = False if viewer is None else viewer.is_like(blog)
    page["title"] = page["article"]["title"] + u" - 子曰博文"
    page["meta_keywords"] = page["article"]["keywords"]
    page["description"] = page["article"]["title"]
    page.render()
def blogs_info_view_by(self, usr=None, tagname=None, vfrom=0, vlim=20):
    """Return (info_list, total) for one page of this user's blogs,
    rendered as 'basic_info' views for viewer *usr*.

    NOTE(review): indentation reconstructed -- the paging block below
    must be inside the else branch, because blogs_from_tag already
    returns a sliced list of Blog objects for tagged queries.
    """
    from article.blog import Blog
    if tagname and tagname != 'default':
        # tagged query: blogs_from_tag does the paging itself
        toview, len_toview = self.blogs_from_tag(tagname, vfrom, vlim)
    else:
        toview = self.blogids
        len_toview = len(toview)
        if vfrom > len_toview:
            return [], len_toview
        if vfrom + vlim > len_toview:
            vlim = len_toview - vfrom
        toview = toview[vfrom:(vfrom+vlim)]
        tmp = set(toview)
        toview = Blog.by_ids(toview)
        toview.sort()
        tmp2 = set(each._id for each in toview)
        # try to remove ids whose blog no longer exists
        self.lib.blog_list.pull(*tuple(tmp - tmp2))
    return [each.obj_info_view_by('basic_info', usr, env=usr) for each in toview], len_toview
def fun_update_comment(user, group_id='-1', article_id=0, article_type='blog',
                       title='', summary='', body='', permission='public',
                       keys=[], classes=[], father_id='-1', father_type='blog',
                       do="post", ref_comments=''):
    """Create/update a comment under the parent identified by
    father_id/father_type.

    NOTE(review): this chunk appears truncated -- only the 'blog'
    parent lookup is visible.  WARNING: mutable defaults (keys=[],
    classes=[]) are shared across calls; left unchanged here.
    """
    # update comment, all comments
    group = None
    if father_type == 'blog':
        try:
            AF_Object = Blog(_id=father_id)
        except Exception, e:
            # parent blog does not exist
            return [1, '文章不存在!']
def fun_get_like_list(user, page=1, page_cap=10):
    """One page of the user's liked blogs as {'id', 'title'} dicts,
    plus the total like count; ([], 0) when the page is out of range.
    NOTE: Python 2 code -- dict.keys() returns a sortable list and
    '/' is integer division.
    """
    tmp_dict = user.favorite_lib.load_all()
    tmp_list = tmp_dict.keys()
    tmp_list.sort()
    tmp_list.reverse()  # newest first for paging
    sum_count = len(tmp_list)
    # ceil(sum_count / page_cap) via the old and/or ternary idiom
    sum_page = (sum_count % page_cap and sum_count / page_cap + 1) or sum_count / page_cap
    if page > sum_page:
        return ([], 0)
    min_index, max_index = get_index_list_by_page(tmp_list, page=page, page_cap=page_cap)
    tmp_con_blog = Blog.get_instances('_id', tmp_list[min_index:max_index])
    like_list = []
    for kkk in tmp_con_blog:
        like_list.append({'id': kkk._id, 'title': kkk.name})
    like_list.reverse()  # restore ascending order within the page
    return (like_list, sum_count)
def blogs_info_view_by(self, usr=None, tagname=None, vfrom=0, vlim=20):
    """Return (info_list, total) for one page of this user's blogs,
    rendered as 'basic_info' views for viewer *usr*.

    NOTE(review): indentation reconstructed -- the paging block below
    must be inside the else branch, because blogs_from_tag already
    returns a sliced list of Blog objects for tagged queries.
    """
    from article.blog import Blog
    if tagname and tagname != 'default':
        # tagged query: blogs_from_tag does the paging itself
        toview, len_toview = self.blogs_from_tag(tagname, vfrom, vlim)
    else:
        toview = self.blogids
        len_toview = len(toview)
        if vfrom > len_toview:
            return [], len_toview
        if vfrom + vlim > len_toview:
            vlim = len_toview - vfrom
        toview = toview[vfrom:(vfrom + vlim)]
        tmp = set(toview)
        toview = Blog.by_ids(toview)
        toview.sort()
        tmp2 = set(each._id for each in toview)
        # try to remove ids whose blog no longer exists
        self.lib.blog_list.pull(*tuple(tmp - tmp2))
    return [
        each.obj_info_view_by('basic_info', usr, env=usr) for each in toview
    ], len_toview
def fun_get_recommender_list(blog_id):
    """Return up to ten blogs related to *blog_id* as
    {'blog_id', 'blog_title'} dicts; [] on any failure.
    Alternates between two similarity tables by time of day."""
    try:
        db_cls = SimilarityDB_A if time.localtime().tm_hour > 12 else SimilarityDB_B
        sim_db = db_cls(objectid=blog_id)
        if sim_db is None:
            return []
        candidates = Blog.get_instances('_id', sim_db['sim_list'][0:10])
        return [{'blog_id': b._id, 'blog_title': b.name} for b in candidates]
    except TypeError:
        # no similarity entry for this blog: no recommendations
        return []
    except Exception:
        logging.error(traceback.format_exc())
        return []
def fun_get_recommender_list(blog_id):
    """Return up to ten blogs related to *blog_id* as
    {'blog_id', 'blog_title'} dicts; [] on any failure.
    Alternates between two similarity tables by time of day."""
    try:
        db_cls = SimilarityDB_A if time.localtime().tm_hour > 12 else SimilarityDB_B
        sim_db = db_cls(objectid=blog_id)
        if sim_db is None:
            return []
        candidates = Blog.get_instances('_id', sim_db['sim_list'][0:10])
        return [{'blog_id': b._id, 'blog_title': b.name} for b in candidates]
    except TypeError:
        # no similarity entry for this blog: no recommendations
        return []
    except Exception:
        logging.error(traceback.format_exc())
        return []
def fun_get_like_list(user, page=1, page_cap=10):
    """One page of the user's liked blogs as {'id', 'title'} dicts,
    plus the total like count; ([], 0) when the page is out of range.
    NOTE: Python 2 code -- dict.keys() returns a sortable list and
    '/' is integer division.
    """
    tmp_dict = user.favorite_lib.load_all()
    tmp_list = tmp_dict.keys()
    tmp_list.sort()
    tmp_list.reverse()  # newest first for paging
    sum_count = len(tmp_list)
    # ceil(sum_count / page_cap) via the old and/or ternary idiom
    sum_page = (sum_count % page_cap and sum_count / page_cap + 1) or sum_count / page_cap
    if page > sum_page:
        return ([], 0)
    min_index, max_index = get_index_list_by_page(tmp_list, page=page, page_cap=page_cap)
    tmp_con_blog = Blog.get_instances('_id', tmp_list[min_index:max_index])
    like_list = []
    for kkk in tmp_con_blog:
        like_list.append({'id': kkk._id, 'title': kkk.name})
    like_list.reverse()  # restore ascending order within the page
    return (like_list, sum_count)
def fun_update_article(user, group=None, group_id='-1', article_id=0,
                       article_type='blog', title='', summary='', body='',
                       permission='public', keys=[], classes=[],
                       father_id='-1', father_type='blog', do="post",
                       ref_comments='', isedit='false'):
    """Create or update an article for *user*; delegates blog saving to
    fun_update_blog.

    NOTE(review): only the 'blog' branch is visible in this chunk.
    WARNING: mutable defaults (keys=[], classes=[]) are shared across
    calls; left unchanged here in case callers rely on identity.
    """
    if keys != []:
        keys = keys_to_list(keys)
    if article_type == "blog":
        # write blog start
        try:
            AF_Object = Blog(_id=article_id)
            # an existing blog may only be edited by its author
            if str(AF_Object.author_id) != str(user._id):
                return [1, '无权限操作他人的文章!']
        except Exception, e:
            # no such blog: create a fresh draft owned by this user
            AF_Object = Blog()
            AF_Object.author = user
            AF_Object.env = user
            article_id = AF_Object._id
            (user.drafts_lib).add_obj(AF_Object)
        return fun_update_blog(user=user, blog=AF_Object, title=title,
                               summary=summary, body=body, keys=keys,
                               permission=permission, classes=classes, do=do)
#!/usr/bin/env python from article.blog import Blog from user import User bids = ['5020dbdb37251703ac000005', '502266ba3725170e83000002', '502267a83725170e82000002', '502267a83725170e85000002', '50226ddd3725170ef3000003', '50227fef3725170f90000008'] blogobjs = Blog.by_ids(bids) user_af = User.find_one({'email':'*****@*****.**'}) for each in blogobjs: each.set_propertys(env=user_af, author=user_af) user_af.add_tags(each.tag) user_af.post_blog(each)
AF_User = User(_id=rrauthor) rrname = AF_User.name rravatar = (AF_User.avatar).thumb_name return [0, rrtitle, rrabstract, rrview, rrname, rravatar, rrtime] def fun_get_feed_home(page): try: page = int(page) except Exception, e: return [1, False, '页数出错!'] tmp_con = BlogPool().load_all() tmp_con_test = BlogPool().get_slice(-20,10) tmp_blog = Blog.get_instances('_id', tmp_con) tmp_blog_list = [] for kkk in tmp_blog: tmp_user = User(_id=kkk.author_id) tmp_avatar = tmp_user.avatar tmp_blog_list.append({'blog_id':str(kkk._id), 'title':kkk.name, 'author_id':str(tmp_user._id), 'blog_body':kkk.view_body, 'summary':kkk.abstract, 'author_avatar': tmp_avatar.thumb_name, 'author_name':tmp_user.name}) #print tmp_blog_list # 20 page one time return [0, False, tmp_blog_list] def fun_get_recommender_list(blog_id): try: hour = time.localtime().tm_hour if hour > 12:
feed_id_list = [] #BlogPool().load_all() feed_id_list.reverse() try: page = int(page) except Exception, e: return [1, '页数错误!'] page_start = page * count page_end = (page + 1) * count load_list_id = feed_id_list[page_start:page_end] if len(load_list_id) < count: isall = True else: isall = False tmp_blog_con = Blog.get_instances('_id', load_list_id) tmp_blog_list = [] for one_blog in tmp_blog_con: if one_blog.author_id is not None and one_blog.author_id != '': tmp_blog_list.append({ 'blog_id': str(one_blog._id), 'title': one_blog.name, 'author_id': str(one_blog.author_id), 'author_name': one_blog.author_name }) tmp_blog_list.reverse() return [0, tmp_blog_list, isall]
#!/usr/bin/env python from user import User from article.blog import Blog user_all = [User(each) for each in User.datatype.find()] for each in user_all: tag_lib = each.lib.tag_lib tmp = tag_lib.load_all() for tag in tmp.keys(): if tag == u'alltags': continue if not tmp[tag]: continue tmp[tag] = [[e._id, e.release_time] for e in Blog.by_ids( [each[0] if isinstance(each, list) else each for each in tmp[tag]])] tag_lib.set_all(tmp)
url_tmp = urlparse.urlparse(section_url) url_tmp_info = url_tmp.path.split('/') if len(url_tmp_info ) < 5 or url_tmp_info[1] != 'book' or url_tmp_info[3] != 'catalog': return [1, '链接错误!'] book_id = url_tmp_info[2] node_id = url_tmp_info[4] try: book = Catalog(_id=book_id) except Exception, err: logging.error(traceback.format_exc()) logging.error('Catalog not exist, id %s' % book_id) return [1, '未找到该知识谱!'] if article_type == "blog": try: AF_Object = Blog(_id=article_id) if not AF_Object.is_posted: return [1, '该文章未发布!'] except Exception, err: logging.error(traceback.format_exc()) logging.error('%s not exist, id %s' % (article_type, article_id)) return [1, '未找到该文章!'] else: return [1, '暂时不支持!'] AF_Object.add_to_catalog(book, node_id) #book.recommend_blog_to(url_query["id"], AF_Object) restr = '成功添加到知识谱<a href="/book/%s" target="_blank">《%s》</a>的章节中!' % ( book_id, book.name) return [0, restr]
# find post by page pass def fun_get_feed_by_page_simple(page): count = 10 feed_id_list = [] #BlogPool().load_all() feed_id_list.reverse() try: page = int(page) except Exception, e: return [1, '页数错误!'] page_start = page * count page_end = (page+1) * count load_list_id = feed_id_list[page_start:page_end] if len(load_list_id) < count: isall = True else: isall = False tmp_blog_con = Blog.get_instances('_id', load_list_id) tmp_blog_list = [] for one_blog in tmp_blog_con: if one_blog.author_id is not None and one_blog.author_id !='': tmp_blog_list.append({'blog_id':str(one_blog._id), 'title':one_blog.name, 'author_id':str(one_blog.author_id), 'author_name':one_blog.author_name}) tmp_blog_list.reverse() return [0, tmp_blog_list, isall]
isnew = False elif article_type == "book-about": isnew = False try: book = Catalog(_id=group_id) AF_Object = book.about limit = book.authority_verify(user) if test_auth(limit, A_WRITE) is False: return [1, '您无权修改摘要!'] except Exception, err: logging.error(traceback.format_exc()) logging.error('Catalog not exist, id %s' % group_id) return [1, '未找到知识谱!'] elif article_type == "blog": try: AF_Object = Blog(_id=article_id) isnew = False except Exception, e: logging.error(traceback.format_exc()) logging.error('%s not exist, id %s' % (article_type, article_id)) AF_Object = Blog() AF_Object.author = user AF_Object.env = user article_id = str(AF_Object._id) isnew = True (user.drafts_lib).add_obj(AF_Object) elif article_type == "comment": return [1, '完善中'] elif article_type in Group_Article: # for group isnew = False
#!/usr/bin/env python from user import User from article.blog import Blog usrs = [User(data=each) for each in User.datatype.find()] for each in usrs: blogs = Blog.by_ids(each.lib.blog_list.load_all()) for eb in blogs: eb.env = each eb.author = eb.author each.reset_blog_tags(eb)
#!/usr/bin/env python from global_info import * from article.blog import Blog from article.catalog import Catalog from user import User recent_blogs.set_all([None for i in range(500)]) blog_all = sorted([Blog(each) for each in Blog.datatype.find()], reverse=True) for each in blog_all: if each.is_posted is False: continue recent_blogs.push(each._id) recent_blogs.pop_head() recent_books.set_all([None for i in range(500)]) book_all = sorted([Catalog(each) for each in Catalog.datatype.find()], reverse=True) for each in book_all: recent_books.push(each._id) recent_books.pop_head() recent_users.set_all([None for i in range(500)]) user_all = sorted([User(each) for each in User.datatype.find()], reverse=True) for each in user_all: recent_users.push(each._id) recent_users.pop_head() unreg_users.set_all([None for i in range(5000)]) recent_feedbacks.set_all([None for i in range(500)])
return [1, '文章不存在!'] else: # for group if father_type == 'group-notice': try: AF_Object = Bulletin(_id=father_id) group = BasicGroup(_id=AF_Object.group_id) if group.get_member_type(user) is None: return [1, '您不是该小组成员'] except Exception, e: logging.error(traceback.format_exc()) logging.error('%s not exist, id %s' % (father_type, father_id)) return [1, '该公告不存在!'] elif father_type == "group-doc": try: AF_Object = Blog(_id=father_id) group = BasicGroup(_id=AF_Object.group_id) if group.get_member_type(user) is None: return [1, '您不是该小组成员'] except Exception, e: logging.error(traceback.format_exc()) logging.error('%s not exist, id %s' % (father_type, father_id)) return [1, '该文档不存在!'] elif father_type == "group-topic": try: AF_Object = Topic(_id=father_id) group = BasicGroup(_id=AF_Object.group_id) if group.get_member_type(user) is None: return [1, '您不是该小组成员'] AF_Object.update_time = datetime.datetime.now() group.topic_list.pull(AF_Object._id)
#!/usr/bin/env python from article.blog import Blog from article.about import About from article.comment import Comment from article.reference import Reference from article.tableform import Tableform from article.langcode import Langcode from tornado.escape import xhtml_unescape blogs_all = [Blog(each) for each in Blog.datatype.find()] comments_all = [Comment(each) for each in Comment.datatype.find()] about_all = About.find() ref_all = Reference.find() table_all = Tableform.find() code_all = Langcode.find() for each in blogs_all + comments_all + about_all: each.abstract = xhtml_unescape(each.abstract) each.body = xhtml_unescape(each.body) for each in ref_all: each.body = xhtml_unescape(each.body) for each in code_all: each.code = xhtml_unescape(each.code) for each in table_all: each.tableform = xhtml_unescape(each.tableform)
def fun_view_blog(bid):
    """Load blog *bid* for viewing.

    NOTE(review): this chunk appears truncated -- only the failure
    path is visible; the success return is not shown here.
    """
    try:
        AF_Article = Blog(_id=bid)
    except Exception, e:
        # constructor raises when the id does not exist
        return [1, '无此文章!', '', '', '', '']
return [1, "参数错误!"] url_tmp = urlparse.urlparse(section_url) url_tmp_info = url_tmp.path.split("/") if len(url_tmp_info) < 5 or url_tmp_info[1] != "book" or url_tmp_info[3] != "catalog": return [1, "链接错误!"] book_id = url_tmp_info[2] node_id = url_tmp_info[4] try: book = Catalog(_id=book_id) except Exception, err: logging.error(traceback.format_exc()) logging.error("Catalog not exist, id %s" % book_id) return [1, "未找到该知识谱!"] if article_type == "blog": try: AF_Object = Blog(_id=article_id) if not AF_Object.is_posted: return [1, "该文章未发布!"] except Exception, err: logging.error(traceback.format_exc()) logging.error("%s not exist, id %s" % (article_type, article_id)) return [1, "未找到该文章!"] else: return [1, "暂时不支持!"] AF_Object.add_to_catalog(book, node_id) # book.recommend_blog_to(url_query["id"], AF_Object) restr = '成功添加到知识谱<a href="/book/%s" target="_blank">《%s》</a>的章节中!' % (book_id, book.name) return [0, restr] def fun_recommended_to_book(user=None, book_id="", node_id="", article_url=""):