def fun_get_blog_list_by_tag(user, tag='default', page=1, page_cap=10):
    """Return (blog summaries, total count) for one page of a user's blogs.

    tag == 'default' means "all blogs"; any other tag selects the entry
    from the user's tag library.  Returns ([], 0) when the tag is unknown
    or the requested page is out of range.
    """
    page = int(page)  # callers may pass the page number as a string
    if tag == "default":
        tmp_list = user.blog_list.load_all()
    else:
        tmp_list = user.tag_lib[tag]
    if tmp_list is None:
        return ([], 0)
    sum_count = len(tmp_list)
    # Ceiling division for the page count.  '//' keeps integral floor
    # semantics on both Python 2 and 3 (the original '/' silently becomes
    # float division under Python 3).
    sum_page = (sum_count + page_cap - 1) // page_cap
    if page > sum_page:
        return ([], 0)
    tmp_list.reverse()  # newest entries first
    min_index, max_index = get_index_list_by_page(tmp_list, page=page,
                                                  page_cap=page_cap)
    blog_con = Blog.get_instances('_id', tmp_list[min_index:max_index])
    tmp_con = []
    for one_blog in blog_con:
        tmp_con.append({
            'id': one_blog._id,
            'title': one_blog.name,
            'time': one_blog.release_time,
            'summary': one_blog.abstract,
            'view_body': strip_tags(one_blog.view_body) +
                '<a target="_blank" class="blog_detail" href="/blog/' +
                str(one_blog._id) + '">...</a>',
        })
    tmp_con.reverse()
    return (tmp_con, sum_count)
def fun_get_doc_list(group=None, page=1, page_cap=10, tag='default'):
    """Return (doc summaries, total count, page) for one page of a group's docs.

    tag == 'default' selects the group's full blog list; anything else the
    matching tag-library entry.  Unknown tag or out-of-range page yields
    ([], 0, page).
    """
    page = int(page)  # added for consistency with fun_get_blog_list_by_tag
    if tag == "default":
        tmp_list = group.blog_list.load_all()
    else:
        tmp_list = group.tag_lib[tag]
    if tmp_list is None:
        return ([], 0, page)
    sum_count = len(tmp_list)
    # Ceiling division; '//' keeps integral floor semantics on Python 3
    # (plain '/' would silently become float division there).
    sum_page = (sum_count + page_cap - 1) // page_cap
    if page > sum_page:
        return ([], 0, page)
    tmp_list.reverse()  # newest entries first
    min_index, max_index = get_index_list_by_page(tmp_list, page=page,
                                                  page_cap=page_cap)
    doc_list_instance = Blog.get_instances('_id', tmp_list[min_index:max_index])
    res_list = []
    for item in doc_list_instance:
        res_list.append({
            'doc_id': item._id,
            'doc_title': item.name,
            'doc_time': item.release_time,
        })
    res_list.reverse()
    return (res_list, sum_count, page)
def fun_get_doc_list(group=None, page=1, page_cap=10, tag='default'):
    """Return (doc summaries, total count, page) for one page of a group's docs.

    tag == 'default' selects the group's full blog list; anything else the
    matching tag-library entry.  Unknown tag or out-of-range page yields
    ([], 0, page).
    """
    page = int(page)  # accept numeric strings like the other pagination helpers
    tmp_list = (group.blog_list.load_all() if tag == "default"
                else group.tag_lib[tag])
    if tmp_list is None:
        return ([], 0, page)
    sum_count = len(tmp_list)
    # Ceiling division with '//' so the result stays an int on Python 3
    # (the original '/' would produce a float there).
    sum_page = (sum_count + page_cap - 1) // page_cap
    if page > sum_page:
        return ([], 0, page)
    tmp_list.reverse()  # newest entries first
    min_index, max_index = get_index_list_by_page(tmp_list, page=page,
                                                  page_cap=page_cap)
    doc_list_instance = Blog.get_instances('_id', tmp_list[min_index:max_index])
    res_list = [{'doc_id': item._id,
                 'doc_title': item.name,
                 'doc_time': item.release_time}
                for item in doc_list_instance]
    res_list.reverse()
    return (res_list, sum_count, page)
def fun_get_feed_by_id(user=None, obj_id=0, page_cap=20):
    """Collect up to page_cap recommended blog entries after obj_id.

    obj_id of 0 or '0' starts from the newest entry; otherwise the page
    begins just after obj_id's position in the recommended list.  Returns
    [1, <error message>] when obj_id cannot be located.

    NOTE(review): this chunk appears truncated -- is_all and last_id are
    computed but the visible code never returns them; the function
    presumably continues past this view.  Confirm against the full file.
    """
    # find by the post id
    #feed_id_list = []
    #BlogPool().load_all()
    AFW_Group = BasicGroup(_id=AFWConfig.afewords_group_id)
    feed_id_list = AFW_Group.recommended_list.load_all()
    feed_id_list.reverse()  # newest first
    #feed_id_list = [str(kk) for kk in feed_id_list]
    #print feed_id_list
    if obj_id == '0' or obj_id == 0:
        index = -1  # slice below then starts at element 0
    else:
        index = index_at_list(feed_id_list, convert_id(obj_id))
    if index is None:
        return [1, '操作出错!']
    load_list_id = feed_id_list[index + 1:index + page_cap + 1]
    if len(load_list_id) < page_cap:
        is_all = 'yes'  # fewer than a full page left -> feed exhausted
    else:
        is_all = 'no'
    #print 'index', index
    #print 'load list', load_list_id
    if load_list_id == []:
        last_id = 0
    else:
        last_id = load_list_id[len(load_list_id) - 1]
    tmp_blog_con = Blog.get_instances('_id', load_list_id)
    tmp_blog_list = []
    for one_blog in tmp_blog_con:
        try:
            tmp_user = User(_id=one_blog.author_id)
        except Exception, e:  # Python 2 syntax; skip entries with a bad author
            log_error('User is not exist, User ID: ' + one_blog.author_id)
            continue
        else:
            tmp_avatar = tmp_user.avatar
            tmp_blog_list.append({
                'blog_id': str(one_blog._id),
                'title': one_blog.name,
                'author_id': str(tmp_user._id),
                'view_body': strip_tags(one_blog.view_body) + '<a target="_blank" class="blog_detail" href="/blog/' + str(one_blog._id) + '">...</a>',
                'summary': one_blog.abstract,
                'author_avatar': tmp_avatar.thumb_name,
                'author_name': tmp_user.name
            })
def fun_get_feed_by_id(user=None, obj_id=0, page_cap=20):
    """Load the page of recommended blogs that follows obj_id.

    obj_id of 0 / '0' means "start from the top".  Returns [1, <msg>]
    when obj_id is not present in the recommended list.

    NOTE(review): visible chunk ends inside the loop-body append; is_all
    and last_id are set but no success return appears here -- the rest of
    the function lies outside this view.
    """
    # find by the post id
    #feed_id_list = []
    #BlogPool().load_all()
    AFW_Group = BasicGroup(_id=AFWConfig.afewords_group_id)
    feed_id_list = AFW_Group.recommended_list.load_all()
    feed_id_list.reverse()  # newest first
    #feed_id_list = [str(kk) for kk in feed_id_list]
    #print feed_id_list
    if obj_id == '0' or obj_id == 0:
        index = -1  # the slice below then begins at the list head
    else:
        index = index_at_list(feed_id_list, convert_id(obj_id))
    if index is None:
        return [1, '操作出错!']
    load_list_id = feed_id_list[index+1: index+page_cap+1]
    if len(load_list_id) < page_cap:
        is_all = 'yes'  # short page means nothing left after it
    else:
        is_all = 'no'
    #print 'index', index
    #print 'load list', load_list_id
    if load_list_id == []:
        last_id = 0
    else:
        last_id = load_list_id[len(load_list_id)-1]
    tmp_blog_con = Blog.get_instances('_id', load_list_id)
    tmp_blog_list = []
    for one_blog in tmp_blog_con:
        try:
            tmp_user = User(_id=one_blog.author_id)
        except Exception, e:  # Python 2 syntax; drop entries whose author lookup fails
            log_error('User is not exist, User ID: ' + one_blog.author_id)
            continue
        else:
            tmp_avatar = tmp_user.avatar
            tmp_blog_list.append({'blog_id':str(one_blog._id),
                'title':one_blog.name,
                'author_id':str(tmp_user._id),
                'view_body':strip_tags(one_blog.view_body) + '<a target="_blank" class="blog_detail" href="/blog/'+ str(one_blog._id) +'">...</a>',
                'summary':one_blog.abstract,
                'author_avatar': tmp_avatar.thumb_name,
                'author_name':tmp_user.name})
def fun_get_blog_list_by_tag(user, tag='default', page=1, page_cap=10):
    """Return (blog summaries, total count) for one page of a user's blogs.

    tag == 'default' selects all blogs; otherwise the user's tag-library
    entry.  ([], 0) signals an unknown tag or out-of-range page.
    """
    page = int(page)  # page may arrive as a string
    tmp_list = (user.blog_list.load_all() if tag == "default"
                else user.tag_lib[tag])
    if tmp_list is None:
        return ([], 0)
    sum_count = len(tmp_list)
    # Ceiling division; '//' is explicit floor division and stays correct
    # on Python 3, where bare '/' would return a float.
    sum_page = (sum_count + page_cap - 1) // page_cap
    if page > sum_page:
        return ([], 0)
    tmp_list.reverse()  # newest entries first
    min_index, max_index = get_index_list_by_page(tmp_list, page=page,
                                                  page_cap=page_cap)
    blog_con = Blog.get_instances('_id', tmp_list[min_index:max_index])
    tmp_con = [{
        'id': iii._id,
        'title': iii.name,
        'time': iii.release_time,
        'summary': iii.abstract,
        'view_body': strip_tags(iii.view_body) +
            '<a target="_blank" class="blog_detail" href="/blog/' +
            str(iii._id) + '">...</a>',
    } for iii in blog_con]
    tmp_con.reverse()
    return (tmp_con, sum_count)
def fun_get_like_list(user, page=1, page_cap=10):
    """Return (liked-blog summaries, total count) for one favorites page."""
    tmp_dict = user.favorite_lib.load_all()
    # sorted(..., reverse=True) replaces keys()/sort()/reverse(): on
    # Python 3 dict.keys() is a view with no .sort(), so the original
    # pattern raises AttributeError there.
    tmp_list = sorted(tmp_dict.keys(), reverse=True)
    sum_count = len(tmp_list)
    # Ceiling division with '//' (stays integral on Python 3).
    sum_page = (sum_count + page_cap - 1) // page_cap
    if page > sum_page:
        return ([], 0)
    min_index, max_index = get_index_list_by_page(tmp_list, page=page,
                                                  page_cap=page_cap)
    tmp_con_blog = Blog.get_instances('_id', tmp_list[min_index:max_index])
    like_list = []
    for one_blog in tmp_con_blog:
        like_list.append({'id': one_blog._id, 'title': one_blog.name})
    like_list.reverse()
    return (like_list, sum_count)
def fun_get_recommender_list(blog_id):
    """Return up to ten {'blog_id', 'blog_title'} dicts similar to blog_id.

    Picks the similarity database by time of day (afternoon -> A,
    morning -> B).  Any failure degrades to an empty list; unexpected
    errors are logged with a traceback.
    """
    try:
        afternoon = time.localtime().tm_hour > 12
        sim_db = (SimilarityDB_A(objectid=blog_id) if afternoon
                  else SimilarityDB_B(objectid=blog_id))
        if sim_db is None:
            return []
        similar_ids = sim_db['sim_list']
        blogs = Blog.get_instances('_id', similar_ids[0:10])
        return [{'blog_id': b._id, 'blog_title': b.name} for b in blogs]
    except TypeError:
        # e.g. sim_db is not subscriptable -- treat as "no recommendations"
        return []
    except Exception:
        logging.error(traceback.format_exc())
        return []
def fun_get_recommender_list(blog_id):
    """Return up to ten {'blog_id', 'blog_title'} entries similar to blog_id.

    Afternoon hours read SimilarityDB_A, morning hours SimilarityDB_B.
    Failures yield an empty list; unexpected ones are logged first.
    """
    try:
        now_hour = time.localtime().tm_hour
        db_cls = SimilarityDB_A if now_hour > 12 else SimilarityDB_B
        record = db_cls(objectid=blog_id)
        if record is None:
            return []
        candidates = Blog.get_instances('_id', record['sim_list'][0:10])
        result = []
        for blog in candidates:
            result.append({'blog_id': blog._id, 'blog_title': blog.name})
        return result
    except TypeError:
        # non-subscriptable record -> no recommendations
        return []
    except Exception:
        logging.error(traceback.format_exc())
        return []
def fun_get_like_list(user, page=1, page_cap=10):
    """Return (liked-blog summaries, total count) for one favorites page."""
    tmp_dict = user.favorite_lib.load_all()
    # Python 3's dict.keys() view has no .sort(); sorted(..., reverse=True)
    # reproduces the original keys()/sort()/reverse() on both versions.
    tmp_list = sorted(tmp_dict.keys(), reverse=True)
    sum_count = len(tmp_list)
    # Ceiling division with explicit '//' (the old '/' turns float on Py3).
    sum_page = (sum_count + page_cap - 1) // page_cap
    if page > sum_page:
        return ([], 0)
    min_index, max_index = get_index_list_by_page(tmp_list, page=page,
                                                  page_cap=page_cap)
    tmp_con_blog = Blog.get_instances('_id', tmp_list[min_index:max_index])
    like_list = [{'id': kkk._id, 'title': kkk.name} for kkk in tmp_con_blog]
    like_list.reverse()
    return (like_list, sum_count)
# find post by page
def fun_get_feed_by_page_simple(page):
    """Return [0, blog summaries, isall] for one fixed-size page of the feed,
    or [1, <error message>] when page is not a valid integer.

    NOTE(review): feed_id_list is hard-coded empty (the BlogPool load is
    commented out), so this currently always yields an empty page --
    behavior preserved; confirm intent before re-enabling the load.
    """
    count = 10  # entries per page
    feed_id_list = []
    #BlogPool().load_all()
    feed_id_list.reverse()  # newest first (no-op while the list is empty)
    try:
        page = int(page)
    except (TypeError, ValueError):
        # Narrowed from the Python-2-only 'except Exception, e' form:
        # only conversion failures should produce the page-error reply.
        return [1, '页数错误!']
    page_start = page * count
    page_end = (page + 1) * count
    load_list_id = feed_id_list[page_start:page_end]
    # A short page means the feed is exhausted.
    isall = len(load_list_id) < count
    tmp_blog_con = Blog.get_instances('_id', load_list_id)
    tmp_blog_list = []
    for one_blog in tmp_blog_con:
        # Skip entries with no usable author id.
        if one_blog.author_id is not None and one_blog.author_id != '':
            tmp_blog_list.append({
                'blog_id': str(one_blog._id),
                'title': one_blog.name,
                'author_id': str(one_blog.author_id),
                'author_name': one_blog.author_name,
            })
    tmp_blog_list.reverse()
    return [0, tmp_blog_list, isall]
# NOTE(review): this chunk begins mid-expression -- the '*' argument splat
# below continues a call whose opening parenthesis lies before this view;
# indentation of the fragment is reconstructed and must be re-checked
# against the full file.
    *('name', 'abstract', 'view_body', 'author_id', 'release_time'))
    # Tail of the enclosing (unseen) function: resolve the author and return
    # [status, title, abstract, body, author name, avatar thumb, time].
    AF_User = User(_id=rrauthor)
    rrname = AF_User.name
    rravatar = (AF_User.avatar).thumb_name
    return [0, rrtitle, rrabstract, rrview, rrname, rravatar, rrtime]


def fun_get_feed_home(page):
    """Build the home-feed blog list.

    NOTE(review): this chunk ends before the function's return statement;
    tmp_con_test is computed but unused in the visible code -- presumably
    leftover debugging.  Confirm against the full file.
    """
    try:
        page = int(page)
    except Exception, e:  # Python 2 except syntax (file-wide convention)
        return [1, False, '页数出错!']
    tmp_con = BlogPool().load_all()
    tmp_con_test = BlogPool().get_slice(-20, 10)  # unused here
    tmp_blog = Blog.get_instances('_id', tmp_con)
    tmp_blog_list = []
    for kkk in tmp_blog:
        tmp_user = User(_id=kkk.author_id)
        tmp_avatar = tmp_user.avatar
        tmp_blog_list.append({
            'blog_id': str(kkk._id),
            'title': kkk.name,
            'author_id': str(tmp_user._id),
            'blog_body': kkk.view_body,
            'summary': kkk.abstract,
            'author_avatar': tmp_avatar.thumb_name,
            'author_name': tmp_user.name
        })
    #print tmp_blog_list
    # 20 page one time
# NOTE(review): this chunk begins mid-function -- the statements below are
# the tail of an unseen definition; indentation is reconstructed.
    # Resolve the author and return the blog-view tuple
    # [status, title, abstract, body, author name, avatar thumb, time].
    AF_User = User(_id=rrauthor)
    rrname = AF_User.name
    rravatar = (AF_User.avatar).thumb_name
    return [0, rrtitle, rrabstract, rrview, rrname, rravatar, rrtime]


def fun_get_feed_home(page):
    """Return [0, False, blog summaries] for the home feed, or
    [1, False, <error message>] when page is not a valid integer.

    NOTE(review): tmp_con_test is computed but never used -- presumably
    leftover debugging; page is validated but does not limit the load.
    """
    try:
        page = int(page)
    except Exception, e:  # Python 2 except syntax (file-wide convention)
        return [1, False, '页数出错!']
    tmp_con = BlogPool().load_all()
    tmp_con_test = BlogPool().get_slice(-20,10)  # unused here
    tmp_blog = Blog.get_instances('_id', tmp_con)
    tmp_blog_list = []
    for kkk in tmp_blog:
        tmp_user = User(_id=kkk.author_id)
        tmp_avatar = tmp_user.avatar
        tmp_blog_list.append({'blog_id':str(kkk._id),
            'title':kkk.name,
            'author_id':str(tmp_user._id),
            'blog_body':kkk.view_body,
            'summary':kkk.abstract,
            'author_avatar': tmp_avatar.thumb_name,
            'author_name':tmp_user.name})
    #print tmp_blog_list
    # 20 page one time
    return [0, False, tmp_blog_list]


def fun_get_recommender_list(blog_id):
    # NOTE(review): truncated at the end of this chunk -- only the database
    # selection by hour is visible; the rest of the body lies outside this view.
    try:
        hour = time.localtime().tm_hour
        if hour > 12:
feed_id_list = [] #BlogPool().load_all() feed_id_list.reverse() try: page = int(page) except Exception, e: return [1, '页数错误!'] page_start = page * count page_end = (page + 1) * count load_list_id = feed_id_list[page_start:page_end] if len(load_list_id) < count: isall = True else: isall = False tmp_blog_con = Blog.get_instances('_id', load_list_id) tmp_blog_list = [] for one_blog in tmp_blog_con: if one_blog.author_id is not None and one_blog.author_id != '': tmp_blog_list.append({ 'blog_id': str(one_blog._id), 'title': one_blog.name, 'author_id': str(one_blog.author_id), 'author_name': one_blog.author_name }) tmp_blog_list.reverse() return [0, tmp_blog_list, isall]