def maybe_translate(self, pref_collection):
    from assembl.tasks.translate import (
        translate_content, PrefCollectionTranslationTable)
    service = self.discussion.translation_service()
    if service.canTranslate is not None:
        translations = PrefCollectionTranslationTable(
            service, pref_collection)
        translate_content(
            self, translation_table=translations, service=service)

def maybe_translate(self, pref_collection=None, target_locales=None):
    from assembl.tasks.translate import (
        translate_content, PrefCollectionTranslationTable,
        LanguagesTranslationTable)
    service = self.discussion.translation_service()
    if service.canTranslate is not None:
        translations = None
        if pref_collection:
            translations = PrefCollectionTranslationTable(
                service, pref_collection)
        elif target_locales:
            translations = LanguagesTranslationTable(
                service, target_locales)
        translate_content(
            self, translation_table=translations, service=service)

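# Usage sketch (not from the source): the v2 signature above accepts either a
# preference collection or an explicit locale list; whichever is given first
# picks the translation table. The receiver `post` and the locales below are
# hypothetical.
#
#     post.maybe_translate(pref_collection=prefs)        # per-user preferences
#     post.maybe_translate(target_locales=['en', 'fr'])  # explicit locales
#
# When neither argument is given, `translations` stays None and
# translate_content is still invoked with translation_table=None, so callers
# rely on its default behaviour in that case.
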
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order can be chronological, reverse_chronological
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), (
            "Unable to find agent profile with id " + post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), (
            "Unable to find agent profile with id " + post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    ideaContentLinkQuery = discussion.db.query(
        PostClass.id, PostClass.idea_content_links_above_post)
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    )
    ideaContentLinkQuery = ideaContentLinkQuery.filter(
        PostClass.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = text(
            Idea._get_orphan_posts_statement(),
            bindparams=[bindparam('discussion_id', discussion_id)]
        ).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            orphans, PostClass.id == orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))

    # "true" means hidden only, "false" (default) means visible only.
    # "any" means both.
    hidden = request.GET.get('hidden_messages', "false")
    if hidden != 'any':
        posts = posts.filter(PostClass.hidden == asbool(hidden))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.hidden == asbool(hidden))

    if root_idea_id:
        related = text(
            Idea._get_related_posts_statement(),
            bindparams=[bindparam('root_idea_id', root_idea_id),
                        bindparam('discussion_id', discussion_id)]
        ).columns(column('post_id')).alias('related')
        # Virtuoso bug: This should work...
        # posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.join(related, PostClass.id == related.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            related, PostClass.id == related.c.post_id)

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(
                PostClass.creation_date >= posted_after_date)
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(
                PostClass.creation_date <= posted_before_date)
            # Bug fix: this previously read
            # `ideaContentLinkQuery = posts.filter(
            #      ideaContentLinkQuery.creation_date <= posted_before_date)`,
            # which swapped the two queries and referenced a column that
            # doesn't exist on a Query object.
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            parent_alias, PostClass.parent)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    # Initialized here so the translate_content() call below can't hit an
    # unbound name when the request is anonymous.
    service = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))}
        liked_posts = {l.post_id: l.id for l in discussion.db.query(
            LikedPost).filter(
                LikedPost.tombstone_condition(),
                LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion_id))}
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = user_pref_as_translation_table(user, service)
    else:
        # If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            Post.body_text_index.contains(
                text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    ideaContentLinkCache = dict(ideaContentLinkQuery.all())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)  # debug leftover, commented out (as later versions do)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
        if view_def != "id_only":
            translate_content(
                post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None
                and root_post.id == post.id):
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (
            LikedPost.uri_generic(likedpost) if likedpost else False)
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling.
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data

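# Standalone sketch of the pagination arithmetic assembled into `data` above
# (1-based indices; maxPage clamped to at least 1). Extracted for illustration
# only; the function name and example values are not from the source.
from math import ceil

def page_window(page, page_size, total):
    max_page = max(1, int(ceil(float(total) / page_size)))
    start = (page_size * page) - (page_size - 1)
    end = total if page == max_page else start + (page_size - 1)
    return max_page, start, end

assert page_window(1, 25, 60) == (3, 1, 25)   # first full page
assert page_window(3, 25, 60) == (3, 51, 60)  # last, partial page
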
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof
        (classifier=null). Can be negated with "!"
    """
    localizer = request.localizer

    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s"))
            % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), (
            "Unable to find agent profile with id " + post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), (
            "Unable to find agent profile with id " + post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion_id == discussion_id,)
    ##no_of_posts_to_discussion = posts.count()
    post_data = []

    # True means deleted only, False (default) means non-deleted only.
    # None means both.

    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0

    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1

    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2

    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3

    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #     deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(
                PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(
                PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        polarities = [classifier[0] != "!"
                      for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        # Bug fix: default to the full list so the name is bound even when
        # "null" isn't among the classifiers.
        message_classifiers_nonull = message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(
                lambda c: c != "null", message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(
                    message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(
                    message_classifiers_nonull)
                if not includes_null:
                    term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    # Initialized here so the translate_content() call below can't hit an
    # unbound name when the request is anonymous.
    service = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id
            for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))
        }
        my_sentiments = {
            l.post_id: l
            for l in discussion.db.query(SentimentOfPost).filter(
                SentimentOfPost.tombstone_condition(),
                SentimentOfPost.actor_id == user_id,
                *SentimentOfPost.get_discussion_conditions(discussion_id))
        }
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        # Note: we could count the likes the same way and kill the subquery.
        # But it interferes with the popularity order,
        # and the benefit is not that high.
        sentiment_counts = discussion.db.query(
            PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
        ).join(SentimentOfPost).filter(
            PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
            SentimentOfPost.tombstone_condition()
        ).group_by(PostClass.id, SentimentOfPost.type)
        sentiment_counts_by_post_id = defaultdict(dict)
        for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
            sentiment_counts_by_post_id[post_id][
                sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
            ] = sentiment_count
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.disagree_count - Content.like_count,
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)
        if user_id != Everyone:
            viewpost = post.id in read_posts
        if view_def != "id_only":
            translate_content(
                post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None
                and root_post.id == post.id):
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json(
                'default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post['sentiment_counts'] = \
                sentiment_counts_by_post_id[post.id]

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling.
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data

def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    keyword: use full-text search
    locale: restrict to locale
    """
    localizer = request.localizer
    discussion = request.context

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = request.permissions

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    keywords = request.GET.getall('keyword')

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score' and not keywords:
        raise HTTPBadRequest("Cannot ask for a score without keywords")

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = Post.get_database_id(root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = Post.get_database_id(family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = Idea.get_database_id(root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [Post.get_database_id(id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = AgentProfile.get_database_id(post_author_id)
        assert AgentProfile.get(post_author_id), (
            "Unable to find agent profile with id " + post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = AgentProfile.get_database_id(post_replies_to)
        assert AgentProfile.get(post_replies_to), (
            "Unable to find agent profile with id " + post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion == discussion,)
    ##no_of_posts_to_discussion = posts.count()
    post_data = []

    # True means deleted only, False (default) means non-deleted only.
    # None means both.
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #     deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion.id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion.id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(
                PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(
                PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    if keywords:
        locales = request.GET.getall('locale')
        posts, rank = add_text_search(
            posts, (PostClass.body_id,), keywords, locales, order == 'score')

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    # Initialized here so the translate_content() call below can't hit an
    # unbound name when the request is anonymous.
    service = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id
            for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion.id))
        }
        liked_posts = {
            l.post_id: l.id
            for l in discussion.db.query(LikedPost).filter(
                LikedPost.tombstone_condition(),
                LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion.id))
        }
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def in ('partial_post', 'id_only'):
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(rank.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.like_count.desc(),
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def in ('partial_post', 'id_only'):
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    if view_def == 'id_only':
        posts = posts.with_entities(PostClass.id)

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        no_of_posts += 1
        if view_def == 'id_only':
            post_data.append(Content.uri_generic(post))
            continue
        if deleted is True:
            add_ancestors(post)
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
        if view_def not in ("partial_post", "id_only"):
            translate_content(
                post, translation_table=translations, service=service)
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None
                and root_post.id == post.id):
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (
            LikedPost.uri_generic(likedpost) if likedpost else False)
        if view_def not in ("partial_post", "id_only"):
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling.
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion.id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(data["total"] / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data

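# Usage sketch (not from the source) for the full-text parameters this version
# adds: repeated `keyword` and `locale` keys land in request.GET.getall(), and
# order=score is only legal when keywords are present. Host and discussion id
# are hypothetical; the endpoint path is assumed from the surrounding API module.
import requests

def fetch_keyword_page(base='https://example.org/api/v1/discussion/1'):
    resp = requests.get(base + '/posts', params=[
        ('keyword', 'climate'),
        ('locale', 'en'), ('locale', 'fr'),  # restrict search to these locales
        ('order', 'score'),                  # sorts by rank.desc(), per the code above
        ('page', '1'),
    ])
    return resp.json()
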
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof
        (classifier=null). Can be negated with "!"
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = request.authenticated_userid or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), (
            "Unable to find agent profile with id " + post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), (
            "Unable to find agent profile with id " + post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    ).filter(PostClass.type != 'proposition_post')
    ##no_of_posts_to_discussion = posts.count()
    post_data = []

    # True means deleted only, False (default) means non-deleted only.
    # None means both.

    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0

    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1

    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2

    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3

    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #     deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(
                    PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(
                PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(
                PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        polarities = [classifier[0] != "!"
                      for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        # Bug fix: default to the full list so the name is bound even when
        # "null" isn't among the classifiers.
        message_classifiers_nonull = message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(
                lambda c: c != "null", message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(
                    message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(
                    message_classifiers_nonull)
                if not includes_null:
                    term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    # Initialized here so the translate_content() call below can't hit an
    # unbound name when the request is anonymous.
    service = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))}
        my_sentiments = {l.post_id: l for l in discussion.db.query(
            SentimentOfPost).filter(
                SentimentOfPost.tombstone_condition(),
                SentimentOfPost.actor_id == user_id,
                *SentimentOfPost.get_discussion_conditions(discussion_id))}
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        # Note: we could count the likes the same way and kill the subquery.
        # But it interferes with the popularity order,
        # and the benefit is not that high.
        sentiment_counts = discussion.db.query(
            PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
        ).join(SentimentOfPost
        ).filter(
            PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
            SentimentOfPost.tombstone_condition()
        ).group_by(PostClass.id, SentimentOfPost.type)
        sentiment_counts_by_post_id = defaultdict(dict)
        for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
            sentiment_counts_by_post_id[post_id][
                sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
            ] = sentiment_count
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.disagree_count - Content.like_count,
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)
        if user_id != Everyone:
            viewpost = post.id in read_posts
        if view_def != "id_only":
            translate_content(
                post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None
                and root_post.id == post.id):
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json(
                'default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post['sentiment_counts'] = \
                sentiment_counts_by_post_id[post.id]

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling.
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
