def _get_ideas_real(discussion, view_def=None, ids=None, user_id=None):
    user_id = user_id or Everyone
    # optimization: Recursive widget links.
    from assembl.models import (
        Widget, IdeaWidgetLink, IdeaDescendantsShowingWidgetLink)
    universal_widget_links = []
    by_idea_widget_links = defaultdict(list)
    widget_links = discussion.db.query(IdeaWidgetLink
        ).join(Widget).join(Discussion).filter(
        Widget.test_active(), Discussion.id == discussion.id,
        IdeaDescendantsShowingWidgetLink.polymorphic_filter()
        ).options(joinedload_all(IdeaWidgetLink.idea)).all()
    for wlink in widget_links:
        if isinstance(wlink.idea, RootIdea):
            universal_widget_links.append({
                '@type': wlink.external_typename(),
                'widget': Widget.uri_generic(wlink.widget_id)})
        else:
            for id in wlink.idea.get_all_descendants(True):
                by_idea_widget_links[Idea.uri_generic(id)].append({
                    '@type': wlink.external_typename(),
                    'widget': Widget.uri_generic(wlink.widget_id)})

    next_synthesis = discussion.get_next_synthesis()
    ideas = discussion.db.query(Idea).filter_by(
        discussion_id=discussion.id
    )

    ideas = ideas.outerjoin(
        SubGraphIdeaAssociation,
        and_(SubGraphIdeaAssociation.sub_graph_id == next_synthesis.id,
             SubGraphIdeaAssociation.idea_id == Idea.id))

    ideas = ideas.outerjoin(IdeaLink, and_(IdeaLink.target_id == Idea.id))

    ideas = ideas.order_by(IdeaLink.order, Idea.creation_date)

    if ids:
        ids = [get_database_id("Idea", id) for id in ids]
        ideas = ideas.filter(Idea.id.in_(ids))

    # remove tombstones
    ideas = ideas.filter(and_(*Idea.base_conditions()))
    ideas = ideas.options(
        joinedload_all(Idea.source_links),
        joinedload_all(Idea.has_showing_widget_links),
        undefer(Idea.num_children))

    permissions = get_permissions(user_id, discussion.id)
    Idea.prepare_counters(discussion.id, True)

    retval = [idea.generic_json(view_def, user_id, permissions)
              for idea in ideas]
    retval = [x for x in retval if x is not None]
    for r in retval:
        if r.get('widget_links', None) is not None:
            links = r['widget_links'][:]
            links.extend(universal_widget_links)
            links.extend(by_idea_widget_links[r['@id']])
            r['active_widget_links'] = links
    return retval
def _get_ideas_real(discussion, view_def=None, ids=None):
    next_synthesis = discussion.get_next_synthesis()
    ideas = Idea.db.query(Idea).filter_by(
        discussion_id=discussion.id
    )

    ideas = ideas.outerjoin(
        SubGraphIdeaAssociation,
        and_(SubGraphIdeaAssociation.sub_graph_id == next_synthesis.id,
             SubGraphIdeaAssociation.idea_id == Idea.id))

    ideas = ideas.outerjoin(IdeaLink, and_(IdeaLink.target_id == Idea.id))

    ideas = ideas.order_by(IdeaLink.order, Idea.creation_date)

    if ids:
        ids = [get_database_id("Idea", id) for id in ids]
        ideas = ideas.filter(Idea.id.in_(ids))

    retval = []
    for idea in ideas:
        if view_def:
            serialized_idea = idea.generic_json(view_def)
        else:
            serialized_idea = idea.generic_json()
        retval.append(serialized_idea)
    return retval
def put_extract(request):
    """ Updating an Extract """
    extract_id = request.matchdict['id']
    user_id = authenticated_userid(request)

    updated_extract_data = json.loads(request.body)
    extract = Extract.get_instance(extract_id)
    if not extract:
        raise HTTPNotFound("Extract with id '%s' not found." % extract_id)

    extract.owner_id = user_id or get_database_id("User", extract.owner_id)
    extract.order = updated_extract_data.get('order', extract.order)
    idea_id = updated_extract_data.get('idIdea', None)
    if idea_id:
        idea = Idea.get_instance(idea_id)
        if idea.get_discussion_id() != extract.get_discussion_id():
            raise HTTPBadRequest(
                "Extract from discussion %s cannot be associated with an "
                "idea from a different discussion." % extract.get_discussion_id())
        extract.idea = idea
    else:
        extract.idea = None

    Extract.db.add(extract)
    # TODO: Merge ranges. Sigh.
    return {'ok': True}
def get_discussion_roles_for_user(request):
    discussion_id = request.matchdict['discussion_id']
    user_id = get_database_id("User", request.matchdict['user_id'])
    session = Discussion.db()
    if not session.query(User).get(user_id):
        raise HTTPNotFound("User id %s does not exist" % (user_id,))
    rolenames = session.query(Role.name).join(
        LocalUserRole).filter(LocalUserRole.user_id == user_id,
                              LocalUserRole.discussion_id == discussion_id)
    return [x[0] for x in rolenames]
def _get_extracts_real(discussion, view_def=None, ids=None):
    all_extracts = Extract.db.query(Extract).filter(
        Extract.discussion_id == discussion.id
    )
    if ids:
        ids = [get_database_id("Extract", id) for id in ids]
        all_extracts = all_extracts.filter(Extract.id.in_(ids))
    all_extracts = all_extracts.options(joinedload_all(
        Extract.creator))

    if view_def:
        return [extract.generic_json(view_def) for extract in all_extracts]
    else:
        return [extract.serializable() for extract in all_extracts]
def put_extract(request):
    """ Updating an Extract """
    extract_id = request.matchdict['id']
    user_id = request.authenticated_userid
    discussion_id = int(request.matchdict['discussion_id'])

    if not user_id:
        # Straight from annotator
        token = request.headers.get('X-Annotator-Auth-Token')
        if token:
            token = decode_token(
                token, request.registry.settings['session.secret'])
            if token:
                user_id = token['userId']
    user_id = user_id or Everyone

    updated_extract_data = json.loads(request.body)
    extract = Extract.get_instance(extract_id)
    if not extract:
        raise HTTPNotFound("Extract with id '%s' not found." % extract_id)

    if not (user_has_permission(discussion_id, user_id, P_EDIT_EXTRACT) or
            (user_has_permission(discussion_id, user_id, P_EDIT_MY_EXTRACT)
             and user_id == extract.owner_id)):
        return HTTPForbidden()

    extract.owner_id = user_id or get_database_id("User", extract.owner_id)
    extract.order = updated_extract_data.get('order', extract.order)
    extract.important = updated_extract_data.get('important', extract.important)
    idea_id = updated_extract_data.get('idIdea', None)
    if idea_id:
        idea = Idea.get_instance(idea_id)
        if idea.discussion != extract.discussion:
            raise HTTPBadRequest(
                "Extract from discussion %s cannot be associated with an "
                "idea from a different discussion." % extract.get_discussion_id())
        extract.idea = idea
    else:
        extract.idea = None

    Extract.default_db.add(extract)
    # TODO: Merge ranges. Sigh.
    return {'ok': True}
def _get_extracts_real(discussion, view_def='default', ids=None, user_id=None):
    user_id = user_id or Everyone
    all_extracts = discussion.db.query(Extract).filter(
        Extract.discussion_id == discussion.id)
    if ids:
        ids = [get_database_id("Extract", id) for id in ids]
        all_extracts = all_extracts.filter(Extract.id.in_(ids))
    all_extracts = all_extracts.options(joinedload_all(Extract.content))
    all_extracts = all_extracts.options(
        joinedload_all(Extract.text_fragment_identifiers))
    permissions = get_permissions(user_id, discussion.id)
    return [
        extract.generic_json(view_def, user_id, permissions)
        for extract in all_extracts
    ]
def put_extract(request):
    """ Updating an Extract """
    extract_id = request.matchdict['id']
    user_id = authenticated_userid(request)
    discussion_id = int(request.matchdict['discussion_id'])

    if not user_id:
        # Straight from annotator
        token = request.headers.get('X-Annotator-Auth-Token')
        if token:
            token = decode_token(
                token, request.registry.settings['session.secret'])
            if token:
                user_id = token['userId']
    if not user_id:
        user_id = Everyone

    updated_extract_data = json.loads(request.body)
    extract = Extract.get_instance(extract_id)
    if not extract:
        raise HTTPNotFound("Extract with id '%s' not found." % extract_id)

    if not (user_has_permission(discussion_id, user_id, P_EDIT_EXTRACT) or
            (user_has_permission(discussion_id, user_id, P_EDIT_MY_EXTRACT)
             and user_id == extract.owner_id)):
        return HTTPForbidden()

    extract.owner_id = user_id or get_database_id("User", extract.owner_id)
    extract.order = updated_extract_data.get('order', extract.order)
    extract.important = updated_extract_data.get('important', extract.important)
    idea_id = updated_extract_data.get('idIdea', None)
    if idea_id:
        idea = Idea.get_instance(idea_id)
        if idea.discussion != extract.discussion:
            raise HTTPBadRequest(
                "Extract from discussion %s cannot be associated with an "
                "idea from a different discussion." % extract.get_discussion_id())
        extract.idea = idea
    else:
        extract.idea = None

    Extract.db.add(extract)
    # TODO: Merge ranges. Sigh.
    return {'ok': True}
def _get_extracts_real(discussion, view_def='default', ids=None, user_id=None):
    user_id = user_id or Everyone
    all_extracts = discussion.db.query(Extract).filter(
        Extract.discussion_id == discussion.id
    )
    if ids:
        ids = [get_database_id("Extract", id) for id in ids]
        all_extracts = all_extracts.filter(Extract.id.in_(ids))
    all_extracts = all_extracts.options(joinedload_all(
        Extract.content))
    all_extracts = all_extracts.options(joinedload_all(
        Extract.text_fragment_identifiers))
    permissions = get_permissions(user_id, discussion.id)
    return [extract.generic_json(view_def, user_id, permissions)
            for extract in all_extracts]
def _get_ideas_real(discussion, view_def=None, ids=None, user_id=None):
    next_synthesis = discussion.get_next_synthesis()
    ideas = Idea.db.query(Idea).filter_by(
        discussion_id=discussion.id
    )

    ideas = ideas.outerjoin(
        SubGraphIdeaAssociation,
        and_(SubGraphIdeaAssociation.sub_graph_id == next_synthesis.id,
             SubGraphIdeaAssociation.idea_id == Idea.id))

    ideas = ideas.outerjoin(IdeaLink, and_(IdeaLink.target_id == Idea.id))

    ideas = ideas.order_by(IdeaLink.order, Idea.creation_date)

    if ids:
        ids = [get_database_id("Idea", id) for id in ids]
        ideas = ideas.filter(Idea.id.in_(ids))

    permissions = get_permissions(user_id, discussion.id)
    retval = [idea.generic_json(view_def, user_id, permissions)
              for idea in ideas]
    retval = [x for x in retval if x is not None]

    widget_data = defaultdict(list)
    for widget in discussion.widgets:
        url = widget.get_ui_endpoint()
        for data in widget.idea_data(user_id):
            try:
                data['widget_url'] = url
                data['widget_uri'] = widget.uri()
                widget_data[data['idea']].append(data)
            except KeyError as e:
                pass
    for idea_rep in retval:
        if '@id' not in idea_rep:
            continue
        data = widget_data[idea_rep['@id']]
        if data:
            idea_rep['widget_data'] = data
    return retval
def get_extracts(request):
    discussion_id = int(request.matchdict['discussion_id'])
    view_def = request.GET.get('view')
    ids = request.GET.getall('ids')

    all_extracts = Extract.db.query(Extract).join(
        Content, Source
    ).filter(
        Source.discussion_id == discussion_id,
        Content.source_id == Source.id
    )
    if ids:
        ids = [get_database_id("Extract", id) for id in ids]
        all_extracts = all_extracts.filter(Extract.id.in_(ids))
    # all_extracts = all_extracts.options(joinedload_all(
    #     Extract.source, Content.post, Post.creator))
    all_extracts = all_extracts.options(joinedload_all(
        Extract.creator, AgentProfile.user))

    if view_def:
        return [extract.generic_json(view_def) for extract in all_extracts]
    else:
        return [extract.serializable() for extract in all_extracts]
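# A minimal, hedged sketch of how a client could call the get_extracts view
# above.  Only the query parameter names ('view', 'ids') come from the code;
# the host, port, route path and the "local:Extract/42" id format are
# assumptions, not taken from this module.
def _example_fetch_extracts():
    import urllib2  # Python 2, matching the rest of this module
    # 'ids' may be repeated and is resolved through get_database_id().
    url = ("http://localhost:6543/api/v1/discussion/1/extracts"
           "?view=default&ids=local%3AExtract%2F42")
    return urllib2.urlopen(url).read()  # JSON list of serialized extracts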
def get_posts(request):
    """ Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread
    message, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof (classifier=null). Can be negated with "!"
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score', 'popularity')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), \
            "Unable to find agent profile with id " + post_author_id

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), \
            "Unable to find agent profile with id " + post_replies_to

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(PostClass,
                                    Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    )
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    # True means deleted only, False (default) means non-deleted only. None means both.
    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0

    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1

    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2

    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3

    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #     deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))

    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        polarities = [classifier[0] != "!" for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(
                lambda c: c != "null", message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(
                    message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(
                    message_classifiers_nonull)
            if not includes_null:
                term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(ViewPost).filter(
            ViewPost.tombstone_condition(),
            ViewPost.actor_id == user_id,
            *ViewPost.get_discussion_conditions(discussion_id))}
        my_sentiments = {l.post_id: l for l in discussion.db.query(
            SentimentOfPost).filter(
            SentimentOfPost.tombstone_condition(),
            SentimentOfPost.actor_id == user_id,
            *SentimentOfPost.get_discussion_conditions(discussion_id))}
        if is_unread != None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        #If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())

    # Note: we could count the like the same way and kill the subquery.
    # But it interferes with the popularity order,
    # and the benefit is not that high.
    sentiment_counts = discussion.db.query(
        PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
    ).join(SentimentOfPost).filter(
        PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
        SentimentOfPost.tombstone_condition()
    ).group_by(PostClass.id, SentimentOfPost.type)
    sentiment_counts_by_post_id = defaultdict(dict)
    for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
        sentiment_counts_by_post_id[post_id][
            sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
        ] = sentiment_count

    posts = posts.options(
        # undefer(Post.idea_content_links_above_post),
        joinedload_all(Post.creator),
        joinedload_all(Post.extracts),
        joinedload_all(Post.widget_idea_links),
        joinedload_all(SynthesisPost.publishes_synthesis),
        subqueryload_all(Post.attachments))
    if len(discussion.discussion_locales) > 1:
        posts = posts.options(*Content.subqueryload_options())
    else:
        posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(
            Content.disagree_count - Content.like_count,
            Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)

        if user_id != Everyone:
            viewpost = post.id in read_posts
        if view_def != "id_only":
            translate_content(post, translation_table=translations,
                              service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json(
                'default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post['sentiment_counts'] = \
                sentiment_counts_by_post_id[post.id]

        post_data.append(serializable_post)

    # Benoitg:  For now, this completely garbles threading without intelligent
    #handling of pagination.  Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date.  If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO:  Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
def get_posts(request):
    """ Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex: is_unread=true returns only
    unread messages, is_unread=false returns only read messages)
    order can be chronological, reverse_chronological
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request)
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological')

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    posts = Post.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    )
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = text(Idea._get_orphan_posts_statement(),
                       bindparams=[bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))

    if root_idea_id:
        related = text(Idea._get_related_posts_statement(),
                       bindparams=[bindparam('root_idea_id', root_idea_id),
                                   bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('related')
        #Virtuoso bug: This should work...
        #posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.filter(PostClass.id.in_(related))

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id)
        )
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    if user_id:
        posts = posts.outerjoin(
            ViewPost, and_(ViewPost.actor_id == user_id,
                           ViewPost.post_id == PostClass.id))
        posts = posts.add_entity(ViewPost)
        if is_unread == "true":
            posts = posts.filter(ViewPost.id == None)
        elif is_unread == "false":
            posts = posts.filter(ViewPost.id != None)
    else:
        #If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    #posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'partial':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(joinedload_all(Post.creator),
                              undefer(Email.recipients))

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    for query_result in posts:
        if user_id:
            post, viewpost = query_result
        else:
            post, viewpost = query_result, None
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id and root_post is not None and root_post.id == post.id:
            #Mark post read, we requested it explicitely
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post
            )
            Post.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False

        post_data.append(serializable_post)

    # Benoitg:  For now, this completely garbles threading without intelligent
    #handling of pagination.  Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date.  If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = Post.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO:  Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
def get_posts(request):
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(id=int(discussion_id))
    if not discussion:
        raise HTTPNotFound(_("No discussion found with id=%s" % discussion_id))

    discussion.import_from_sources()

    user_id = authenticated_userid(request)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view')

    #Rename "inbox" to "unread", the number of unread messages for the current user.
    no_of_messages_viewed_by_user = Post.db.query(ViewPost).join(
        Post, Content, Source
    ).filter(
        Source.discussion_id == discussion_id,
        Content.source_id == Source.id,
        ViewPost.actor_id == user_id,
    ).count() if user_id else 0

    posts = Post.db.query(Post).join(
        Content,
        Source,
    ).filter(
        Source.discussion_id == discussion_id,
        Content.source_id == Source.id,
    )
    no_of_posts_to_discussion = posts.count()

    post_data = []

    if root_idea_id:
        if root_idea_id == Idea.ORPHAN_POSTS_IDEA_ID:
            ideas_query = Post.db.query(Post) \
                .filter(Post.id.in_(text(
                    Idea._get_orphan_posts_statement(),
                    bindparams=[bindparam('discussion_id', discussion_id)]
                )))
        else:
            ideas_query = Post.db.query(Post) \
                .filter(Post.id.in_(text(
                    Idea._get_related_posts_statement(),
                    bindparams=[bindparam('root_idea_id', root_idea_id)]
                )))
        posts = ideas_query.join(Content, Source,)
    elif root_post_id:
        root_post = Post.get(id=root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (Post.id == root_post.id)
        )
        #Benoitg:  For now, this completely garbles threading without intelligent
        #handling of pagination.  Disabling
        #posts = posts.limit(page_size).offset(data['startIndex']-1)
    elif ids:
        posts = posts.filter(Post.id.in_(ids))

    if user_id:
        posts = posts.outerjoin(
            ViewPost, and_(ViewPost.actor_id == user_id,
                           ViewPost.post_id == Post.id))
        posts = posts.add_entity(ViewPost)

    posts = posts.options(contains_eager(Post.content, Content.source))
    posts = posts.options(joinedload_all(Post.creator, AgentProfile.user))

    posts = posts.order_by(Content.creation_date)

    if 'synthesis' in filter_names:
        posts = posts.filter(Post.is_synthesis == True)

    if user_id:
        for post, viewpost in posts:
            if view_def:
                serializable_post = post.generic_json(view_def)
            else:
                serializable_post = post.serializable()
            if viewpost:
                serializable_post['read'] = True
            else:
                serializable_post['read'] = False
                if root_post_id:
                    viewed_post = ViewPost(
                        actor_id=user_id,
                        post=post
                    )
                    Post.db.add(viewed_post)
            post_data.append(serializable_post)
    else:
        for post in posts:
            if view_def:
                serializable_post = post.generic_json(view_def)
            else:
                serializable_post = post.serializable()
            post_data.append(serializable_post)

    data = {}
    data["page"] = page
    data["inbox"] = no_of_posts_to_discussion - no_of_messages_viewed_by_user
    #What is "total", the total messages in the current context?
    #This gave wrong count, I don't know why.  benoitg
    #data["total"] = discussion.posts().count()
    data["total"] = no_of_posts_to_discussion
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO:  Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
def get_posts(request): """ Query interface on posts Filters have two forms: only_*, is for filters that cannot be reversed (ex: only_synthesis) is_*, is for filters that can be reversed (ex:is_unread=true returns only unread order can be chronological, reverse_chronological message, is_unread=false returns only read messages) """ localizer = get_localizer(request) discussion_id = int(request.matchdict["discussion_id"]) discussion = Discussion.get(id=int(discussion_id)) if not discussion: raise HTTPNotFound(localizer.translate(_("No discussion found with id=%s")) % discussion_id) discussion.import_from_sources() user_id = authenticated_userid(request) DEFAULT_PAGE_SIZE = 25 page_size = DEFAULT_PAGE_SIZE filter_names = ( [filter_name for filter_name in request.GET.getone("filters").split(",") if filter_name] if request.GET.get("filters") else [] ) try: page = int(request.GET.getone("page")) except (ValueError, KeyError): page = 1 order = request.GET.get("order") if order == None: order = "chronological" assert order in ("chronological", "reverse_chronological") if page < 1: page = 1 root_post_id = request.GET.getall("root_post_id") if root_post_id: root_post_id = get_database_id("Post", root_post_id[0]) root_idea_id = request.GET.getall("root_idea_id") if root_idea_id: root_idea_id = get_database_id("Idea", root_idea_id[0]) ids = request.GET.getall("ids") if ids: ids = [get_database_id("Post", id) for id in ids] view_def = request.GET.get("view") only_synthesis = request.GET.get("only_synthesis") if only_synthesis == "true": posts = Post.db.query(SynthesisPost) else: posts = Post.db.query(Post) posts = posts.filter(Post.discussion_id == discussion_id) ##no_of_posts_to_discussion = posts.count() post_data = [] only_orphan = request.GET.get("only_orphan") if only_orphan == "true": if root_idea_id: raise HTTPBadRequest(localizer.translate(_("Getting orphan posts of a specific idea isn't supported."))) posts = posts.filter( Post.id.in_( text(Idea._get_orphan_posts_statement(), bindparams=[bindparam("discussion_id", discussion_id)]) ) ) elif only_orphan == "false": raise HTTPBadRequest(localizer.translate(_("Getting non-orphan posts isn't supported."))) if root_idea_id: posts = posts.filter( Post.id.in_( text( Idea._get_related_posts_statement(), bindparams=[bindparam("root_idea_id", root_idea_id), bindparam("discussion_id", discussion_id)], ) ) ) if root_post_id: root_post = Post.get(id=root_post_id) posts = posts.filter( (Post.ancestry.like(root_post.ancestry + cast(root_post.id, String) + ",%")) | (Post.id == root_post.id) ) else: root_post = None if ids: posts = posts.filter(Post.id.in_(ids)) # Post read/unread management is_unread = request.GET.get("is_unread") print "\n" + repr(is_unread) + "\n" if user_id: posts = posts.outerjoin(ViewPost, and_(ViewPost.actor_id == user_id, ViewPost.post_id == Post.id)) posts = posts.add_entity(ViewPost) if is_unread == "true": posts = posts.filter(ViewPost.id == None) elif is_unread == "false": posts = posts.filter(ViewPost.id != None) else: # If there is no user_id, all posts are always unread if is_unread == "false": raise HTTPBadRequest(localizer.translate(_("You must be logged in to view which posts are read"))) # posts = posts.options(contains_eager(Post.source)) posts = posts.options(joinedload_all(Post.creator)) if order == "chronological": posts = posts.order_by(Content.creation_date) elif order == "reverse_chronological": posts = posts.order_by(Content.creation_date.desc()) no_of_posts = 0 no_of_posts_viewed_by_user = 0 for query_result in 
posts: if user_id: post, viewpost = query_result else: post, viewpost = query_result, None no_of_posts += 1 if view_def: serializable_post = post.generic_json(view_def) else: serializable_post = post.serializable() if viewpost: serializable_post["read"] = True no_of_posts_viewed_by_user += 1 elif user_id and root_post is not None and root_post.id == post.id: # Mark post read, we requested it explicitely viewed_post = ViewPost(actor_id=user_id, post=root_post) Post.db.add(viewed_post) serializable_post["read"] = True else: serializable_post["read"] = False post_data.append(serializable_post) # Benoitg: For now, this completely garbles threading without intelligent # handling of pagination. Disabling # posts = posts.limit(page_size).offset(data['startIndex']-1) # This code isn't up to date. If limiting the query by page, we need to # calculate the counts with a separate query to have the right number of # results # no_of_messages_viewed_by_user = Post.db.query(ViewPost).join( # Post # ).filter( # Post.discussion_id == discussion_id, # ViewPost.actor_id == user_id, # ).count() if user_id else 0 data = {} data["page"] = page data["unread"] = no_of_posts - no_of_posts_viewed_by_user data["total"] = no_of_posts data["maxPage"] = max(1, ceil(float(data["total"]) / page_size)) # TODO: Check if we want 1 based index in the api data["startIndex"] = (page_size * page) - (page_size - 1) if data["page"] == data["maxPage"]: data["endIndex"] = data["total"] else: data["endIndex"] = data["startIndex"] + (page_size - 1) data["posts"] = post_data return data
def get_posts(request): """ Query interface on posts Filters have two forms: only_*, is for filters that cannot be reversed (ex: only_synthesis) is_*, is for filters that can be reversed (ex:is_unread=true returns only unread order can be chronological, reverse_chronological message, is_unread=false returns only read messages) """ localizer = request.localizer discussion_id = int(request.matchdict['discussion_id']) discussion = Discussion.get(int(discussion_id)) if not discussion: raise HTTPNotFound( localizer.translate(_("No discussion found with id=%s")) % discussion_id) discussion.import_from_sources() user_id = authenticated_userid(request) or Everyone permissions = get_permissions(user_id, discussion_id) DEFAULT_PAGE_SIZE = 25 page_size = DEFAULT_PAGE_SIZE filter_names = [ filter_name for filter_name \ in request.GET.getone('filters').split(',') \ if filter_name ] if request.GET.get('filters') else [] try: page = int(request.GET.getone('page')) except (ValueError, KeyError): page = 1 text_search = request.GET.get('text_search', None) order = request.GET.get('order') if order == None: order = 'chronological' assert order in ('chronological', 'reverse_chronological', 'score') if order == 'score': assert text_search is not None if page < 1: page = 1 root_post_id = request.GET.getall('root_post_id') if root_post_id: root_post_id = get_database_id("Post", root_post_id[0]) family_post_id = request.GET.getall('family_post_id') if family_post_id: family_post_id = get_database_id("Post", family_post_id[0]) root_idea_id = request.GET.getall('root_idea_id') if root_idea_id: root_idea_id = get_database_id("Idea", root_idea_id[0]) ids = request.GET.getall('ids[]') if ids: ids = [get_database_id("Post", id) for id in ids] view_def = request.GET.get('view') or 'default' only_synthesis = request.GET.get('only_synthesis') post_author_id = request.GET.get('post_author') if post_author_id: post_author_id = get_database_id("AgentProfile", post_author_id) assert AgentProfile.get( post_author_id ), "Unable to find agent profile with id " + post_author_id post_replies_to = request.GET.get('post_replies_to') if post_replies_to: post_replies_to = get_database_id("AgentProfile", post_replies_to) assert AgentProfile.get( post_replies_to ), "Unable to find agent profile with id " + post_replies_to posted_after_date = request.GET.get('posted_after_date') posted_before_date = request.GET.get('posted_before_date') PostClass = SynthesisPost if only_synthesis == "true" else Post if order == 'score': posts = discussion.db.query(PostClass, Content.body_text_index.score_name) else: posts = discussion.db.query(PostClass) posts = posts.filter(PostClass.discussion_id == discussion_id, ) ##no_of_posts_to_discussion = posts.count() post_data = [] only_orphan = request.GET.get('only_orphan') if only_orphan == "true": if root_idea_id: raise HTTPBadRequest( localizer.translate( _("Getting orphan posts of a specific idea isn't supported." ))) orphans = text(Idea._get_orphan_posts_statement(), bindparams=[ bindparam('discussion_id', discussion_id) ]).columns(column('post_id')).alias('orphans') posts = posts.join(orphans, PostClass.id == orphans.c.post_id) elif only_orphan == "false": raise HTTPBadRequest( localizer.translate( _("Getting non-orphan posts isn't supported."))) # "true" means hidden only, "false" (default) means visible only. "any" means both. 
hidden = request.GET.get('hidden_messages', "false") if hidden != 'any': posts = posts.filter(PostClass.hidden == asbool(hidden)) if root_idea_id: related = text(Idea._get_related_posts_statement(), bindparams=[ bindparam('root_idea_id', root_idea_id), bindparam('discussion_id', discussion_id) ]).columns(column('post_id')).alias('related') #Virtuoso bug: This should work... #posts = posts.join(related, PostClass.id==related.c.post_id) posts = posts.join(related, PostClass.id == related.c.post_id) if root_post_id: root_post = Post.get(root_post_id) posts = posts.filter((Post.ancestry.like(root_post.ancestry + cast(root_post.id, String) + ',%')) | (PostClass.id == root_post.id)) elif family_post_id: root_post = Post.get(family_post_id) ancestor_ids = root_post.ancestor_ids() posts = posts.filter((Post.ancestry.like(root_post.ancestry + cast(root_post.id, String) + ',%')) | (PostClass.id == root_post.id) | (PostClass.id.in_(ancestor_ids))) else: root_post = None if ids: posts = posts.filter(Post.id.in_(ids)) if posted_after_date: posted_after_date = parse_datetime(posted_after_date) if posted_after_date: posts = posts.filter(PostClass.creation_date >= posted_after_date) #Maybe we should do something if the date is invalid. benoitg if posted_before_date: posted_before_date = parse_datetime(posted_before_date) if posted_before_date: posts = posts.filter(PostClass.creation_date <= posted_before_date) #Maybe we should do something if the date is invalid. benoitg if post_author_id: posts = posts.filter(PostClass.creator_id == post_author_id) if post_replies_to: parent_alias = aliased(PostClass) posts = posts.join(parent_alias, PostClass.parent) posts = posts.filter(parent_alias.creator_id == post_replies_to) # Post read/unread management is_unread = request.GET.get('is_unread') if user_id != Everyone: # This is horrible, but the join creates complex subqueries that # virtuoso cannot decode properly. read_posts = { v.post_id for v in discussion.db.query(ViewPost).filter( ViewPost.tombstone_condition(), ViewPost.actor_id == user_id, *ViewPost.get_discussion_conditions(discussion_id)) } liked_posts = { l.post_id: l.id for l in discussion.db.query(LikedPost).filter( LikedPost.tombstone_condition(), LikedPost.actor_id == user_id, *LikedPost.get_discussion_conditions(discussion_id)) } if is_unread != None: posts = posts.outerjoin( ViewPost, and_(ViewPost.actor_id == user_id, ViewPost.post_id == PostClass.id, ViewPost.tombstone_date == None)) if is_unread == "true": posts = posts.filter(ViewPost.id == None) elif is_unread == "false": posts = posts.filter(ViewPost.id != None) else: #If there is no user_id, all posts are always unread if is_unread == "false": raise HTTPBadRequest( localizer.translate( _("You must be logged in to view which posts are read"))) if text_search is not None: # another Virtuoso bug: offband kills score. but it helps speed. offband = () if (order == 'score') else None posts = posts.filter( Post.body_text_index.contains(text_search.encode('utf-8'), offband=offband)) #posts = posts.options(contains_eager(Post.source)) # Horrible hack... 
But useful for structure load if view_def == 'id_only': pass # posts = posts.options(defer(Post.body)) else: posts = posts.options(joinedload_all(Post.creator)) posts = posts.options(joinedload_all(Post.extracts)) posts = posts.options(joinedload_all(Post.widget_idea_links)) posts = posts.options(joinedload_all( SynthesisPost.publishes_synthesis)) if order == 'chronological': posts = posts.order_by(Content.creation_date) elif order == 'reverse_chronological': posts = posts.order_by(Content.creation_date.desc()) elif order == 'score': posts = posts.order_by(Content.body_text_index.score_name.desc()) print str(posts) no_of_posts = 0 no_of_posts_viewed_by_user = 0 for query_result in posts: score, viewpost, likedpost = None, None, None if not isinstance(query_result, (list, tuple)): query_result = [query_result] post = query_result[0] if user_id != Everyone: viewpost = post.id in read_posts likedpost = liked_posts.get(post.id, None) no_of_posts += 1 serializable_post = post.generic_json(view_def, user_id, permissions) or {} if order == 'score': score = query_result[1] serializable_post['score'] = score if viewpost: serializable_post['read'] = True no_of_posts_viewed_by_user += 1 elif user_id != Everyone and root_post is not None and root_post.id == post.id: # Mark post read, we requested it explicitely viewed_post = ViewPost(actor_id=user_id, post=root_post) discussion.db.add(viewed_post) serializable_post['read'] = True else: serializable_post['read'] = False # serializable_post['liked'] = likedpost.uri() if likedpost else False serializable_post['liked'] = LikedPost.uri_generic( likedpost) if likedpost else False post_data.append(serializable_post) # Benoitg: For now, this completely garbles threading without intelligent #handling of pagination. Disabling #posts = posts.limit(page_size).offset(data['startIndex']-1) # This code isn't up to date. If limiting the query by page, we need to # calculate the counts with a separate query to have the right number of # results #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join( # Post #).filter( # Post.discussion_id == discussion_id, # ViewPost.actor_id == user_id, #).count() if user_id else 0 data = {} data["page"] = page data["unread"] = no_of_posts - no_of_posts_viewed_by_user data["total"] = no_of_posts data["maxPage"] = max(1, ceil(float(data["total"]) / page_size)) #TODO: Check if we want 1 based index in the api data["startIndex"] = (page_size * page) - (page_size - 1) if data["page"] == data["maxPage"]: data["endIndex"] = data["total"] else: data["endIndex"] = data["startIndex"] + (page_size - 1) data["posts"] = post_data return data
def get_posts(request): """ Query interface on posts Filters have two forms: only_*, is for filters that cannot be reversed (ex: only_synthesis) is_*, is for filters that can be reversed (ex:is_unread=true returns only unread order can be chronological, reverse_chronological message, is_unread=false returns only read messages) """ localizer = request.localizer discussion_id = int(request.matchdict['discussion_id']) discussion = Discussion.get(int(discussion_id)) if not discussion: raise HTTPNotFound(localizer.translate( _("No discussion found with id=%s")) % discussion_id) discussion.import_from_sources() user_id = authenticated_userid(request) or Everyone permissions = get_permissions(user_id, discussion_id) DEFAULT_PAGE_SIZE = 25 page_size = DEFAULT_PAGE_SIZE filter_names = [ filter_name for filter_name \ in request.GET.getone('filters').split(',') \ if filter_name ] if request.GET.get('filters') else [] try: page = int(request.GET.getone('page')) except (ValueError, KeyError): page = 1 text_search = request.GET.get('text_search', None) order = request.GET.get('order') if order == None: order = 'chronological' assert order in ('chronological', 'reverse_chronological', 'score') if order == 'score': assert text_search is not None if page < 1: page = 1 root_post_id = request.GET.getall('root_post_id') if root_post_id: root_post_id = get_database_id("Post", root_post_id[0]) family_post_id = request.GET.getall('family_post_id') if family_post_id: family_post_id = get_database_id("Post", family_post_id[0]) root_idea_id = request.GET.getall('root_idea_id') if root_idea_id: root_idea_id = get_database_id("Idea", root_idea_id[0]) ids = request.GET.getall('ids[]') if ids: ids = [get_database_id("Post", id) for id in ids] view_def = request.GET.get('view') or 'default' only_synthesis = request.GET.get('only_synthesis') post_author_id = request.GET.get('post_author') if post_author_id: post_author_id = get_database_id("AgentProfile", post_author_id) assert AgentProfile.get(post_author_id), "Unable to find agent profile with id " + post_author_id post_replies_to = request.GET.get('post_replies_to') if post_replies_to: post_replies_to = get_database_id("AgentProfile", post_replies_to) assert AgentProfile.get(post_replies_to), "Unable to find agent profile with id " + post_replies_to posted_after_date = request.GET.get('posted_after_date') posted_before_date = request.GET.get('posted_before_date') PostClass = SynthesisPost if only_synthesis == "true" else Post ideaContentLinkQuery = discussion.db.query( PostClass.id, PostClass.idea_content_links_above_post) if order == 'score': posts = discussion.db.query(PostClass, Content.body_text_index.score_name) else: posts = discussion.db.query(PostClass) posts = posts.filter( PostClass.discussion_id == discussion_id, ) ideaContentLinkQuery = ideaContentLinkQuery.filter( PostClass.discussion_id == discussion_id) ##no_of_posts_to_discussion = posts.count() post_data = [] only_orphan = request.GET.get('only_orphan') if only_orphan == "true": if root_idea_id: raise HTTPBadRequest(localizer.translate( _("Getting orphan posts of a specific idea isn't supported."))) orphans = text(Idea._get_orphan_posts_statement(), bindparams=[bindparam('discussion_id', discussion_id)] ).columns(column('post_id')).alias('orphans') posts = posts.join(orphans, PostClass.id==orphans.c.post_id) ideaContentLinkQuery = ideaContentLinkQuery.join( orphans, PostClass.id==orphans.c.post_id) elif only_orphan == "false": raise HTTPBadRequest(localizer.translate( _("Getting non-orphan posts isn't 
supported."))) # "true" means hidden only, "false" (default) means visible only. "any" means both. hidden = request.GET.get('hidden_messages', "false") if hidden != 'any': posts = posts.filter(PostClass.hidden==asbool(hidden)) ideaContentLinkQuery = ideaContentLinkQuery.filter( PostClass.hidden==asbool(hidden)) if root_idea_id: related = text(Idea._get_related_posts_statement(), bindparams=[bindparam('root_idea_id', root_idea_id), bindparam('discussion_id', discussion_id)] ).columns(column('post_id')).alias('related') #Virtuoso bug: This should work... #posts = posts.join(related, PostClass.id==related.c.post_id) posts = posts.join(related, PostClass.id == related.c.post_id) ideaContentLinkQuery = ideaContentLinkQuery.join( related, PostClass.id == related.c.post_id) if root_post_id: root_post = Post.get(root_post_id) posts = posts.filter( (Post.ancestry.like( root_post.ancestry + cast(root_post.id, String) + ',%' )) | (PostClass.id==root_post.id) ) elif family_post_id: root_post = Post.get(family_post_id) ancestor_ids = root_post.ancestor_ids() posts = posts.filter( (Post.ancestry.like( root_post.ancestry + cast(root_post.id, String) + ',%' )) | (PostClass.id==root_post.id) | (PostClass.id.in_(ancestor_ids)) ) ideaContentLinkQuery = ideaContentLinkQuery.filter( (Post.ancestry.like( root_post.ancestry + cast(root_post.id, String) + ',%' )) | (PostClass.id==root_post.id) | (PostClass.id.in_(ancestor_ids)) ) else: root_post = None if ids: posts = posts.filter(Post.id.in_(ids)) ideaContentLinkQuery = ideaContentLinkQuery.filter(Post.id.in_(ids)) if posted_after_date: posted_after_date = parse_datetime(posted_after_date) if posted_after_date: posts = posts.filter(PostClass.creation_date >= posted_after_date) ideaContentLinkQuery = ideaContentLinkQuery.filter( PostClass.creation_date >= posted_after_date) #Maybe we should do something if the date is invalid. benoitg if posted_before_date: posted_before_date = parse_datetime(posted_before_date) if posted_before_date: posts = posts.filter(PostClass.creation_date <= posted_before_date) ideaContentLinkQuery = posts.filter( ideaContentLinkQuery.creation_date <= posted_before_date) #Maybe we should do something if the date is invalid. benoitg if post_author_id: posts = posts.filter(PostClass.creator_id == post_author_id) ideaContentLinkQuery = ideaContentLinkQuery.filter( PostClass.creator_id == post_author_id) if post_replies_to: parent_alias = aliased(PostClass) posts = posts.join(parent_alias, PostClass.parent) posts = posts.filter(parent_alias.creator_id == post_replies_to) ideaContentLinkQuery = ideaContentLinkQuery.join( parent_alias, PostClass.parent) ideaContentLinkQuery = ideaContentLinkQuery.filter( parent_alias.creator_id == post_replies_to) # Post read/unread management is_unread = request.GET.get('is_unread') translations = None if user_id != Everyone: # This is horrible, but the join creates complex subqueries that # virtuoso cannot decode properly. 
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))}
        liked_posts = {l.post_id: l.id for l in discussion.db.query(
            LikedPost).filter(
                LikedPost.tombstone_condition(),
                LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion_id))}
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = user_pref_as_translation_table(user, service)
    else:
        # If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            Post.body_text_index.contains(
                text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    ideaContentLinkCache = dict(ideaContentLinkQuery.all())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    else:
        posts = posts.order_by(Content.id)
    print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
        if view_def != "id_only":
            translate_content(
                post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None
                and root_post.id == post.id):
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (
            LikedPost.uri_generic(likedpost) if likedpost else False)
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
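# ---------------------------------------------------------------------------
# Side note (illustration only, not part of the original module): the paging
# fields assembled above are 1-based. A minimal sketch of the same arithmetic,
# under the assumption of the default page size of 25; the function name and
# parameters are local to this example.
def _example_page_bounds(total, page, page_size=25):
    """Return (start_index, end_index, max_page) as computed by get_posts."""
    import math
    max_page = max(1, int(math.ceil(float(total) / page_size)))
    start_index = (page_size * page) - (page_size - 1)   # page 2 -> 26
    if page == max_page:
        end_index = total                                 # last page may be short
    else:
        end_index = start_index + (page_size - 1)         # page 2 -> 50
    return start_index, end_index, max_page

# Example: with 60 posts and page=2, this yields (26, 50, 3).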
def get_posts(request):
    """
    Query interface on posts

    Filters have two forms:
    only_*, for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, for filters that can be reversed (ex: is_unread=true returns only unread
        messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, score, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_replies_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof
        (classifier=null). Can be negated with "!"
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = request.authenticated_userid or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score', 'popularity')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), (
            "Unable to find agent profile with id " + str(post_author_id))

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), (
            "Unable to find agent profile with id " + str(post_replies_to))

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    ).filter(PostClass.type != 'proposition_post')
    # no_of_posts_to_discussion = posts.count()

    post_data = []

    # True means deleted only, False (default) means non-deleted only.
    # None means both.
    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0

    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1

    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2

    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3

    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted not in (False, True, None):
    #     deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(
                    PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id)
            | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        polarities = [classifier[0] != "!" for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        # Define the non-null list in all cases, so the multi-classifier
        # branches below cannot hit an undefined name when null is absent.
        message_classifiers_nonull = message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(
                lambda c: c != "null", message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(message_classifiers_nonull)
            if not includes_null:
                # Negative filters should still match posts without a classifier.
                term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))}
        my_sentiments = {l.post_id: l for l in discussion.db.query(
            SentimentOfPost).filter(
                SentimentOfPost.tombstone_condition(),
                SentimentOfPost.actor_id == user_id,
                *SentimentOfPost.get_discussion_conditions(discussion_id))}
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        # Note: we could count the likes the same way and kill the subquery.
        # But it interferes with the popularity order,
        # and the benefit is not that high.
        sentiment_counts = discussion.db.query(
            PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
        ).join(SentimentOfPost
        ).filter(
            PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
            SentimentOfPost.tombstone_condition()
        ).group_by(PostClass.id, SentimentOfPost.type)
        sentiment_counts_by_post_id = defaultdict(dict)
        for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
            sentiment_counts_by_post_id[post_id][
                sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
            ] = sentiment_count
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(
            Content.disagree_count - Content.like_count,
            Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])
        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(
                PostClass).filter(PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)

        if user_id != Everyone:
            viewpost = post.id in read_posts
        if view_def != "id_only":
            translate_content(
                post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None
                and root_post.id == post.id):
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False

        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json(
                'default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post['sentiment_counts'] = (
                sentiment_counts_by_post_id[post.id])

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data