def reply_deleted_post_4(request, participant2_user, discussion, reply_post_1, test_session):
    """Deleted-post fixture from participant2_user, in reply to reply_post_1.

    The post is created with publication_state DELETED_BY_ADMIN so tests
    can exercise handling of deleted posts inside a thread.
    Registers a finalizer that removes the post from test_session.
    """
    from assembl.models import Post, LangString, PublicationStates
    p = Post(discussion=discussion,
             creator=participant2_user,
             subject=LangString.create(u"re2: root post"),
             body=LangString.create(u"post body"),
             publication_state=PublicationStates.DELETED_BY_ADMIN,
             type="post",
             message_id="*****@*****.**")
    test_session.add(p)
    test_session.flush()
    p.set_parent(reply_post_1)
    test_session.flush()

    def fin():
        # print() call form: py2/py3 compatible and consistent with the
        # other fixtures in this file that already use print().
        print("finalizer reply_deleted_post_4")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def reply_post_1(request, participant2_user, discussion, root_post_1, test_session):
    """Reply fixture authored by participant2_user under root_post_1.

    The body carries a long paragraph of realistic text so harvesting
    features can be tested. A finalizer deletes the post on teardown.
    """
    from assembl.models import Post, LangString
    long_body = LangString.create(
        u"post body with some text so we can test harvesting features. I'm writing a very topical comment with an unrelated source, hoping it would make people angry and make them write answers. I have read in '17O Solid-State NMR Spectroscopy of Functional Oxides for Energy Conversion' thesis by Halat, D. M. (2018) that variable-temperature spectra indicate the onset of oxide-ion motion involving the interstitials at 130 \u00b0C, which is linked to an orthorhombic\u2212tetragonal phase transition. For the V-doped phases, an oxide-ion conduction mechanism is observed that involves oxygen exchange between the Bi-O sublattice and rapidly rotating VO4 tetrahedral units. The more poorly conducting P-doped phase exhibits only vacancy conduction with no evidence of sublattice exchange, a result ascribed to the differing propensities of the dopants to undergo variable oxygen coordination. So I think it would be a very bad idea to allow hot beverages in coworking spaces. But it looks like people don't really care about scientific evidence around here.")
    post = Post(
        discussion=discussion,
        creator=participant2_user,
        subject=LangString.create(u"re1: root post"),
        body=long_body,
        creation_date=datetime(year=2000, month=1, day=4),
        type="post",
        message_id="*****@*****.**")
    test_session.add(post)
    test_session.flush()
    post.set_parent(root_post_1)
    test_session.flush()

    def teardown():
        print("finalizer reply_post_1")
        test_session.delete(post)
        test_session.flush()
    request.addfinalizer(teardown)
    return post
def root_post_with_tags(request, participant1_user, discussion, test_session, tags):
    """Root-post fixture authored by participant1_user, carrying ``tags``.

    A finalizer detaches the tags and deletes the post on teardown.
    """
    from assembl.models import Post, LangString
    post = Post(
        discussion=discussion,
        creator=participant1_user,
        subject=LangString.create(u"a root post"),
        body=LangString.create(u"post body"),
        moderator=None,
        creation_date=datetime(year=2000, month=1, day=1),
        type="post",
        message_id="*****@*****.**")
    post.tags = tags
    test_session.add(post)
    test_session.flush()

    def teardown():
        print("finalizer root_post_with_tags")
        # Detach tags first so the association rows are cleared
        # before the post itself is deleted.
        post.tags = []
        test_session.delete(post)
        test_session.flush()
    request.addfinalizer(teardown)
    return post
def reply_post_1(request, participant2_user, discussion, root_post_1, test_session):
    """Reply fixture from participant2_user, in reply to root_post_1.

    Registers a finalizer that removes the post from test_session.
    """
    from assembl.models import Post, LangString
    p = Post(
        discussion=discussion,
        creator=participant2_user,
        subject=LangString.create(u"re1: root post"),
        body=LangString.create(u"post body"),
        creation_date=datetime(year=2000, month=1, day=4),
        type="post",
        message_id="*****@*****.**")
    test_session.add(p)
    test_session.flush()
    p.set_parent(root_post_1)
    test_session.flush()

    def fin():
        # print() call form: py2/py3 compatible, matching the fixtures
        # in this file that already use print().
        print("finalizer reply_post_1")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def test_idea_with_posts_answers_to_my_posts_show_answers( graphql_request, graphql_registry, idea_in_thread_phase, top_post_in_thread_phase, participant1_post_in_thread_phase, third_post_in_thread_phase, participant1_user): from assembl.models import Post # top_post_in_thread_phase admin's post is an answer of participant1_post_in_thread_phase participant_instance_id = int( from_global_id(participant1_post_in_thread_phase)[1]) admin_instance_id = int(from_global_id(top_post_in_thread_phase)[1]) Post.get(admin_instance_id).parent_id = participant_instance_id graphql_request.authenticated_userid = participant1_user.id res = schema.execute( graphql_registry['IdeaWithPostsQuery'], context_value=graphql_request, variable_values={ "id": idea_in_thread_phase, "lang": "en", "additionalFields": True, "onlyMyPosts": False, "myPostsAndAnswers": True }, ) assert res.errors is None posts = res.data['idea']['posts']['edges'] assert len(posts) == 2
def post_draft_for_bright_mirror(
        request, test_session, discussion, moderator_user, bright_mirror):
    """Draft fiction fixture attached to the bright_mirror idea.

    Created by moderator_user with publication_state DRAFT and a
    creation date one week in the past. Registers a finalizer that
    removes both the post and its idea link.
    """
    from assembl.models import Post, Idea, LangString, IdeaRelatedPostLink, PublicationStates
    from graphene.relay import Node
    idea_id = bright_mirror
    # bright_mirror is a relay global id; resolve it to the Idea row.
    raw_id = int(Node.from_global_id(idea_id)[1])
    idea = Idea.get(raw_id)
    p = Post(
        discussion=discussion,
        creator=moderator_user,
        subject=LangString.create(u"Draft"),
        body=LangString.create(u"A simple draft fiction"),
        type='post',
        publication_state=PublicationStates.DRAFT,
        message_id="*****@*****.**",
        creation_date=datetime.utcnow() - timedelta(days=7))
    idc = IdeaRelatedPostLink(
        idea=idea,
        creator=moderator_user,
        content=p)
    test_session.add(p)
    test_session.add(idc)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer post_draft_for_bright_mirror")
        test_session.delete(p)
        test_session.delete(idc)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def post_related_to_sub_idea_1_participant2(request, test_session, discussion, participant2_user, subidea_1):
    """Post fixture by participant2_user linked to subidea_1 via an
    IdeaRelatedPostLink; both rows are deleted on teardown."""
    from assembl.models import Post, LangString, IdeaRelatedPostLink
    post = Post(
        discussion=discussion,
        creator=participant2_user,
        subject=LangString.create(u"A post related to sub_idea_1 "),
        body=LangString.create(u"A post related to sub_idea_1"),
        creation_date=datetime(year=2000, month=1, day=6),
        type='post',
        message_id="*****@*****.**")
    link = IdeaRelatedPostLink(
        idea=subidea_1, creator=participant2_user, content=post)
    test_session.add(post)
    test_session.add(link)
    test_session.flush()

    def teardown():
        test_session.delete(post)
        test_session.delete(link)
        test_session.flush()
    request.addfinalizer(teardown)
    return post
def root_post_en_under_positive_column_of_idea(
        request, test_session, discussion, admin_user,
        idea_message_column_positive):
    """English root-post fixture classified under the positive message
    column of its idea, linked via an IdeaRelatedPostLink.

    Registers a finalizer that removes the post and its idea link.
    """
    from assembl.models import Post, LangString, IdeaRelatedPostLink
    idea = idea_message_column_positive.idea
    p = Post(
        discussion=discussion,
        creator=admin_user,
        subject=LangString.create(u"A simple positive subject"),
        body=LangString.create(u"A simple positive body"),
        type='post',
        message_id="*****@*****.**",
        message_classifier=idea_message_column_positive.message_classifier)
    idc = IdeaRelatedPostLink(
        idea=idea,
        creator=admin_user,
        content=p)
    test_session.add(p)
    test_session.add(idc)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer root_post_en_under_positive_column_of_idea")
        test_session.delete(p)
        test_session.delete(idc)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def root_post_1_with_positive_message_classifier(request, participant1_user, idea_message_column_positive, discussion, test_session):
    """Root-post fixture from participant1_user, classified with the
    positive message column's classifier.

    Registers a finalizer that removes the post from test_session.
    """
    from assembl.models import Post, LangString
    p = Post(
        discussion=discussion,
        creator=participant1_user,
        subject=LangString.create(u"a root post"),
        body=LangString.create(u"post body"),
        moderator=None,
        creation_date=datetime(year=2000, month=1, day=1),
        type="post",
        message_id="*****@*****.**",
        message_classifier=idea_message_column_positive.message_classifier)
    test_session.add(p)
    test_session.flush()

    def fin():
        # Fixed: the finalizer previously printed "finalizer root_post_1",
        # the name of a different fixture.
        print("finalizer root_post_1_with_positive_message_classifier")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def post_related_to_sub_idea_1_1_1(
        request, test_session, discussion, admin_user, subidea_1_1_1):
    """Post fixture by admin_user linked to subidea_1_1_1, dated inside
    the thread phase (see phases fixture).

    Registers a finalizer that removes the post and its idea link.
    """
    from assembl.models import Post, LangString, IdeaRelatedPostLink
    idea = subidea_1_1_1
    p = Post(
        discussion=discussion,
        creator=admin_user,
        subject=LangString.create(u"A post subject related to sub_idea_1_1_1"),
        body=LangString.create(u"A post body related to sub_idea_1_1_1"),
        # in the thread phase date range (see phases fixture)
        creation_date=datetime(2018, 2, 17, 9, 0, 0),
        type='post',
        message_id="*****@*****.**")
    idc = IdeaRelatedPostLink(
        idea=idea,
        creator=admin_user,
        content=p)
    test_session.add(p)
    test_session.add(idc)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer post_related_to_sub_idea_1_1_1")
        test_session.delete(p)
        test_session.delete(idc)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def post_related_to_sub_idea_1(
        request, test_session, discussion, admin_user, subidea_1):
    """Post fixture by admin_user linked to subidea_1 via an
    IdeaRelatedPostLink.

    Registers a finalizer that removes the post and its idea link.
    """
    from assembl.models import Post, LangString, IdeaRelatedPostLink
    idea = subidea_1
    p = Post(
        discussion=discussion,
        creator=admin_user,
        subject=LangString.create(u"A post related to sub_idea_1 "),
        body=LangString.create(u"A post related to sub_idea_1"),
        type='post',
        message_id="*****@*****.**")
    idc = IdeaRelatedPostLink(
        idea=idea,
        creator=admin_user,
        content=p)
    test_session.add(p)
    test_session.add(idc)
    test_session.flush()

    def fin():
        # Fixed: the message previously named an unrelated fixture
        # ("root_post_en_under_positive_column_of_idea"), a copy/paste slip.
        print("finalizer post_related_to_sub_idea_1")
        test_session.delete(p)
        test_session.delete(idc)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def creativity_session_widget_post(request, test_session, discussion, participant1_user, creativity_session_widget, creativity_session_widget_new_idea):
    """A Post fixture with a bound to a creativity widget to a new idea
    and an idea content link.

    Registers a finalizer that removes the widget link and the post.
    """
    from assembl.models import (Post, IdeaContentWidgetLink, LangString)
    p = Post(discussion=discussion,
             creator=participant1_user,
             subject=LangString.create(u"re: generated idea"),
             body=LangString.create(u"post body"),
             type="post",
             message_id="*****@*****.**")
    test_session.add(p)
    test_session.flush()
    icwl = IdeaContentWidgetLink(
        content=p,
        idea=creativity_session_widget_new_idea,
        creator=participant1_user)
    test_session.add(icwl)

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer creativity_session_widget_post")
        test_session.delete(icwl)
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def test_extract_get_comment(admin_user, graphql_request, top_post_in_thread_phase,
                             extract_post_1_to_subidea_1_1, extract_comment):
    """A Post's extracts expose their comments' subjects through GraphQL."""
    from graphene.relay import Node
    # top_post_in_thread_phase is a relay global id; resolve to the DB row.
    raw_id = int(Node.from_global_id(top_post_in_thread_phase)[1])
    from assembl.models import Post
    post = Post.get(raw_id)
    # Attach the extract fixture (which carries extract_comment) to the post.
    post.extracts.append(extract_post_1_to_subidea_1_1)
    post.db.flush()
    res = schema.execute(u"""
query Post($id: ID!, $lang: String!) {
  post: node(id: $id) {
    ... on Post {
      extracts {
        comments {
          subject(lang: $lang)
        }
      }
    }
  }
}
""", context_value=graphql_request, variable_values={
        "id": top_post_in_thread_phase,
        "lang": u'en',
    })
    assert res.data['post']['extracts'][0]['comments'][0][
        'subject'] == 'comment of extract title'
def post_published_for_bright_mirror_participant(
        request, test_session, discussion, admin_user, participant1_user,
        bright_mirror):
    """Published fiction fixture by participant1_user attached to the
    bright_mirror idea (link created by admin_user).

    Registers a finalizer that removes the post and its idea link.
    """
    from assembl.models import Post, Idea, LangString, IdeaRelatedPostLink, PublicationStates
    from graphene.relay import Node
    idea_id = bright_mirror
    # bright_mirror is a relay global id; resolve it to the Idea row.
    raw_id = int(Node.from_global_id(idea_id)[1])
    idea = Idea.get(raw_id)
    p = Post(
        discussion=discussion,
        creator=participant1_user,
        subject=LangString.create(u"Published by participant"),
        body=LangString.create(u"A simple published fiction by participant"),
        type='post',
        publication_state=PublicationStates.PUBLISHED,
        message_id="*****@*****.**",
        creation_date=datetime.utcnow())
    idc = IdeaRelatedPostLink(
        idea=idea,
        creator=admin_user,
        content=p)
    test_session.add(p)
    test_session.add(idc)
    test_session.flush()

    def fin():
        # Fixed: previously printed "finalizer post_published_for_bright_mirror",
        # the name of a different fixture.
        print("finalizer post_published_for_bright_mirror_participant")
        test_session.delete(p)
        test_session.delete(idc)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def test_mutation_add_extract_comment(admin_user, graphql_request,
                                      idea_in_thread_phase,
                                      top_post_in_thread_phase,
                                      extract_post_1_to_subidea_1_1):
    """createPost with an extractId produces a post whose parentExtractId
    points back to that extract."""
    from graphene.relay import Node
    # Resolve the relay global id of the post fixture to its DB id.
    raw_id = int(Node.from_global_id(top_post_in_thread_phase)[1])
    from assembl.models import Post
    post = Post.get(raw_id)
    # Attach the extract to the post so the mutation can comment on it.
    post.extracts.append(extract_post_1_to_subidea_1_1)
    post.db.flush()
    extract_id = extract_post_1_to_subidea_1_1.graphene_id()
    idea_id = idea_in_thread_phase
    res = schema.execute(u"""
mutation createPost {
  createPost(
    ideaId:"%s",
    extractId:"%s",
    subject:"Manger des choux \u00e0 la cr\u00e8me",
    body:"Je recommande de manger des choux \u00e0 la cr\u00e8me, c'est tr\u00e8s bon, et \u00e7a permet de maintenir l'industrie de la patisserie fran\u00e7aise."
  ) {
    post {
      ... on Post {
        parentExtractId
      }
    }
  }
}
""" % (idea_id, extract_id), context_value=graphql_request)
    assert res.data['createPost']['post']['parentExtractId'] == extract_id
def get_post(request):
    """Return a single post as JSON, honoring an optional ``view`` query
    parameter that selects a view_def; 404 if the post does not exist."""
    post_id = request.matchdict["id"]
    post = Post.get_instance(post_id)
    view_def = request.GET.get("view")
    if not post:
        raise HTTPNotFound("Post with id '%s' not found." % post_id)
    # With an explicit view_def, serialize through generic_json;
    # otherwise fall back to the default serializable() form.
    return post.generic_json(view_def) if view_def else post.serializable()
def get_post(request):
    """Return a single post as JSON through generic_json, applying the
    requester's permissions; 404 if the post does not exist."""
    post_id = request.matchdict['id']
    post = Post.get_instance(post_id)
    # Default to the 'default' view_def when none is requested.
    view_def = request.GET.get('view') or 'default'
    if not post:
        raise HTTPNotFound("Post with id '%s' not found." % post_id)
    discussion_id = int(request.matchdict['discussion_id'])
    # Anonymous requests are treated as Everyone for permission lookup.
    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)
    return post.generic_json(view_def, user_id, permissions)
def reply_post_3(request, participant2_user, discussion, root_post_1, test_session):
    """Reply fixture from participant2_user.

    NOTE(review): the original docstring said "in reply to reply_post_2",
    but the code parents this post under root_post_1 — confirm intent.
    Registers a finalizer that removes the post from test_session.
    """
    from assembl.models import Post
    p = Post(
        discussion=discussion,
        creator=participant2_user,
        subject=u"re2: root post",
        body=u"post body",
        type="post",
        message_id="msg4")
    test_session.add(p)
    test_session.flush()
    p.set_parent(root_post_1)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer reply_post_3")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def reply_post_2(request, participant1_user, discussion, reply_post_1, test_session):
    """Reply fixture from participant1_user, in reply to reply_post_1.

    Registers a finalizer that removes the post from test_session.
    """
    from assembl.models import Post
    p = Post(
        discussion=discussion,
        creator=participant1_user,
        subject=u"re2: root post",
        body=u"post body",
        creation_date=datetime(year=2000, month=1, day=5),
        type="post",
        message_id="msg3")
    test_session.add(p)
    test_session.flush()
    p.set_parent(reply_post_1)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer reply_post_2")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def test_extracts_on_post(admin_user, graphql_request, discussion, top_post_in_thread_phase):
    """A Post node exposes its extracts' body and importance via GraphQL,
    in insertion order."""
    from graphene.relay import Node
    # Resolve the relay global id of the post fixture to its DB id.
    raw_id = int(Node.from_global_id(top_post_in_thread_phase)[1])
    from assembl.models import Extract, Post
    post = Post.get(raw_id)
    # Attach one ordinary and one important extract to the post.
    post.extracts.append(
        Extract(body=u"super quote", important=False,
                creator=admin_user, owner=admin_user, discussion=discussion,
                extract_hash=u"extract1"))
    post.extracts.append(
        Extract(body=u"super important quote", important=True,
                creator=admin_user, owner=admin_user, discussion=discussion,
                extract_hash=u"extract2"))
    post.db.flush()
    res = schema.execute(u"""
query Post($id: ID!) {
  post: node(id: $id) {
    ... on Post {
      extracts {
        body
        important
      }
    }
  }
}
""", context_value=graphql_request, variable_values={
        "id": top_post_in_thread_phase,
    })
    # Round-trip through json to normalize OrderedDicts before comparing.
    assert json.loads(json.dumps(res.data)) == {
        u'post': {
            u'extracts': [
                {
                    u'body': u'super quote',
                    u'important': False
                },
                {
                    u'body': u'super important quote',
                    u'important': True
                },
            ]
        }
    }
def reply_to_deleted_post_5(
        request, participant1_user, discussion, reply_deleted_post_4,
        test_session):
    """Reply fixture from participant1_user, in reply to the deleted
    post reply_deleted_post_4.

    (The original docstring said participant2_user / root_post_1, which
    contradicted the code.) Registers a finalizer that removes the post.
    """
    from assembl.models import Post, LangString
    p = Post(
        discussion=discussion,
        creator=participant1_user,
        subject=LangString.create(u"re3: root post"),
        body=LangString.create(u"post body"),
        type="post",
        message_id="*****@*****.**")
    test_session.add(p)
    test_session.flush()
    p.set_parent(reply_deleted_post_4)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer reply_to_deleted_post_5")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def reply_post_1(request, participant2_user, discussion, root_post_1, test_session):
    """Reply fixture from participant2_user, in reply to root_post_1.

    Registers a finalizer that removes the post from test_session.
    """
    from assembl.models import Post, LangString
    p = Post(
        discussion=discussion,
        creator=participant2_user,
        subject=LangString.create(u"re1: root post"),
        body=LangString.create(u"post body"),
        creation_date=datetime(year=2000, month=1, day=4),
        type="post",
        message_id="*****@*****.**")
    test_session.add(p)
    test_session.flush()
    p.set_parent(root_post_1)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer reply_post_1")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def reply_post_1(request, participant2_user, discussion, root_post_1, test_session):
    """Reply fixture from participant2_user, in reply to root_post_1.

    The body is a long realistic paragraph used to test harvesting
    features. Registers a finalizer that removes the post.
    """
    from assembl.models import Post, LangString
    p = Post(
        discussion=discussion,
        creator=participant2_user,
        subject=LangString.create(u"re1: root post"),
        body=LangString.create(u"post body with some text so we can test harvesting features. I'm writing a very topical comment with an unrelated source, hoping it would make people angry and make them write answers. I have read in '17O Solid-State NMR Spectroscopy of Functional Oxides for Energy Conversion' thesis by Halat, D. M. (2018) that variable-temperature spectra indicate the onset of oxide-ion motion involving the interstitials at 130 \u00b0C, which is linked to an orthorhombic\u2212tetragonal phase transition. For the V-doped phases, an oxide-ion conduction mechanism is observed that involves oxygen exchange between the Bi-O sublattice and rapidly rotating VO4 tetrahedral units. The more poorly conducting P-doped phase exhibits only vacancy conduction with no evidence of sublattice exchange, a result ascribed to the differing propensities of the dopants to undergo variable oxygen coordination. So I think it would be a very bad idea to allow hot beverages in coworking spaces. But it looks like people don't really care about scientific evidence around here."),
        creation_date=datetime(year=2000, month=1, day=4),
        type="post",
        message_id="*****@*****.**")
    test_session.add(p)
    test_session.flush()
    p.set_parent(root_post_1)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer reply_post_1")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def reply_deleted_post_4(request, participant2_user, discussion, reply_post_1, test_session):
    """Deleted-post fixture from participant2_user, in reply to reply_post_1.

    Created with publication_state DELETED_BY_ADMIN so tests can check
    deleted-post behavior. Registers a finalizer that removes the post.
    """
    from assembl.models import Post, LangString, PublicationStates
    p = Post(
        discussion=discussion,
        creator=participant2_user,
        subject=LangString.create(u"re2: root post"),
        body=LangString.create(u"post body"),
        publication_state=PublicationStates.DELETED_BY_ADMIN,
        type="post",
        message_id="*****@*****.**")
    test_session.add(p)
    test_session.flush()
    p.set_parent(reply_post_1)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer reply_deleted_post_4")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def upgrade(pyramid_env):
    """Migration: rewrite TextFragmentIdentifier xpaths so the message-div
    id uses the post's URI instead of its raw numeric id.

    Orphan fragment identifiers (no extract) are deleted first.
    """
    from assembl.models import Extract, TextFragmentIdentifier, Content, Post
    db = Extract.db()
    # Matches xpaths of the form //div[@id='message-<numeric id>']<rest>
    reg = re.compile(r"^//div\[@id='message-([0-9]+)'\](.*)")
    with transaction.manager:
        # Fragment identifiers with no extract cannot be rewritten; drop them.
        db.query(TextFragmentIdentifier).filter_by(extract=None).delete()
        for tfi in db.query(TextFragmentIdentifier).join(
                Extract, Content, Post).all():
            xpo = tfi.xpath_start
            # print() call form for py2/py3 compatibility.
            print(xpo)
            match = reg.match(xpo)
            if match:
                id, remainder = match.groups()
                uri = Post.uri_generic(id)
                xp = "//div[@id='message-%s']%s" % (uri, remainder)
                print(xp)
                # Both endpoints are set to the same rewritten xpath,
                # as in the original migration.
                tfi.xpath_start = tfi.xpath_end = xp
def post_body_locale_determined_by_creator(
        request, test_session, discussion, admin_user,
        user_language_preference_fr_cookie):
    """Post fixture whose body LangString has no explicit locale, so the
    locale must be determined from the creator's language preference.

    Registers a finalizer that removes the post from test_session.
    """
    from assembl.models import Post, LangString
    p = Post(
        discussion=discussion,
        creator=admin_user,
        subject=LangString.create(u"testa"),
        body=LangString.create(u"testa"),
        message_id="*****@*****.**")
    test_session.add(p)
    test_session.flush()

    def fin():
        # Fixed: previously printed "post_subject_locale_determined_by_creator",
        # the name of a different fixture.
        print("finalizer post_body_locale_determined_by_creator")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def discussion2_root_post_1(request, participant1_user, discussion2, test_session):
    """Root-post fixture from participant1_user in discussion2.

    Registers a finalizer that removes the post from test_session.
    """
    from assembl.models import Post
    p = Post(
        discussion=discussion2,
        creator=participant1_user,
        subject=u"a root post",
        body=u"post body",
        creation_date=datetime(year=2000, month=1, day=2),
        type="post",
        message_id="msg1")
    test_session.add(p)
    test_session.flush()

    def fin():
        # print() call form for py2/py3 compatibility, consistent with
        # the rest of this file.
        print("finalizer discussion2_root_post_1")
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def mark_post_read(request):
    """Mark this post as un/read. Return the read post count for all affected ideas."""
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get_instance(discussion_id)
    post_id = request.matchdict['id']
    post = Post.get_instance(post_id)
    if not post:
        raise HTTPNotFound("Post with id '%s' not found." % post_id)
    # Switch from the URL identifier to the numeric DB id.
    post_id = post.id
    user_id = authenticated_userid(request)
    if not user_id:
        raise HTTPUnauthorized()
    read_data = json.loads(request.body)
    db = discussion.db
    change = False
    with transaction.manager:
        if read_data.get('read', None) is False:
            # Un-read: tombstone the live ViewPost action if one exists.
            view = db.query(ViewPost).filter_by(post_id=post_id, actor_id=user_id, tombstone_date=None).first()
            if view:
                change = True
                view.is_tombstone = True
        else:
            # Read: only add a ViewPost if no live one exists (idempotent).
            count = db.query(ViewPost).filter_by(post_id=post_id, actor_id=user_id, tombstone_date=None).count()
            if not count:
                change = True
                db.add(ViewPost(post=post, actor_id=user_id))
    new_counts = []
    if change:
        # Recompute read counts only when the read state actually changed.
        new_counts = Idea.idea_read_counts(discussion_id, post_id, user_id)
    return {
        "ok": True,
        "ideas": [{
            "@id": Idea.uri_generic(idea_id),
            "num_read_posts": read_posts
        } for (idea_id, read_posts) in new_counts]
    }
def mark_post_read(request):
    """Mark this post as un/read for the current user and return per-idea
    post counts (total and read) for all affected ideas."""
    discussion_id = int(request.matchdict["discussion_id"])
    discussion = Discussion.get_instance(discussion_id)
    post_id = request.matchdict["id"]
    post = Post.get_instance(post_id)
    if not post:
        raise HTTPNotFound("Post with id '%s' not found." % post_id)
    # Switch from the URL identifier to the numeric DB id.
    post_id = post.id
    user_id = authenticated_userid(request)
    if not user_id:
        raise HTTPUnauthorized()
    read_data = json.loads(request.body)
    db = Discussion.db()
    change = False
    with transaction.manager:
        if read_data.get("read", None) is False:
            # Un-read: hard-delete the ViewPost action if one exists.
            view = db.query(ViewPost).filter(ViewPost.post_id == post_id).filter(Action.actor_id == user_id).first()
            if view:
                change = True
                db.delete(view)
        else:
            # Read: only add a ViewPost if none exists (idempotent).
            count = db.query(ViewPost).filter(ViewPost.post_id == post_id).filter(Action.actor_id == user_id).count()
            if not count:
                change = True
                db.add(ViewPost(post=post, actor_id=user_id))
    new_counts = []
    if change:
        # Recompute counts only when the read state actually changed.
        new_counts = Idea.idea_counts(discussion_id, post_id, user_id)
    return {
        "ok": True,
        "ideas": [
            {
                "@id": Idea.uri_generic(idea_id),
                "@type": db.query(Idea).get(idea_id).external_typename(),
                "num_posts": total_posts,
                "num_read_posts": read_posts,
            }
            for (idea_id, total_posts, read_posts) in new_counts
        ],
    }
def discussion2_root_post_1(request, participant1_user, discussion2, test_session):
    """Root-post fixture (no parent) by participant1_user in discussion2.

    A finalizer deletes the post from test_session on teardown.
    """
    from assembl.models import Post, LangString
    post = Post(
        discussion=discussion2,
        creator=participant1_user,
        subject=LangString.create(u"a root post"),
        body=LangString.create(u"post body"),
        creation_date=datetime(year=2000, month=1, day=2),
        parent=None,
        type="post",
        message_id="*****@*****.**")
    test_session.add(post)
    test_session.flush()

    def teardown():
        print("finalizer discussion2_root_post_1")
        test_session.delete(post)
        test_session.flush()
    request.addfinalizer(teardown)
    return post
def mark_post_read(request):
    """Mark this post as un/read. Return the read post count for all affected ideas."""
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get_instance(discussion_id)
    post_id = request.matchdict['id']
    post = Post.get_instance(post_id)
    if not post:
        raise HTTPNotFound("Post with id '%s' not found." % post_id)
    # Switch from the URL identifier to the numeric DB id.
    post_id = post.id
    user_id = request.authenticated_userid
    if not user_id:
        raise HTTPUnauthorized()
    read_data = json.loads(request.body)
    db = discussion.db
    change = False
    with transaction.manager:
        if read_data.get('read', None) is False:
            # Un-read: tombstone the live ViewPost action if one exists.
            view = db.query(ViewPost).filter_by(
                post_id=post_id, actor_id=user_id,
                tombstone_date=None).first()
            if view:
                change = True
                view.is_tombstone = True
        else:
            # Read: only add a ViewPost if no live one exists (idempotent).
            count = db.query(ViewPost).filter_by(
                post_id=post_id, actor_id=user_id,
                tombstone_date=None).count()
            if not count:
                change = True
                db.add(ViewPost(post=post, actor_id=user_id))
    new_counts = []
    if change:
        # Recompute read counts only when the read state actually changed.
        new_counts = Idea.idea_read_counts(discussion_id, post_id, user_id)
    return {
        "ok": True,
        "ideas": [
            {"@id": Idea.uri_generic(idea_id),
             "num_read_posts": read_posts
             } for (idea_id, read_posts) in new_counts]
    }
def create_post(request):
    """Create a post (or a reply) in a discussion.

    We use POST, not PUT, because we don't know the id of the post.

    Fixes over the original: the user_id check now happens *before* the
    user row is queried (previously an anonymous request hit the DB with
    user_id=None before raising), and the discussion existence check now
    happens *before* ``discussion.topic`` is read (previously a missing
    discussion raised AttributeError instead of HTTPNotFound).
    """
    request_body = json.loads(request.body)
    user_id = authenticated_userid(request)
    if not user_id:
        raise HTTPUnauthorized()
    user = Post.db.query(User).filter_by(id=user_id).one()
    message = request_body.get('message', None)
    html = request_body.get('html', None)
    reply_id = request_body.get('reply_id', None)
    subject = request_body.get('subject', None)
    if not message:
        # Kept as HTTPUnauthorized to preserve the original API contract.
        raise HTTPUnauthorized()
    if reply_id:
        # Reply path: attach to the existing post's content and stop.
        post = Post.get_instance(reply_id)
        post.content.reply(user, message)
        return {"ok": True}
    discussion_id = request.matchdict['discussion_id']
    discussion = Discussion.get(id=int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            _("No discussion found with id=%s" % discussion_id)
        )
    # Default the subject to the discussion topic when none was given.
    subject = subject or discussion.topic
    for source in discussion.sources:
        source.send(user, message, subject=subject, html_body=html)
    return {"ok": True}
def creativity_session_widget_post(
        request, test_session, discussion, participant1_user,
        creativity_session_widget, creativity_session_widget_new_idea):
    """Post fixture linked to a creativity-widget-generated idea via an
    IdeaContentWidgetLink.

    Fixes over the original: returned an undefined name ``i`` (NameError
    at fixture setup) instead of the created post ``p``, and used the
    py2-only print statement.
    """
    from assembl.models import (Post, IdeaContentWidgetLink)
    p = Post(
        discussion=discussion,
        creator=participant1_user,
        subject=u"re: generated idea",
        body=u"post body",
        type="post",
        message_id="comment_generated")
    test_session.add(p)
    test_session.flush()
    icwl = IdeaContentWidgetLink(
        content=p,
        idea=creativity_session_widget_new_idea,
        creator=participant1_user)
    test_session.add(icwl)

    def fin():
        print("finalizer creativity_session_widget_post")
        test_session.delete(icwl)
        test_session.delete(p)
        test_session.flush()
    request.addfinalizer(fin)
    return p
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread
    message, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof (classifier=null). Can be negated with "!"
    """
    # --- Resolve the discussion and the requesting user ---
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    # --- Parse query-string parameters ---
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    # NOTE(review): filter_names is computed but never used below — possibly
    # dead code left from an earlier filtering scheme.
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score', 'popularity')
    # 'score' ordering only makes sense relative to a full-text search.
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    # External URI ids are translated to database ids.
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(
            post_author_id
        ), "Unable to find agent profile with id " + post_author_id

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(
            post_replies_to
        ), "Unable to find agent profile with id " + post_replies_to

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    # --- Build the base query ---
    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        # Score ordering needs the full-text score as a second column.
        posts = discussion.db.query(PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)
    posts = posts.filter(PostClass.discussion_id == discussion_id,)
    ##no_of_posts_to_discussion = posts.count()
    post_data = []

    # --- Deleted-post selection ---
    # True means deleted only, False (default) means non-deleted only. None means both.
    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0
    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1
    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2
    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3
    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        # Explicit ids: include both deleted and live posts by default.
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #    deleted = False
    # end v4

    # --- Structural filters (orphans, idea, subtree) ---
    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        # Deletion filtering is already folded into the orphan/idea
        # subqueries above; apply it here only for the plain query.
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)
        # The ancestry column is a materialized path: descendants match
        # "<root ancestry><root id>,%"; include the root itself too.
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        # Family = the subtree below the post, the post itself, and all
        # of its ancestors.
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    # --- Date / author / classifier filters ---
    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg
    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
        # An empty classifier token means an impossible filter: short-circuit.
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        # A leading "!" negates a classifier; all terms must share polarity.
        polarities = [classifier[0] != "!" for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(
                lambda c: c != "null", message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(
                    message_classifiers_nonull)
            if includes_null:
                term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(
                    message_classifiers_nonull)
            if not includes_null:
                term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
            ViewPost.tombstone_condition(),
            ViewPost.actor_id == user_id,
            *ViewPost.get_discussion_conditions(discussion_id))}
        my_sentiments = {l.post_id: l for l in discussion.db.query(
            SentimentOfPost).filter(
            SentimentOfPost.tombstone_condition(),
            SentimentOfPost.actor_id == user_id,
            *SentimentOfPost.get_discussion_conditions(discussion_id))}
        if is_unread != None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        #If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass
        # posts = posts.options(defer(Post.body))
    else:
        # Precompute per-post idea-content-link data and sentiment counts
        # so serialization below avoids per-row subqueries.
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        # Note: we could count the like the same way and kill the subquery.
        # But it interferes with the popularity order,
        # and the benefit is not that high.
        sentiment_counts = discussion.db.query(
            PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
        ).join(SentimentOfPost).filter(
            PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
            SentimentOfPost.tombstone_condition()
        ).group_by(PostClass.id, SentimentOfPost.type)
        sentiment_counts_by_post_id = defaultdict(dict)
        for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
            sentiment_counts_by_post_id[post_id][
                sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
            ] = sentiment_count

    # Eager-load everything the serializer touches.
    posts = posts.options(
        # undefer(Post.idea_content_links_above_post),
        joinedload_all(Post.creator),
        joinedload_all(Post.extracts),
        joinedload_all(Post.widget_idea_links),
        joinedload_all(SynthesisPost.publishes_synthesis),
        subqueryload_all(Post.attachments))
    if len(discussion.discussion_locales) > 1:
        posts = posts.options(*Content.subqueryload_options())
    else:
        posts = posts.options(*Content.joinedload_options())

    # --- Ordering ---
    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(
            Content.disagree_count - Content.like_count,
            Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            # Collect the ancestor ids from the materialized ancestry path.
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])
        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass
                # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    # --- Serialization loop ---
    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            # 'score' queries yield (post, score) tuples; others bare posts.
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)
        if user_id != Everyone:
            viewpost = post.id in read_posts
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json(
                'default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post[
                'sentiment_counts'] = sentiment_counts_by_post_id[post.id]
        post_data.append(serializable_post)

    # Benoitg:  For now, this completely garbles threading without intelligent
    #handling of pagination.  Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date.  If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    # --- Pagination envelope ---
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO:  Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
def create_post(request):
    """
    Create a new post in this discussion.

    We use post, not put, because we don't know the id of the post
    """
    localizer = request.localizer
    request_body = json.loads(request.body)
    user_id = authenticated_userid(request)
    if not user_id:
        raise HTTPUnauthorized()

    user = Post.default_db.query(User).filter_by(id=user_id).one()

    body = request_body.get('body', None)
    html = request_body.get('html', None)  # BG: Is this used now? I cannot see it.
    reply_id = request_body.get('reply_id', None)
    idea_id = request_body.get('idea_id', None)
    subject = request_body.get('subject', None)
    publishes_synthesis_id = request_body.get('publishes_synthesis_id', None)
    message_classifier = request_body.get('message_classifier', None)

    # A synthesis post may legitimately have an empty body.
    if not body and not publishes_synthesis_id:
        # Should we allow empty messages otherwise?
        raise HTTPBadRequest(localizer.translate(_("Your message is empty")))

    if reply_id:
        in_reply_to_post = Post.get_instance(reply_id)
    else:
        in_reply_to_post = None

    if idea_id:
        in_reply_to_idea = Idea.get_instance(idea_id)
    else:
        in_reply_to_idea = None

    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get_instance(discussion_id)

    if not discussion:
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s")) % (
                discussion_id,))

    ctx = DummyContext({Discussion: discussion})
    # Normalize the body to a LangString, whatever the input form.
    if html:
        log.warning("Still using html")
        # how to guess locale in this case?
        body = LangString.create(html)
    elif body:
        body = LangString.create_from_json(body, context=ctx, user_id=user_id)
    else:
        body = LangString.EMPTY(discussion.db)

    if subject:
        subject = LangString.create_from_json(
            subject, context=ctx, user_id=user_id)
    else:
        # No explicit subject: derive one from the parent post, the idea,
        # or the discussion topic, in that order.
        # print(in_reply_to_post.subject, discussion.topic)
        if in_reply_to_post:
            subject = (
                in_reply_to_post.get_title().first_original().value or ''
                if in_reply_to_post.get_title() else '')
        elif in_reply_to_idea:
            # TODO:  THis should use a cascade like the frontend
            subject = (in_reply_to_idea.short_title
                       if in_reply_to_idea.short_title else '')
        else:
            subject = discussion.topic if discussion.topic else ''
        # print subject
        if subject is not None and len(subject):
            # Strip any existing "Re:" prefixes before adding our own.
            new_subject = "Re: " + restrip_pat.sub('', subject).strip()
            if (in_reply_to_post and new_subject == subject and
                    in_reply_to_post.get_title()):
                # reuse subject and translations
                subject = in_reply_to_post.get_title().clone(discussion.db)
            else:
                # how to guess locale in this case?
                subject = LangString.create(new_subject)
        else:
            capture_message(
                "A message is about to be written to the database with an "
                "empty subject. This is not supposed to happen.")
            subject = LangString.EMPTY(discussion.db)

    post_constructor_args = {
        'discussion': discussion,
        'creator_id': user_id,
        'message_classifier': message_classifier,
        'subject': subject,
        'body': body
    }

    if publishes_synthesis_id:
        published_synthesis = Synthesis.get_instance(publishes_synthesis_id)
        post_constructor_args['publishes_synthesis'] = published_synthesis
        new_post = SynthesisPost(**post_constructor_args)
        new_post.finalize_publish()
    else:
        new_post = AssemblPost(**post_constructor_args)

    discussion.db.add(new_post)
    # Flush so the post has an id before we link things to it.
    discussion.db.flush()

    if in_reply_to_post:
        new_post.set_parent(in_reply_to_post)

    if in_reply_to_idea:
        idea_post_link = IdeaRelatedPostLink(
            creator_id=user_id, content=new_post, idea=in_reply_to_idea)
        discussion.db.add(idea_post_link)

        # Notify the idea and all its ancestors of the change.
        idea = in_reply_to_idea
        while idea:
            idea.send_to_changes()
            parents = idea.get_parents()
            idea = next(iter(parents)) if parents else None
    else:
        discussion.root_idea.send_to_changes()

    for source in discussion.sources:
        if 'send_post' in dir(source):
            source.send_post(new_post)
    permissions = get_permissions(user_id, discussion_id)

    return new_post.generic_json('default', user_id, permissions)
def test_creativity_session_widget(
        discussion, test_app, subidea_1, subidea_1_1, participant1_user,
        test_session, request):
    # End-to-end test of the creativity-session widget lifecycle:
    # create the widget, generate hidden ideas and posts through it,
    # confirm a subset (which unhides them), and check notifications.
    # NOTE(review): subidea_1_1 is presumably the "non-widget child" the
    # empty-ideas check below refers to — confirm against the fixtures.

    # Post the initial configuration
    format = lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')
    new_widget_loc = test_app.post_json(
        '/data/Discussion/%d/widgets' % (discussion.id,), {
            '@type': 'CreativitySessionWidget',
            'settings': {
                'idea': 'local:Idea/%d' % (subidea_1.id),
                'notifications': [
                    {
                        'start': '2014-01-01T00:00:00',
                        'end': format(datetime.utcnow() + timedelta(1)),
                        'message': 'creativity_session'
                    },
                    {
                        'start': format(datetime.utcnow() + timedelta(1)),
                        'end': format(datetime.utcnow() + timedelta(2)),
                        'message': 'creativity_session'
                    }
                ]
            }
        })
    assert new_widget_loc.status_code == 201

    # Get the widget from the db
    discussion.db.flush()
    new_widget = Widget.get_instance(new_widget_loc.location)
    assert new_widget
    assert new_widget.base_idea == subidea_1
    assert not new_widget.generated_ideas
    widget_id = new_widget.id

    # There should be a link
    widget_uri = new_widget.uri()
    widget_link = discussion.db.query(BaseIdeaWidgetLink).filter_by(
        idea_id=subidea_1.id, widget_id=widget_id).all()
    assert widget_link
    assert len(widget_link) == 1

    # Get the widget from the api
    widget_rep = test_app.get(
        local_to_absolute(widget_uri),
        headers={"Accept": "application/json"}
    )
    assert widget_rep.status_code == 200
    widget_rep = widget_rep.json
    print widget_rep
    assert 'messages_url' in widget_rep
    assert 'ideas_url' in widget_rep
    assert 'user' in widget_rep

    # Get the list of new ideas
    # should be empty, despite the idea having a non-widget child
    idea_endpoint = local_to_absolute(widget_rep['ideas_url'])
    idea_hiding_endpoint = local_to_absolute(widget_rep['ideas_hiding_url'])
    test = test_app.get(idea_endpoint)
    assert test.status_code == 200
    assert test.json == []

    discussion.db.flush()
    assert new_widget.base_idea == subidea_1
    ctx_url = "http://example.com/cardgame.xml#card_1"

    # Create a new sub-idea (via the hiding endpoint: idea and its
    # proposing post start out hidden until confirmed).
    new_idea_create = test_app.post_json(idea_hiding_endpoint, {
        "@type": "Idea", "short_title": "This is a brand new idea",
        "context_url": ctx_url})
    assert new_idea_create.status_code == 201

    # Get the sub-idea from the db
    discussion.db.flush()
    assert new_widget.base_idea == subidea_1
    new_idea1_id = new_idea_create.location
    new_idea1 = Idea.get_instance(new_idea1_id)
    assert new_idea1.proposed_in_post
    assert new_idea1 in new_widget.generated_ideas
    assert new_idea1.hidden
    assert new_idea1.proposed_in_post.hidden
    assert not subidea_1.hidden

    # Get the sub-idea from the api
    new_idea1_rep = test_app.get(
        local_to_absolute(new_idea_create.location),
        headers={"Accept": "application/json"}
    )
    assert new_idea1_rep.status_code == 200
    new_idea1_rep = new_idea1_rep.json

    # It should have a link to the root idea
    idea_link = discussion.db.query(IdeaLink).filter_by(
        source_id=subidea_1.id, target_id=new_idea1.id).one()
    assert idea_link

    # It should have a link to the widget
    widget_link = discussion.db.query(GeneratedIdeaWidgetLink).filter_by(
        idea_id=new_idea1.id, widget_id=widget_id).all()
    assert widget_link
    assert len(widget_link) == 1

    # It should be linked to its creating post.
    content_link = discussion.db.query(IdeaContentWidgetLink).filter_by(
        idea_id=new_idea1.id,
        content_id=new_idea1.proposed_in_post.id).first()
    assert content_link

    # The new idea should now be in the collection api
    test = test_app.get(idea_endpoint)
    assert test.status_code == 200
    test = test.json
    assert new_idea1_id in test or new_idea1_id in [
        x['@id'] for x in test]
    # We should find the context in the new idea
    assert ctx_url in test[0].get('creation_ctx_url', [])
    # TODO: The root idea is included in the above, that's a bug.

    # get the new post endpoint from the idea data
    post_endpoint = new_idea1_rep.get('widget_add_post_endpoint', None)
    # The endpoint is a mapping keyed by the widget's @id.
    assert (post_endpoint and widget_rep["@id"] and
            post_endpoint[widget_rep["@id"]])
    post_endpoint = post_endpoint[widget_rep["@id"]]

    # Create a new post attached to the sub-idea
    new_post_create = test_app.post_json(local_to_absolute(post_endpoint), {
        "@type": "AssemblPost",
        "body": {"@type": "LangString", "entries": [{
            "@type": "LangStringEntry", "value": "body",
            "@language": "en"}]},
        "idCreator": participant1_user.uri()})
    assert new_post_create.status_code == 201

    # Get the new post from the db
    discussion.db.flush()
    new_post1_id = new_post_create.location
    post = Post.get_instance(new_post1_id)
    assert post.hidden

    # It should have a widget link to the idea.
    post_widget_link = discussion.db.query(IdeaContentWidgetLink).filter_by(
        content_id=post.id, idea_id=new_idea1.id).one()

    # It should be linked to the idea.
    content_link = discussion.db.query(IdeaContentWidgetLink).filter_by(
        idea_id=new_idea1.id, content_id=post.id).first()
    assert content_link
    # TODO: get the semantic data in tests.
    # assert subidea_1.id in Idea.get_idea_ids_showing_post(new_post1_id)

    # It should be a child of the proposing post
    assert post.parent == new_idea1.proposed_in_post

    # The new post should now be in the collection api
    test = test_app.get(local_to_absolute(post_endpoint))
    assert test.status_code == 200
    assert new_post1_id in test.json or new_post1_id in [
        x['@id'] for x in test.json]

    # Get the new post from the api
    new_post1_rep = test_app.get(
        local_to_absolute(new_post_create.location),
        headers={"Accept": "application/json"}
    )
    assert new_post1_rep.status_code == 200
    # It should mention its idea
    print new_post1_rep.json
    assert new_idea1_id in new_post1_rep.json['widget_ideas']
    new_post1 = Post.get_instance(new_post1_id)
    assert new_post1.hidden
    new_idea1 = Idea.get_instance(new_idea1_id)
    assert new_idea1.hidden

    # Create a second idea
    new_idea_create = test_app.post_json(idea_hiding_endpoint, {
        "@type": "Idea", "short_title": "This is another new idea"})
    assert new_idea_create.status_code == 201

    # Get the sub-idea from the db
    discussion.db.flush()
    new_idea2_id = new_idea_create.location

    # Approve the first but not the second idea
    confirm_idea_url = local_to_absolute(widget_rep['confirm_ideas_url'])
    confirm = test_app.post_json(confirm_idea_url, {
        "ids": [new_idea1_id]})
    assert confirm.status_code == 200
    discussion.db.flush()

    # Get it back
    get_back = test_app.get(confirm_idea_url)
    assert get_back.status_code == 200
    # The first idea should now be unhidden, but not the second
    assert get_back.json == [new_idea1_id]
    new_idea1 = Idea.get_instance(new_idea1_id)
    assert not new_idea1.hidden
    new_idea2 = Idea.get_instance(new_idea2_id)
    assert new_idea2.hidden
    assert new_idea2.proposed_in_post
    # The second idea was not proposed in public
    assert new_idea2.proposed_in_post.hidden
    # The root ideas should not be hidden.
    subidea_1 = Idea.get_instance(subidea_1.id)
    assert not subidea_1.hidden

    # Create a second post.
    new_post_create = test_app.post_json(local_to_absolute(post_endpoint), {
        "@type": "AssemblPost",
        "body": {"@type": "LangString", "entries": [{
            "@type": "LangStringEntry", "value": "body",
            "@language": "en"}]},
        "idCreator": participant1_user.uri()})
    assert new_post_create.status_code == 201
    discussion.db.flush()
    new_post2_id = new_post_create.location

    # Approve the first but not the second idea
    confirm_messages_url = local_to_absolute(
        widget_rep['confirm_messages_url'])
    confirm = test_app.post_json(confirm_messages_url, {
        "ids": [new_post1_id]})
    assert confirm.status_code == 200
    discussion.db.flush()

    # Get it back
    get_back = test_app.get(confirm_messages_url)
    assert get_back.status_code == 200
    assert get_back.json == [new_post1_id]
    # The first idea should now be unhidden, but not the second
    new_post1 = Post.get_instance(new_post1_id)
    assert not new_post1.hidden
    new_post2 = Post.get_instance(new_post2_id)

    def clear_data():
        # Finalizer: remove the posts created through the widget so the
        # shared test database stays clean.
        print "finalizing test data"
        test_session.delete(new_post1)
        test_session.delete(new_post2)
        test_session.delete(new_idea1.proposed_in_post)
        test_session.delete(new_idea2.proposed_in_post)
        test_session.flush()
    request.addfinalizer(clear_data)

    assert new_post2.hidden

    # Get the notifications
    notifications = test_app.get(
        '/data/Discussion/%d/notifications' % discussion.id)
    assert notifications.status_code == 200
    notifications = notifications.json
    # Only one active session
    assert len(notifications) == 1
    notification = notifications[0]
    print notification
    assert notification['widget_url']
    assert notification['time_to_end'] > 23 * 60 * 60
    assert notification['num_participants'] == 2  # participant and admin
    assert notification['num_ideas'] == 2
def get_posts(request):
    """
    Query interface on posts

    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread message, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof (classifier=null). Can be negated with "!"

    Returns a dict with pagination info ('page', 'total', 'unread',
    'maxPage', 'startIndex', 'endIndex') and the serialized 'posts'.
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)
    discussion.import_from_sources()

    user_id = request.authenticated_userid or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    # NOTE(review): parsed but unused below — confirm whether 'filters'
    # is still part of the API before removing.
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score', 'popularity')
    if order == 'score':
        # Score ordering only makes sense relative to a full-text search.
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), "Unable to find agent profile with id " + post_author_id

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), "Unable to find agent profile with id " + post_replies_to

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        # Also select the full-text score column for later serialization.
        posts = discussion.db.query(PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    ).filter(PostClass.type != 'proposition_post')
    ##no_of_posts_to_discussion = posts.count()
    post_data = []

    # Deleted-post selection:
    # True means deleted only, False (default) means non-deleted only.
    # None means both. The default depends on whether explicit ids were
    # requested, as explicit ids may legitimately target deleted posts.
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        # The orphan/related subqueries already account for deletion state;
        # otherwise apply it directly.
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)
        # Thread subtree: the root post itself plus every post whose
        # materialized ancestry path passes through it.
        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        # Subtree plus all ancestors of the given post.
        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg
    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        # A leading "!" negates a classifier; mixing polarities is an error.
        polarities = [classifier[0] != "!" for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(_("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        # BUGFIX: this list was previously computed only when includes_null
        # was true, yet it is used below whenever more than one classifier
        # is given — so e.g. classifier=a&classifier=b raised a NameError.
        # Compute it unconditionally.
        message_classifiers_nonull = [
            c for c in message_classifiers if c != "null"]
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(message_classifiers_nonull)
            if not includes_null:
                # SQL != never matches NULL rows; a negative filter should
                # still return unclassified posts.
                term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)

    if post_replies_to:
        # Filter on the creator of the *parent* post.
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(ViewPost).filter(
            ViewPost.tombstone_condition(),
            ViewPost.actor_id == user_id,
            *ViewPost.get_discussion_conditions(discussion_id))}
        my_sentiments = {l.post_id: l for l in discussion.db.query(SentimentOfPost).filter(
            SentimentOfPost.tombstone_condition(),
            SentimentOfPost.actor_id == user_id,
            *SentimentOfPost.get_discussion_conditions(discussion_id))}
        if is_unread != None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        # NOTE(review): 'user' is unused below; presumably kept as an
        # existence check on the profile — confirm before removing.
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        #If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        # Pre-fetch idea content links for all selected posts in one query.
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())

    # Note: we could count the like the same way and kill the subquery.
    # But it interferes with the popularity order,
    # and the benefit is not that high.
    sentiment_counts = discussion.db.query(
        PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
    ).join(SentimentOfPost
    ).filter(
        PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
        SentimentOfPost.tombstone_condition()
    ).group_by(PostClass.id, SentimentOfPost.type)
    sentiment_counts_by_post_id = defaultdict(dict)
    for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
        sentiment_counts_by_post_id[post_id][
            sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
        ] = sentiment_count

    # Eager-load the relations the serializer will touch.
    posts = posts.options(
        # undefer(Post.idea_content_links_above_post),
        joinedload_all(Post.creator),
        joinedload_all(Post.extracts),
        joinedload_all(Post.widget_idea_links),
        joinedload_all(SynthesisPost.publishes_synthesis),
        subqueryload_all(Post.attachments))
    if len(discussion.discussion_locales) > 1:
        posts = posts.options(*Content.subqueryload_options())
    else:
        posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(
            Content.disagree_count - Content.like_count,
            Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            # Collect ancestor ids from the materialized ancestry path.
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(*Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(*Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost = None, None
        # With score ordering each result is a (post, score) row.
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)
        if user_id != Everyone:
            viewpost = post.id in read_posts
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score
        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json('default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post['sentiment_counts'] = sentiment_counts_by_post_id[post.id]
        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data
    return data
def test_inspiration_widget(
        discussion, test_app, subidea_1, subidea_1_1, participant1_user,
        test_session):
    """Create an InspirationWidget on subidea_1 through the REST API and
    check its DB state, its idea link, and its JSON representation.

    NOTE: the function returns early (see below); everything after the
    bare `return` is intentionally dead code kept for reference.
    """
    # Post the initial configuration
    format = lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')
    new_widget_loc = test_app.post(
        '/data/Discussion/%d/widgets' % (discussion.id,), {
            'type': 'InspirationWidget',
            'settings': json.dumps({
                'idea': 'local:Idea/%d' % (subidea_1.id)
            })
        })
    assert new_widget_loc.status_code == 201
    # Get the widget from the db
    discussion.db.flush()
    widget_uri = new_widget_loc.location
    new_widget = Widget.get_instance(widget_uri)
    assert new_widget
    assert new_widget.base_idea == subidea_1
    widget_id = new_widget.id
    # There should be a link
    widget_link = discussion.db.query(BaseIdeaWidgetLink).filter_by(
        idea_id=subidea_1.id, widget_id=widget_id).all()
    assert widget_link
    assert len(widget_link) == 1
    # Get the widget from the api
    widget_rep = test_app.get(
        local_to_absolute(widget_uri),
        headers={"Accept": "application/json"})
    assert widget_rep.status_code == 200
    widget_rep = widget_rep.json
    print widget_rep
    assert 'messages_url' in widget_rep
    assert 'ideas_url' in widget_rep
    assert 'user' in widget_rep
    # Get the list of new ideas
    # should be empty, despite the idea having a non-widget child
    idea_endpoint = local_to_absolute(widget_rep['ideas_url'])
    idea_hiding_endpoint = local_to_absolute(widget_rep['ideas_hiding_url'])
    test = test_app.get(idea_endpoint)
    assert test.status_code == 200
    assert test.json == []
    discussion.db.flush()
    assert new_widget.base_idea == subidea_1
    return
    # WEIRD virtuoso crash in the tests here,
    # dependent on previous tests being run.
    # (Everything below is unreachable until the crash above is resolved.)
    ancestor_widgets = test_app.get(
        '/data/Discussion/%d/ideas/%d/ancestor_inspiration_widgets/' % (
            discussion.id, subidea_1_1.id))
    assert ancestor_widgets.status_code == 200
    ancestor_widgets_rep = ancestor_widgets.json
    assert new_widget_loc.location in ancestor_widgets_rep
    # TODO. Add the descendant_ideas collection.
    # How can this URL be derived from the widget????
    r = test_app.post(
        '/data/Discussion/%d/widgets/%d/base_idea_descendants/%d/linkedposts' % (
            discussion.id, widget_id, subidea_1_1.id), {
            "type": "WidgetPost",
            "body": {"@type": "LangString", "entries": [{
                "@type": "LangStringEntry", "value": "body",
                "@language": "en"
            }]},
            "creator_id": participant1_user.id,
            "metadata_json": {
                "inspiration_url": "https://www.youtube.com/watch?v=7E2FUSYO374"}})
    assert r.ok
    post_location = r.location
    post = Post.get_instance(post_location)
    assert post
    assert post.widget
    assert post.metadata_json['inspiration_url']
def post_extract(request):
    """
    Create a new extract.

    Reads the extract description from the JSON request body: either an
    annotator-style payload with a 'uri' (external web page), or a
    'target' pointing at an internal Post or Webpage. Creates the Extract,
    optionally associates it with an idea ('idIdea'), creates its text
    fragment ranges, and returns {'ok': True, '@id': <extract uri>}.

    Raises HTTPForbidden without P_ADD_EXTRACT (or idea-association
    permission), HTTPServerError for anonymous users, and
    HTTPBadRequest/HTTPNotFound on bad targets.
    """
    extract_data = json.loads(request.body)
    discussion = request.context
    db = discussion.db
    user_id = authenticated_userid(request)
    if not user_id:
        # Straight from annotator
        token = request.headers.get('X-Annotator-Auth-Token')
        if token:
            token = decode_token(
                token, request.registry.settings['session.secret'])
            if token:
                user_id = token['userId']
        user_id = user_id or Everyone
        # BUGFIX: this previously referenced an undefined name
        # 'discussion_id' (NameError); use the context discussion's id.
        permissions = get_permissions(user_id, discussion.id)
    else:
        permissions = request.permissions
    if P_ADD_EXTRACT not in permissions:
        #TODO: maparent: restore this code once it works:
        #raise HTTPForbidden(result=ACLDenied(permission=P_ADD_EXTRACT))
        raise HTTPForbidden()
    if not user_id or user_id == Everyone:
        # TODO: Create an anonymous user.
        raise HTTPServerError("Anonymous extracts are not implemented yet.")
    content = None
    uri = extract_data.get('uri')
    important = extract_data.get('important', False)
    annotation_text = extract_data.get('text')
    target = extract_data.get('target')
    if not uri:
        # Extract from an internal post
        if not target:
            raise HTTPBadRequest("No target")
        target_class = sqla.get_named_class(target.get('@type'))
        if issubclass(target_class, Post):
            post_id = target.get('@id')
            post = Post.get_instance(post_id)
            if not post:
                raise HTTPNotFound("Post with id '%s' not found." % post_id)
            content = post
        elif issubclass(target_class, Webpage):
            uri = target.get('url')
    if uri and not content:
        content = Webpage.get_instance(uri)
    if not content:
        # TODO: maparent: This is actually a singleton pattern, should be
        # handled by the AnnotatorSource now that it exists...
        source = db.query(AnnotatorSource).filter_by(
            discussion=discussion).filter(
            cast(AnnotatorSource.name, Unicode) == 'Annotator').first()
        if not source:
            source = AnnotatorSource(name='Annotator', discussion=discussion)
            db.add(source)
        content = Webpage(url=uri, discussion=discussion)
        db.add(content)
    extract_body = extract_data.get('quote', None)

    idea_id = extract_data.get('idIdea', None)
    if idea_id:
        idea = Idea.get_instance(idea_id)
        if (idea.discussion.id != discussion.id):
            # BUGFIX: this message previously interpolated the undefined
            # name 'extract', raising NameError instead of the intended
            # HTTPBadRequest; use the extract's own discussion id.
            raise HTTPBadRequest(
                "Extract from discussion %s cannot be associated with an idea from a different discussion." % discussion.id)
        if not idea.has_permission_req(P_ASSOCIATE_EXTRACT):
            raise HTTPForbidden("Cannot associate extract with this idea")
    else:
        idea = None

    new_extract = Extract(
        creator_id=user_id,
        owner_id=user_id,
        discussion=discussion,
        idea=idea,
        important=important,
        annotation_text=annotation_text,
        content=content)
    db.add(new_extract)

    for range_data in extract_data.get('ranges', []):
        range = TextFragmentIdentifier(
            extract=new_extract,
            body=extract_body,
            xpath_start=range_data['start'],
            offset_start=range_data['startOffset'],
            xpath_end=range_data['end'],
            offset_end=range_data['endOffset'])
        db.add(range)
    db.flush()

    return {'ok': True, '@id': new_extract.uri()}
def create_post(request):
    """
    We use post, not put, because we don't know the id of the post

    Creates an AssemblPost (or a SynthesisPost when
    'publishes_synthesis_id' is given) from the request's JSON body,
    threading it under 'reply_id' and/or linking it to 'idea_id' when
    provided. Body and subject are LangStrings; when no subject is given
    one is derived from the parent post, the idea, or the discussion
    topic, in that order. Returns the new post's generic_json.
    """
    localizer = request.localizer
    request_body = json.loads(request.body)
    user_id = authenticated_userid(request)
    if not user_id:
        raise HTTPUnauthorized()
    # NOTE(review): 'user' is unused below; the .one() call presumably
    # serves to fail fast when the authenticated id has no User row —
    # confirm before removing.
    user = Post.default_db.query(User).filter_by(id=user_id).one()

    body = request_body.get('body', None)
    html = request_body.get('html', None)  # BG: Is this used now? I cannot see it.
    reply_id = request_body.get('reply_id', None)
    idea_id = request_body.get('idea_id', None)
    subject = request_body.get('subject', None)
    publishes_synthesis_id = request_body.get('publishes_synthesis_id', None)

    if not body and not publishes_synthesis_id:
        # Should we allow empty messages otherwise?
        raise HTTPBadRequest(localizer.translate(
            _("Your message is empty")))

    if reply_id:
        in_reply_to_post = Post.get_instance(reply_id)
    else:
        in_reply_to_post = None

    if idea_id:
        in_reply_to_idea = Idea.get_instance(idea_id)
    else:
        in_reply_to_idea = None

    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get_instance(discussion_id)
    if not discussion:
        raise HTTPNotFound(localizer.translate(_(
            "No discussion found with id=%s")) % (discussion_id,))
    ctx = DummyContext({Discussion: discussion})

    if html:
        log.warning("Still using html")
        # how to guess locale in this case?
        body = LangString.create(html)
    elif body:
        body = LangString.create_from_json(
            body, context=ctx, user_id=user_id)
    else:
        body = LangString.EMPTY(discussion.db)

    if subject:
        subject = LangString.create_from_json(
            subject, context=ctx, user_id=user_id)
    else:
        # No explicit subject: derive one from the parent post, the idea,
        # or the discussion topic, in that order.
        # print(in_reply_to_post.subject, discussion.topic)
        if in_reply_to_post:
            subject = in_reply_to_post.get_title().first_original().value if in_reply_to_post.get_title() else ''
        elif in_reply_to_idea:
            # TODO: THis should use a cascade like the frontend
            subject = in_reply_to_idea.short_title if in_reply_to_idea.short_title else ''
        else:
            subject = discussion.topic if discussion.topic else ''
        # print subject
        if subject is not None and len(subject):
            # Strip existing "Re: " prefixes before adding our own.
            new_subject = "Re: " + restrip_pat.sub('', subject).strip()
            if (in_reply_to_post and new_subject == subject and
                    in_reply_to_post.get_title()):
                # reuse subject and translations
                subject = in_reply_to_post.get_title()
            else:
                # how to guess locale in this case?
                subject = LangString.create(new_subject)
        else:
            # Empty derived subject: report to Sentry and fall back.
            raven_client = get_raven_client()
            if raven_client:
                raven_client.captureMessage(
                    "A message is about to be written to the database with an "
                    "empty subject. This is not supposed to happen.")
            subject = LangString.EMPTY(discussion.db)

    post_constructor_args = {
        'discussion': discussion,
        'creator_id': user_id,
        'subject': subject,
        'body': body
    }

    if publishes_synthesis_id:
        published_synthesis = Synthesis.get_instance(publishes_synthesis_id)
        post_constructor_args['publishes_synthesis'] = published_synthesis
        new_post = SynthesisPost(**post_constructor_args)
    else:
        new_post = AssemblPost(**post_constructor_args)

    discussion.db.add(new_post)
    # Flush so the post has an id before parenting / linking.
    discussion.db.flush()

    if in_reply_to_post:
        new_post.set_parent(in_reply_to_post)

    if in_reply_to_idea:
        idea_post_link = IdeaRelatedPostLink(
            creator_id=user_id,
            content=new_post,
            idea=in_reply_to_idea
        )
        discussion.db.add(idea_post_link)
        # Notify the whole chain of ancestor ideas of the change.
        idea = in_reply_to_idea
        while idea:
            idea.send_to_changes()
            parents = idea.get_parents()
            idea = next(iter(parents)) if parents else None
    else:
        discussion.root_idea.send_to_changes()

    for source in discussion.sources:
        # Best-effort push to any source that supports outgoing posts.
        if 'send_post' in dir(source):
            source.send_post(new_post)
    permissions = get_permissions(user_id, discussion_id)

    return new_post.generic_json('default', user_id, permissions)
def post_extract(request):
    """
    Create a new extract.

    Older variant of this view, keyed on the matchdict discussion_id and
    using *.default_db sessions. Reads either an annotator payload
    (with 'uri' and 'text') or a 'target' pointing at an internal
    Post/Webpage, computes the extract hash from the ranges, creates the
    Extract and its TextFragmentIdentifier ranges, and returns
    {'ok': True, '@id': <extract uri>}.

    Returns HTTPForbidden without P_ADD_EXTRACT; raises HTTPServerError
    for anonymous users and HTTPBadRequest/HTTPNotFound on bad targets.
    """
    extract_data = json.loads(request.body)
    discussion_id = int(request.matchdict['discussion_id'])
    user_id = request.authenticated_userid
    if not user_id:
        # Straight from annotator
        token = request.headers.get('X-Annotator-Auth-Token')
        if token:
            token = decode_token(
                token, request.registry.settings['session.secret'])
            if token:
                user_id = token['userId']
    user_id = user_id or Everyone
    if not user_has_permission(discussion_id, user_id, P_ADD_EXTRACT):
        #TODO: maparent: restore this code once it works:
        #return HTTPForbidden(result=ACLDenied(permission=P_ADD_EXTRACT))
        return HTTPForbidden()
    if not user_id or user_id == Everyone:
        # TODO: Create an anonymous user.
        raise HTTPServerError("Anonymous extracts are not implemented yet.")
    content = None
    uri = extract_data.get('uri')
    important = extract_data.get('important', False)
    annotation_text = None
    if uri:
        # Straight from annotator
        annotation_text = extract_data.get('text')
    else:
        # Extract from an internal post or webpage target
        target = extract_data.get('target')
        if not (target or uri):
            raise HTTPBadRequest("No target")
        target_class = sqla.get_named_class(target.get('@type'))
        if issubclass(target_class, Post):
            post_id = target.get('@id')
            post = Post.get_instance(post_id)
            if not post:
                raise HTTPNotFound(
                    "Post with id '%s' not found." % post_id)
            content = post
        elif issubclass(target_class, Webpage):
            uri = target.get('url')
    if uri and not content:
        content = Webpage.get_instance(uri)
    if not content:
        # TODO: maparent: This is actually a singleton pattern, should be
        # handled by the AnnotatorSource now that it exists...
        source = AnnotatorSource.default_db.query(AnnotatorSource).filter_by(
            discussion_id=discussion_id).filter(
            cast(AnnotatorSource.name, Unicode) == 'Annotator').first()
        if not source:
            source = AnnotatorSource(
                name='Annotator', discussion_id=discussion_id)
        # NOTE(review): unlike the sibling version of this view, neither the
        # new source nor the new Webpage is added to the session here, and
        # content.id below will be None until a flush — presumably handled
        # by cascades; confirm.
        content = Webpage(url=uri, discussion_id=discussion_id)
    extract_body = extract_data.get('quote', '')

    idea_id = extract_data.get('idIdea', None)
    if idea_id:
        idea = Idea.get_instance(idea_id)
        if (idea.discussion.id != discussion_id):
            # BUGFIX: this message previously interpolated the undefined
            # name 'extract', raising NameError instead of the intended
            # HTTPBadRequest; use the extract's own discussion id.
            raise HTTPBadRequest(
                "Extract from discussion %s cannot be associated with an idea from a different discussion." % discussion_id)
    else:
        idea = None

    ranges = extract_data.get('ranges', [])
    # NOTE(review): assumes range fields (including offsets) are strings,
    # as sent by the annotator client; the joins would fail on ints — confirm.
    extract_hash = Extract.get_extract_hash(
        None,
        u"".join([r['start'] for r in ranges]),
        u"".join([r['end'] for r in ranges]),
        u"".join([r['startOffset'] for r in ranges]),
        u"".join([r['endOffset'] for r in ranges]),
        content.id
    )
    new_extract = Extract(
        creator_id=user_id,
        owner_id=user_id,
        discussion_id=discussion_id,
        body=extract_body,
        idea=idea,
        important=important,
        annotation_text=annotation_text,
        content=content,
        extract_hash=extract_hash
    )
    Extract.default_db.add(new_extract)
    for range_data in ranges:
        range = TextFragmentIdentifier(
            extract=new_extract,
            xpath_start=range_data['start'],
            offset_start=range_data['startOffset'],
            xpath_end=range_data['end'],
            offset_end=range_data['endOffset'])
        TextFragmentIdentifier.default_db.add(range)
    Extract.default_db.flush()

    return {'ok': True, '@id': new_extract.uri()}
def create_post(request):
    """
    We use post, not put, because we don't know the id of the post

    Older variant of the message-creation view: takes a plain-string
    'message' (or 'html') body instead of LangString JSON, derives a
    "Re: "-prefixed subject when none is given, threads under 'reply_id'
    and/or links to 'idea_id', and returns the new post's generic_json.
    """
    localizer = request.localizer
    request_body = json.loads(request.body)
    user_id = authenticated_userid(request)
    if not user_id:
        raise HTTPUnauthorized()
    # NOTE(review): 'user' is unused below; the .one() call presumably
    # serves to fail fast when the authenticated id has no User row —
    # confirm before removing.
    user = Post.default_db.query(User).filter_by(id=user_id).one()

    message = request_body.get('message', None)
    html = request_body.get('html', None)
    reply_id = request_body.get('reply_id', None)
    idea_id = request_body.get('idea_id', None)
    subject = request_body.get('subject', None)
    publishes_synthesis_id = request_body.get('publishes_synthesis_id', None)

    if not message:
        raise HTTPBadRequest(localizer.translate(
            _("Your message is empty")))

    if reply_id:
        in_reply_to_post = Post.get_instance(reply_id)
    else:
        in_reply_to_post = None

    if idea_id:
        in_reply_to_idea = Idea.get_instance(idea_id)
    else:
        in_reply_to_idea = None

    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get_instance(discussion_id)
    if not discussion:
        # NOTE(review): the id is interpolated *before* translation here,
        # unlike the sibling view, so the message won't match the
        # translation catalog — confirm intent.
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s" % discussion_id))
        )

    if subject:
        # Explicit subject used as-is (no-op assignment kept verbatim).
        subject = subject
    else:
        # Derive a subject from the parent post, the idea, or the
        # discussion topic, in that order.
        #print(in_reply_to_post.subject, discussion.topic)
        if in_reply_to_post:
            subject = in_reply_to_post.get_title() if in_reply_to_post.get_title() else ''
        elif in_reply_to_idea:
            #TODO: THis should use a cascade like the frontend
            subject = in_reply_to_idea.short_title if in_reply_to_idea.short_title else ''
        else:
            subject = discussion.topic if discussion.topic else ''
        #print subject
        # Strip any existing "Re: " prefixes before adding our own.
        subject = "Re: " + restrip_pat.sub('', subject)

    post_constructor_args = {
        'discussion': discussion,
        'creator_id': user_id,
        'subject': subject,
        'body': html if html else message
    }

    if publishes_synthesis_id:
        published_synthesis = Synthesis.get_instance(publishes_synthesis_id)
        post_constructor_args['publishes_synthesis'] = published_synthesis
        new_post = SynthesisPost(**post_constructor_args)
    else:
        new_post = AssemblPost(**post_constructor_args)

    discussion.db.add(new_post)
    # Flush so the post has an id before parenting / linking.
    discussion.db.flush()

    if in_reply_to_post:
        new_post.set_parent(in_reply_to_post)

    if in_reply_to_idea:
        idea_post_link = IdeaRelatedPostLink(
            creator_id=user_id,
            content=new_post,
            idea=in_reply_to_idea
        )
        discussion.db.add(idea_post_link)
        # Notify the whole chain of ancestor ideas of the change.
        idea = in_reply_to_idea
        while idea:
            idea.send_to_changes()
            parents = idea.get_parents()
            idea = next(iter(parents)) if parents else None
    else:
        discussion.root_idea.send_to_changes()

    for source in discussion.sources:
        # Best-effort push to any source that supports outgoing posts.
        if 'send_post' in dir(source):
            source.send_post(new_post)
    permissions = get_permissions(user_id, discussion_id)

    return new_post.generic_json('default', user_id, permissions)
def test_creativity_session_widget(discussion, test_app, subidea_1, subidea_1_1,
                                   participant1_user, test_session, request):
    """
    End-to-end test of the CreativitySessionWidget REST lifecycle:
    create the widget, propose hidden ideas and posts through it,
    confirm (unhide) a subset, and check the notification summary.
    """
    # Post the initial configuration
    format = lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')
    new_widget_loc = test_app.post_json(
        '/data/Discussion/%d/widgets' % (discussion.id,), {
            '@type': 'CreativitySessionWidget',
            'settings': {
                'idea': 'local:Idea/%d' % (subidea_1.id),
                # One currently-active notification window and one future one.
                'notifications': [{
                    'start': '2014-01-01T00:00:00',
                    'end': format(datetime.utcnow() + timedelta(1)),
                    'message': 'creativity_session'
                }, {
                    'start': format(datetime.utcnow() + timedelta(1)),
                    'end': format(datetime.utcnow() + timedelta(2)),
                    'message': 'creativity_session'
                }]
            }
        })
    assert new_widget_loc.status_code == 201
    # Get the widget from the db
    discussion.db.flush()
    new_widget = Widget.get_instance(new_widget_loc.location)
    assert new_widget
    assert new_widget.base_idea == subidea_1
    assert not new_widget.generated_ideas
    widget_id = new_widget.id
    # There should be a link
    widget_uri = new_widget.uri()
    widget_link = discussion.db.query(BaseIdeaWidgetLink).filter_by(
        idea_id=subidea_1.id, widget_id=widget_id).all()
    assert widget_link
    assert len(widget_link) == 1
    # Get the widget from the api
    widget_rep = test_app.get(
        local_to_absolute(widget_uri),
        headers={"Accept": "application/json"})
    assert widget_rep.status_code == 200
    widget_rep = widget_rep.json
    print widget_rep
    assert 'messages_url' in widget_rep
    assert 'ideas_url' in widget_rep
    assert 'user' in widget_rep
    # Get the list of new ideas
    # should be empty, despite the idea having a non-widget child
    idea_endpoint = local_to_absolute(widget_rep['ideas_url'])
    idea_hiding_endpoint = local_to_absolute(widget_rep['ideas_hiding_url'])
    test = test_app.get(idea_endpoint)
    assert test.status_code == 200
    assert test.json == []
    discussion.db.flush()
    assert new_widget.base_idea == subidea_1
    ctx_url = "http://example.com/cardgame.xml#card_1"
    # Create a new sub-idea (through the hiding endpoint, so it starts hidden)
    new_idea_create = test_app.post_json(
        idea_hiding_endpoint, {
            "@type": "Idea",
            "short_title": "This is a brand new idea",
            "context_url": ctx_url
        })
    assert new_idea_create.status_code == 201
    # Get the sub-idea from the db
    discussion.db.flush()
    assert new_widget.base_idea == subidea_1
    new_idea1_id = new_idea_create.location
    new_idea1 = Idea.get_instance(new_idea1_id)
    assert new_idea1.proposed_in_post
    assert new_idea1 in new_widget.generated_ideas
    # Both the idea and its proposing post stay hidden until confirmed.
    assert new_idea1.hidden
    assert new_idea1.proposed_in_post.hidden
    assert not subidea_1.hidden
    # Get the sub-idea from the api
    new_idea1_rep = test_app.get(
        local_to_absolute(new_idea_create.location),
        headers={"Accept": "application/json"})
    assert new_idea1_rep.status_code == 200
    new_idea1_rep = new_idea1_rep.json
    # It should have a link to the root idea
    idea_link = discussion.db.query(IdeaLink).filter_by(
        source_id=subidea_1.id, target_id=new_idea1.id).one()
    assert idea_link
    # It should have a link to the widget
    widget_link = discussion.db.query(GeneratedIdeaWidgetLink).filter_by(
        idea_id=new_idea1.id, widget_id=widget_id).all()
    assert widget_link
    assert len(widget_link) == 1
    # It should be linked to its creating post.
    content_link = discussion.db.query(IdeaContentWidgetLink).filter_by(
        idea_id=new_idea1.id,
        content_id=new_idea1.proposed_in_post.id).first()
    assert content_link
    # The new idea should now be in the collection api
    test = test_app.get(idea_endpoint)
    assert test.status_code == 200
    test = test.json
    assert new_idea1_id in test or new_idea1_id in [x['@id'] for x in test]
    # We should find the context in the new idea
    assert ctx_url in test[0].get('creation_ctx_url', [])
    # TODO: The root idea is included in the above, that's a bug.
    # get the new post endpoint from the idea data
    post_endpoint = new_idea1_rep.get('widget_add_post_endpoint', None)
    assert (post_endpoint and widget_rep["@id"]
            and post_endpoint[widget_rep["@id"]])
    post_endpoint = post_endpoint[widget_rep["@id"]]
    # Create a new post attached to the sub-idea
    new_post_create = test_app.post_json(
        local_to_absolute(post_endpoint), {
            "@type": "AssemblPost",
            "body": {
                "@type": "LangString",
                "entries": [{
                    "@type": "LangStringEntry",
                    "value": "body",
                    "@language": "en"
                }]
            },
            "idCreator": participant1_user.uri()
        })
    assert new_post_create.status_code == 201
    # Get the new post from the db
    discussion.db.flush()
    new_post1_id = new_post_create.location
    post = Post.get_instance(new_post1_id)
    assert post.hidden
    # It should have a widget link to the idea.
    post_widget_link = discussion.db.query(IdeaContentWidgetLink).filter_by(
        content_id=post.id, idea_id=new_idea1.id).one()
    # It should be linked to the idea.
    content_link = discussion.db.query(IdeaContentWidgetLink).filter_by(
        idea_id=new_idea1.id, content_id=post.id).first()
    assert content_link
    # TODO: get the semantic data in tests.
    # assert subidea_1.id in Idea.get_idea_ids_showing_post(new_post1_id)
    # It should be a child of the proposing post
    assert post.parent == new_idea1.proposed_in_post
    # The new post should now be in the collection api
    test = test_app.get(local_to_absolute(post_endpoint))
    assert test.status_code == 200
    assert new_post1_id in test.json or new_post1_id in [
        x['@id'] for x in test.json]
    # Get the new post from the api
    new_post1_rep = test_app.get(
        local_to_absolute(new_post_create.location),
        headers={"Accept": "application/json"})
    assert new_post1_rep.status_code == 200
    # It should mention its idea
    print new_post1_rep.json
    assert new_idea1_id in new_post1_rep.json['widget_ideas']
    new_post1 = Post.get_instance(new_post1_id)
    assert new_post1.hidden
    new_idea1 = Idea.get_instance(new_idea1_id)
    assert new_idea1.hidden
    # Create a second idea
    new_idea_create = test_app.post_json(
        idea_hiding_endpoint, {
            "@type": "Idea",
            "short_title": "This is another new idea"
        })
    assert new_idea_create.status_code == 201
    # Get the sub-idea from the db
    discussion.db.flush()
    new_idea2_id = new_idea_create.location
    # Approve the first but not the second idea
    confirm_idea_url = local_to_absolute(widget_rep['confirm_ideas_url'])
    confirm = test_app.post_json(confirm_idea_url, {"ids": [new_idea1_id]})
    assert confirm.status_code == 200
    discussion.db.flush()
    # Get it back
    get_back = test_app.get(confirm_idea_url)
    assert get_back.status_code == 200
    # The first idea should now be unhidden, but not the second
    assert get_back.json == [new_idea1_id]
    new_idea1 = Idea.get_instance(new_idea1_id)
    assert not new_idea1.hidden
    new_idea2 = Idea.get_instance(new_idea2_id)
    assert new_idea2.hidden
    assert new_idea2.proposed_in_post
    # The second idea was not proposed in public
    assert new_idea2.proposed_in_post.hidden
    # The root ideas should not be hidden.
    subidea_1 = Idea.get_instance(subidea_1.id)
    assert not subidea_1.hidden
    # Create a second post.
    new_post_create = test_app.post_json(
        local_to_absolute(post_endpoint), {
            "@type": "AssemblPost",
            "body": {
                "@type": "LangString",
                "entries": [{
                    "@type": "LangStringEntry",
                    "value": "body",
                    "@language": "en"
                }]
            },
            "idCreator": participant1_user.uri()
        })
    assert new_post_create.status_code == 201
    discussion.db.flush()
    new_post2_id = new_post_create.location
    # Approve the first but not the second post
    confirm_messages_url = local_to_absolute(widget_rep['confirm_messages_url'])
    confirm = test_app.post_json(confirm_messages_url, {"ids": [new_post1_id]})
    assert confirm.status_code == 200
    discussion.db.flush()
    # Get it back
    get_back = test_app.get(confirm_messages_url)
    assert get_back.status_code == 200
    assert get_back.json == [new_post1_id]
    # The first post should now be unhidden, but not the second
    new_post1 = Post.get_instance(new_post1_id)
    assert not new_post1.hidden
    new_post2 = Post.get_instance(new_post2_id)

    def clear_data():
        # Teardown: remove the posts created over HTTP so later tests start clean.
        print "finalizing test data"
        test_session.delete(new_post1)
        test_session.delete(new_post2)
        test_session.delete(new_idea1.proposed_in_post)
        test_session.delete(new_idea2.proposed_in_post)
        test_session.flush()
    request.addfinalizer(clear_data)
    assert new_post2.hidden
    # Get the notifications
    notifications = test_app.get(
        '/data/Discussion/%d/notifications' % discussion.id)
    assert notifications.status_code == 200
    notifications = notifications.json
    # Only one active session
    assert len(notifications) == 1
    notification = notifications[0]
    print notification
    assert notification['widget_url']
    assert notification['time_to_end'] > 23 * 60 * 60
    assert notification['num_participants'] == 2  # participant and admin
    assert notification['num_ideas'] == 2
def get_posts(request):
    """
    List the posts of a discussion (REST view), serialized with paging
    metadata and a per-user 'read' flag.

    GET params: filters (comma-separated; 'synthesis' supported), page,
    root_post_id (restrict to a post's subtree), root_idea_id (restrict
    to posts related to an idea), ids, view (view_def name).
    Returns a dict with page/inbox/total/maxPage/startIndex/endIndex/posts.
    """
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(id=int(discussion_id))
    if not discussion:
        raise HTTPNotFound(_("No discussion found with id=%s" % discussion_id))
    # Pull any new posts from external sources before querying.
    discussion.import_from_sources()
    user_id = authenticated_userid(request)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []
    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    if page < 1:
        page = 1
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall('ids')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get('view')
    #Rename "inbox" to "unread", the number of unread messages for the current user.
    # Count messages already viewed by this user (0 for anonymous).
    no_of_messages_viewed_by_user = Post.db.query(ViewPost).join(
        Post, Content, Source
    ).filter(
        Source.discussion_id == discussion_id,
        Content.source_id == Source.id,
        ViewPost.actor_id == user_id,
    ).count() if user_id else 0
    posts = Post.db.query(Post).join(
        Content, Source,
    ).filter(
        Source.discussion_id == discussion_id,
        Content.source_id == Source.id,
    )
    no_of_posts_to_discussion = posts.count()
    post_data = []
    if root_idea_id:
        # Restrict to posts related to an idea, via raw SQL statements
        # generated by the Idea model.
        if root_idea_id == Idea.ORPHAN_POSTS_IDEA_ID:
            ideas_query = Post.db.query(Post) \
                .filter(Post.id.in_(text(Idea._get_orphan_posts_statement(),
                    bindparams=[bindparam('discussion_id', discussion_id)]
                )))
        else:
            ideas_query = Post.db.query(Post) \
                .filter(Post.id.in_(text(Idea._get_related_posts_statement(),
                    bindparams=[bindparam('root_idea_id', root_idea_id)]
                )))
        posts = ideas_query.join(Content, Source,)
    elif root_post_id:
        root_post = Post.get(id=root_post_id)
        # Subtree filter: descendants (by materialized-path ancestry)
        # plus the root post itself.
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (Post.id == root_post.id)
        )
        #Benoitg: For now, this completely garbles threading without intelligent
        #handling of pagination. Disabling
        #posts = posts.limit(page_size).offset(data['startIndex']-1)
    elif ids:
        posts = posts.filter(Post.id.in_(ids))
    if user_id:
        # Outer-join each post with this user's ViewPost (None if unread).
        posts = posts.outerjoin(ViewPost,
            and_(ViewPost.actor_id == user_id, ViewPost.post_id == Post.id)
        )
        posts = posts.add_entity(ViewPost)
    posts = posts.options(contains_eager(Post.content, Content.source))
    posts = posts.options(joinedload_all(Post.creator, AgentProfile.user))
    posts = posts.order_by(Content.creation_date)
    if 'synthesis' in filter_names:
        posts = posts.filter(Post.is_synthesis == True)
    if user_id:
        for post, viewpost in posts:
            if view_def:
                serializable_post = post.generic_json(view_def)
            else:
                serializable_post = post.serializable()
            if viewpost:
                serializable_post['read'] = True
            else:
                serializable_post['read'] = False
                if root_post_id:
                    # Explicitly requested subtree: mark the post as read.
                    viewed_post = ViewPost(
                        actor_id=user_id,
                        post=post
                    )
                    Post.db.add(viewed_post)
            post_data.append(serializable_post)
    else:
        for post in posts:
            if view_def:
                serializable_post = post.generic_json(view_def)
            else:
                serializable_post = post.serializable()
            post_data.append(serializable_post)
    data = {}
    data["page"] = page
    data["inbox"] = no_of_posts_to_discussion - no_of_messages_viewed_by_user
    #What is "total", the total messages in the current context?
    #This gave wrong count, I don't know why. benoitg
    #data["total"] = discussion.posts().count()
    data["total"] = no_of_posts_to_discussion
    data["maxPage"] = max(1, ceil(float(data["total"])/page_size))
    #TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size-1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size-1)
    data["posts"] = post_data
    return data
def get_posts(request):
    """
    Query interface on posts (REST view).

    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order can be chronological, reverse_chronological.

    Returns a dict with page/unread/total/maxPage/startIndex/endIndex/posts.
    Raises HTTPNotFound for an unknown discussion, HTTPBadRequest for
    unsupported filter combinations.
    """
    localizer = get_localizer(request)
    discussion_id = int(request.matchdict["discussion_id"])
    discussion = Discussion.get(id=int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)
    # Pull any new posts from external sources before querying.
    discussion.import_from_sources()
    user_id = authenticated_userid(request)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    filter_names = (
        [filter_name for filter_name
         in request.GET.getone("filters").split(",") if filter_name]
        if request.GET.get("filters") else []
    )
    try:
        page = int(request.GET.getone("page"))
    except (ValueError, KeyError):
        page = 1
    order = request.GET.get("order")
    if order is None:  # fixed: was `order == None`
        order = "chronological"
    assert order in ("chronological", "reverse_chronological")
    if page < 1:
        page = 1
    root_post_id = request.GET.getall("root_post_id")
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    root_idea_id = request.GET.getall("root_idea_id")
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall("ids")
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get("view")
    only_synthesis = request.GET.get("only_synthesis")
    if only_synthesis == "true":
        posts = Post.db.query(SynthesisPost)
    else:
        posts = Post.db.query(Post)
    posts = posts.filter(Post.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()
    post_data = []
    only_orphan = request.GET.get("only_orphan")
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        posts = posts.filter(
            Post.id.in_(
                text(Idea._get_orphan_posts_statement(),
                     bindparams=[bindparam("discussion_id", discussion_id)])
            )
        )
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))
    if root_idea_id:
        posts = posts.filter(
            Post.id.in_(
                text(
                    Idea._get_related_posts_statement(),
                    bindparams=[bindparam("root_idea_id", root_idea_id),
                                bindparam("discussion_id", discussion_id)],
                )
            )
        )
    if root_post_id:
        root_post = Post.get(id=root_post_id)
        # Subtree filter: descendants (materialized-path ancestry) plus
        # the root post itself.
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ",%"))
            | (Post.id == root_post.id)
        )
    else:
        root_post = None
    if ids:
        posts = posts.filter(Post.id.in_(ids))
    # Post read/unread management
    is_unread = request.GET.get("is_unread")
    # (removed leftover debug statement that printed is_unread on every request)
    if user_id:
        # Outer-join each post with this user's ViewPost (None if unread).
        posts = posts.outerjoin(
            ViewPost,
            and_(ViewPost.actor_id == user_id, ViewPost.post_id == Post.id))
        posts = posts.add_entity(ViewPost)
        if is_unread == "true":
            posts = posts.filter(ViewPost.id == None)
        elif is_unread == "false":
            posts = posts.filter(ViewPost.id != None)
    else:
        # If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))
    # posts = posts.options(contains_eager(Post.source))
    posts = posts.options(joinedload_all(Post.creator))
    if order == "chronological":
        posts = posts.order_by(Content.creation_date)
    elif order == "reverse_chronological":
        posts = posts.order_by(Content.creation_date.desc())
    no_of_posts = 0
    no_of_posts_viewed_by_user = 0
    for query_result in posts:
        if user_id:
            post, viewpost = query_result
        else:
            post, viewpost = query_result, None
        no_of_posts += 1
        if view_def:
            serializable_post = post.generic_json(view_def)
        else:
            serializable_post = post.serializable()
        if viewpost:
            serializable_post["read"] = True
            no_of_posts_viewed_by_user += 1
        elif user_id and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            Post.db.add(viewed_post)
            serializable_post["read"] = True
        else:
            serializable_post["read"] = False
        post_data.append(serializable_post)
    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data
    return data
def create_post(request):
    """
    Create a post in a discussion (early REST view).

    We use post, not put, because we don't know the id of the post.
    Expects a JSON body with keys: message, html, reply_id, idea_id,
    subject, publishes_synthesis_id. Returns {"ok": True}.
    Raises HTTPUnauthorized (no authenticated user), HTTPBadRequest
    (empty message) or HTTPNotFound (unknown discussion).
    """
    localizer = get_localizer(request)
    request_body = json.loads(request.body)
    user_id = authenticated_userid(request)
    # Fixed: the auth check must run BEFORE querying for the user;
    # previously an anonymous request crashed in .one() instead of 401ing.
    if not user_id:
        raise HTTPUnauthorized()
    # Validates that the user actually exists; .one() raises otherwise.
    user = Post.db.query(User).filter_by(id=user_id).one()
    message = request_body.get("message", None)
    html = request_body.get("html", None)
    reply_id = request_body.get("reply_id", None)
    idea_id = request_body.get("idea_id", None)
    subject = request_body.get("subject", None)
    publishes_synthesis_id = request_body.get("publishes_synthesis_id", None)
    if not message:
        # Fixed: an empty message is a client error, not an auth failure
        # (consistent with the other create_post view in this file).
        raise HTTPBadRequest(localizer.translate(_("Your message is empty")))
    if reply_id:
        in_reply_to_post = Post.get_instance(reply_id)
    else:
        in_reply_to_post = None
    if idea_id:
        in_reply_to_idea = Idea.get_instance(idea_id)
    else:
        in_reply_to_idea = None
    discussion_id = request.matchdict["discussion_id"]
    discussion = Discussion.get_instance(discussion_id)
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s" % discussion_id)))
    if not subject:
        # Derive the subject from the reply target, falling back to the
        # idea title, then the discussion topic; strip existing "Re:"
        # prefixes before adding our own.
        if in_reply_to_post:
            subject = in_reply_to_post.subject
        elif in_reply_to_idea:
            subject = in_reply_to_idea.short_title
        else:
            subject = discussion.topic
        subject = "Re: " + restrip_pat.sub("", subject)
    post_constructor_args = {
        "discussion": discussion,
        "message_id": uuid.uuid1().urn,
        "creator_id": user_id,
        "subject": subject,
        # html takes precedence over plain-text message as the body.
        "body": html if html else message,
    }
    if publishes_synthesis_id:
        published_synthesis = Synthesis.get_instance(publishes_synthesis_id)
        post_constructor_args["publishes_synthesis"] = published_synthesis
        new_post = SynthesisPost(**post_constructor_args)
    else:
        new_post = AssemblPost(**post_constructor_args)
    new_post.db.add(new_post)
    # Flush so the post gets an id before linking it to parents/ideas.
    new_post.db.flush()
    if in_reply_to_post:
        new_post.set_parent(in_reply_to_post)
    if in_reply_to_idea:
        idea_post_link = IdeaRelatedPostLink(
            creator_id=user_id, content=new_post, idea=in_reply_to_idea)
        IdeaRelatedPostLink.db.add(idea_post_link)
    # NOTE(review): unlike the sibling view, this calls send_post on every
    # source without checking it supports it — confirm all sources do.
    for source in discussion.sources:
        source.send_post(new_post)
    return {"ok": True}
def test_inspiration_widget(discussion, test_app, subidea_1, subidea_1_1,
                            participant1_user, test_session):
    """
    Test creation and configuration of an InspirationWidget over REST.

    NOTE: the test deliberately returns early (see comment below); the
    code after the bare `return` is intentionally-disabled dead code.
    """
    # Post the initial configuration
    format = lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')
    new_widget_loc = test_app.post(
        '/data/Discussion/%d/widgets' % (discussion.id,), {
            'type': 'InspirationWidget',
            'settings': json.dumps({'idea': 'local:Idea/%d' % (subidea_1.id)})
        })
    assert new_widget_loc.status_code == 201
    # Get the widget from the db
    discussion.db.flush()
    widget_uri = new_widget_loc.location
    new_widget = Widget.get_instance(widget_uri)
    assert new_widget
    assert new_widget.base_idea == subidea_1
    widget_id = new_widget.id
    # There should be a link
    widget_link = discussion.db.query(BaseIdeaWidgetLink).filter_by(
        idea_id=subidea_1.id, widget_id=widget_id).all()
    assert widget_link
    assert len(widget_link) == 1
    # Get the widget from the api
    widget_rep = test_app.get(
        local_to_absolute(widget_uri),
        headers={"Accept": "application/json"})
    assert widget_rep.status_code == 200
    widget_rep = widget_rep.json
    print widget_rep
    assert 'messages_url' in widget_rep
    assert 'ideas_url' in widget_rep
    assert 'user' in widget_rep
    # Get the list of new ideas
    # should be empty, despite the idea having a non-widget child
    idea_endpoint = local_to_absolute(widget_rep['ideas_url'])
    idea_hiding_endpoint = local_to_absolute(widget_rep['ideas_hiding_url'])
    test = test_app.get(idea_endpoint)
    assert test.status_code == 200
    assert test.json == []
    discussion.db.flush()
    assert new_widget.base_idea == subidea_1
    return
    # WEIRD virtuoso crash in the tests here,
    # dependent on previous tests being run.
    ancestor_widgets = test_app.get(
        '/data/Discussion/%d/ideas/%d/ancestor_inspiration_widgets/' % (
            discussion.id, subidea_1_1.id))
    assert ancestor_widgets.status_code == 200
    ancestor_widgets_rep = ancestor_widgets.json
    assert new_widget_loc.location in ancestor_widgets_rep
    # TODO: add the descendant_ideas collection.
    # How can this URL be derived from the widget?
    r = test_app.post(
        '/data/Discussion/%d/widgets/%d/base_idea_descendants/%d/linkedposts' % (
            discussion.id, widget_id, subidea_1_1.id), {
            "type": "WidgetPost",
            "body": {
                "@type": "LangString",
                "entries": [{
                    "@type": "LangStringEntry",
                    "value": "body",
                    "@language": "en"
                }]
            },
            "creator_id": participant1_user.id,
            "metadata_json": {
                "inspiration_url":
                    "https://www.youtube.com/watch?v=7E2FUSYO374"
            }
        })
    assert r.ok
    post_location = r.location
    post = Post.get_instance(post_location)
    assert post
    assert post.widget
    assert post.metadata_json['inspiration_url']
def get_data(content):
    """Return uid, dict of fields we want to index, return None if we don't index.

    Dispatches on the concrete model type (Idea, AgentProfile, Post /
    SynthesisPost, Extract) and builds the flat document sent to the
    search index. Returns (None, None) for non-indexed content.
    """
    from assembl.models import Idea, Post, SynthesisPost, AgentProfile, LangString, Extract, Question
    if type(content) == Idea:  # only index Idea, not Thematic or Question
        data = {}
        for attr in ('creation_date', 'id', 'discussion_id'):
            data[attr] = getattr(content, attr)
        populate_from_langstring_prop(content, data, 'title')
        populate_from_langstring_prop(content, data, 'synthesis_title')
        populate_from_langstring_prop(content, data, 'description')
        announcement = content.get_applicable_announcement()
        if announcement:
            populate_from_langstring_prop(announcement, data, 'title', 'announcement_title')
            populate_from_langstring_prop(announcement, data, 'body', 'announcement_body')
        phase = content.get_associated_phase()
        if phase:
            data['phase_id'] = phase.id
            data['phase_identifier'] = phase.identifier
        data['message_view_override'] = content.message_view_override
        return get_uid(content), data
    elif isinstance(content, AgentProfile):
        data = {}
        for attr in ('creation_date', 'id', 'name'):
            data[attr] = getattr(content, attr, None)
            # AgentProfile doesn't have creation_date, User does.
        # get all discussions that the user is in via AgentStatusInDiscussion
        data['discussion_id'] = set(
            [s.discussion_id for s in content.agent_status_in_discussion])
        # get discussion_id for all posts of this agent
        data['discussion_id'] = list(
            data['discussion_id'].union(
                [post.discussion_id for post in content.posts_created]
            )
        )
        return get_uid(content), data
    elif isinstance(content, Post):
        data = {}
        # Parent-child mapping in the index: posts live under their creator.
        data['_parent'] = 'user:{}'.format(content.creator_id)
        if content.parent_id is not None:
            data['parent_creator_id'] = content.parent.creator_id
        for attr in ('discussion_id', 'creation_date', 'id', 'parent_id',
                     'creator_id', 'sentiment_counts'):
            data[attr] = getattr(content, attr)
        data['creator_display_name'] = AgentProfile.get(content.creator_id).display_name()
        data['sentiment_tags'] = [key for key in data['sentiment_counts']
                                  if data['sentiment_counts'][key] > 0]
        like = data['sentiment_counts']['like']
        disagree = data['sentiment_counts']['disagree']
        dont_understand = data['sentiment_counts']['dont_understand']
        more_info = data['sentiment_counts']['more_info']
        all_sentiments = [like, disagree, dont_understand, more_info]
        data['sentiment_counts']['total'] = sum(all_sentiments)
        data['sentiment_counts']['popularity'] = like - disagree
        # NOTE(review): on Python 2 these are integer divisions, so
        # consensus/controversy are truncated ratios — confirm intended.
        data['sentiment_counts']['consensus'] = max(all_sentiments) / ((sum(all_sentiments) / len(all_sentiments)) or 1)
        data['sentiment_counts']['controversy'] = max(like, disagree, 1) / min(like or 1, disagree or 1)
        data['type'] = content.type  # this is the subtype (assembl_post, email...)
        # data['publishes_synthesis_id'] = getattr(
        #     content, 'publishes_synthesis_id', None)
        phase = content.get_created_phase()
        if phase:
            data['phase_id'] = phase.id
            data['phase_identifier'] = phase.identifier
        if isinstance(content, SynthesisPost):
            populate_from_langstring_prop(content.publishes_synthesis, data, 'subject')
            populate_from_langstring_prop(content.publishes_synthesis, data, 'introduction')
            populate_from_langstring_prop(content.publishes_synthesis, data, 'conclusion')
            # Concatenate the synthesis ideas' titles per locale into one
            # searchable LangString.
            long_titles = [idea.synthesis_title
                           for idea in content.publishes_synthesis.ideas
                           if idea.synthesis_title]
            long_titles_c = defaultdict(list)
            for ls in long_titles:
                for e in ls.entries:
                    if e.value:
                        long_titles_c[strip_country(e.base_locale)].append(e.value)
            ls = LangString()
            for locale, values in long_titles_c.iteritems():
                ls.add_value(' '.join(values), locale)
            populate_from_langstring(ls, data, 'ideas')
        else:
            idea_id = get_idea_id_for_post(content)
            if not idea_id:
                # Post not attached to any idea: don't index it.
                return None, None
            data['idea_id'] = idea_id
            related_idea = Idea.get(idea_id[0])
            data['message_view_override'] = related_idea.message_view_override
            if isinstance(related_idea, Question):
                related_idea = related_idea.parents[0]
            # we take the title of the first idea in the list for now
            # (in v2, posts are attached to only one idea)
            populate_from_langstring_prop(
                related_idea, data, 'title', 'idea_title')
        populate_from_langstring_prop(content, data, 'body')
        populate_from_langstring_prop(content, data, 'subject')
        return get_uid(content), data
    elif isinstance(content, Extract):
        data = {}
        for attr in ('discussion_id', 'body', 'creation_date', 'id', 'creator_id'):
            data[attr] = getattr(content, attr)
        data['post_id'] = content.content_id
        post = Post.get(content.content_id)
        populate_from_langstring_prop(post, data, 'subject')
        phase = post.get_created_phase()
        if phase:
            data['phase_id'] = phase.id
            data['phase_identifier'] = phase.identifier
        idea_id = get_idea_id_for_post(post)
        if not idea_id:
            # Extract whose post has no idea: don't index it.
            return None, None
        data['idea_id'] = idea_id
        # we take the title of the first idea in the list for now
        # (in v2, posts are attached to only one idea)
        related_idea = Idea.get(idea_id[0])
        data['message_view_override'] = related_idea.message_view_override
        if isinstance(related_idea, Question):
            related_idea = related_idea.parents[0]
        populate_from_langstring_prop(
            related_idea, data, 'title', 'idea_title')
        data['extract_state'] = 'taxonomy_state.' + content.extract_state
        if content.extract_nature:
            data['extract_nature'] = 'taxonomy_nature.' + content.extract_nature.name
        if content.extract_action:
            data['extract_action'] = 'taxonomy_action.' + content.extract_action.name
        data['creator_display_name'] = AgentProfile.get(content.creator_id).display_name()
        return get_uid(content), data
    # Any other model type is not indexed.
    return None, None
def post_extract(request):
    """
    Create a new extract.

    Accepts either the Annotator JSON shape (top-level 'uri'/'text') or
    the internal shape with a 'target' ('email' post or 'webpage').
    Authenticates via the session or an X-Annotator-Auth-Token header.
    Returns {'ok': True, 'id': <extract uri>}.
    """
    extract_data = json.loads(request.body)
    discussion_id = int(request.matchdict['discussion_id'])
    user_id = authenticated_userid(request)
    if not user_id:
        # Straight from annotator: fall back to the annotator auth token.
        token = request.headers.get('X-Annotator-Auth-Token')
        if token:
            token = decode_token(
                token, request.registry.settings['session.secret'])
            if token:
                user_id = token['userId']
    if not user_id:
        user_id = Everyone
    if not user_has_permission(discussion_id, user_id, P_ADD_EXTRACT):
        #TODO: maparent: restore this code once it works:
        #return HTTPForbidden(result=ACLDenied(permission=P_ADD_EXTRACT))
        return HTTPForbidden()
    if user_id == Everyone:
        # TODO: Create an anonymous user.
        raise HTTPServerError("Anonymous extracts are not implemeted yet.")
    content = None
    uri = extract_data.get('uri')
    annotation_text = None
    if uri:
        # Straight from annotator
        annotation_text = extract_data.get('text')
    else:
        target = extract_data.get('target')
        if not (target or uri):
            raise HTTPClientError("No target")
        target_type = target.get('@type')
        if target_type == 'email':
            post_id = target.get('@id')
            post = Post.get_instance(post_id)
            if not post:
                raise HTTPNotFound(
                    "Post with id '%s' not found." % post_id)
            content = post
        elif target_type == 'webpage':
            uri = target.get('url')
    if uri and not content:
        content = Webpage.get_instance(uri)
        if not content:
            # TODO: maparent: This is actually a singleton pattern, should be
            # handled by the AnnotatorSource now that it exists...
            source = AnnotatorSource.db.query(AnnotatorSource).filter_by(
                discussion_id=discussion_id).filter(
                cast(AnnotatorSource.name, Unicode) == 'Annotator').first()
            if not source:
                source = AnnotatorSource(
                    name='Annotator', discussion_id=discussion_id,
                    type='source')
            # NOTE(review): `source` is created but never added to the
            # session nor attached to the Webpage — presumably dead code
            # or relying on session cascade; verify.
            content = Webpage(url=uri, discussion_id=discussion_id)
    extract_body = extract_data.get('quote', '')
    new_extract = Extract(
        creator_id=user_id,
        owner_id=user_id,
        discussion_id=discussion_id,
        body=extract_body,
        annotation_text=annotation_text,
        content=content
    )
    Extract.db.add(new_extract)
    # Persist each annotator range as a TextFragmentIdentifier.
    for range_data in extract_data.get('ranges', []):
        range = TextFragmentIdentifier(
            extract=new_extract,
            xpath_start=range_data['start'],
            offset_start=range_data['startOffset'],
            xpath_end=range_data['end'],
            offset_end=range_data['endOffset'])
        TextFragmentIdentifier.db.add(range)
    Extract.db.flush()
    return {'ok': True, 'id': new_extract.uri()}
def get_posts(request):
    """
    Query interface on posts

    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order can be chronological, reverse_chronological

    Returns a dict with page/unread/total/maxPage/startIndex/endIndex/posts.
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)
    # Pull any new posts from external sources before querying.
    discussion.import_from_sources()
    user_id = authenticated_userid(request)
    permissions = get_permissions(user_id, discussion_id)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    filter_names = [
        filter_name for filter_name \
        in request.GET.getone('filters').split(',') \
        if filter_name
    ] if request.GET.get('filters') else []
    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    order = request.GET.get('order')
    if order == None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological')
    if page < 1:
        page = 1
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get('view') or 'default'
    only_synthesis = request.GET.get('only_synthesis')
    # Query SynthesisPost (a Post subclass) when only syntheses are wanted.
    PostClass = SynthesisPost if only_synthesis == "true" else Post
    posts = Post.db.query(PostClass)
    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    )
    ##no_of_posts_to_discussion = posts.count()
    post_data = []
    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        # Raw SQL (from the Idea model) selecting posts not under any idea.
        orphans = text(Idea._get_orphan_posts_statement(),
                       bindparams=[bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))
    if root_idea_id:
        related = text(Idea._get_related_posts_statement(),
                       bindparams=[bindparam('root_idea_id', root_idea_id),
                                   bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('related')
        #Virtuoso bug: This should work...
        #posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.filter(PostClass.id.in_(related))
    if root_post_id:
        root_post = Post.get(root_post_id)
        # Subtree filter: descendants (materialized-path ancestry) plus
        # the root post itself.
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id)
        )
    else:
        root_post = None
    if ids:
        posts = posts.filter(Post.id.in_(ids))
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    if user_id:
        # Outer-join each post with this user's ViewPost (None if unread).
        posts = posts.outerjoin(ViewPost,
            and_(ViewPost.actor_id == user_id,
                 ViewPost.post_id == PostClass.id)
        )
        posts = posts.add_entity(ViewPost)
        if is_unread == "true":
            posts = posts.filter(ViewPost.id == None)
        elif is_unread == "false":
            posts = posts.filter(ViewPost.id != None)
    else:
        #If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))
    #posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'partial':
        pass
        # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(joinedload_all(Post.creator),
                              undefer(Email.recipients))
    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    no_of_posts = 0
    no_of_posts_viewed_by_user = 0
    for query_result in posts:
        # With a user the rows are (post, viewpost) tuples; otherwise posts.
        if user_id:
            post, viewpost = query_result
        else:
            post, viewpost = query_result, None
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id and root_post is not None and root_post.id == post.id:
            #Mark post read, we requested it explicitely
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post
            )
            Post.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        post_data.append(serializable_post)
    # Benoitg: For now, this completely garbles threading without intelligent
    #handling of pagination. Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = Post.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"])/page_size))
    #TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size-1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size-1)
    data["posts"] = post_data
    return data
def get_posts(request):
    """Query interface on posts.

    Filters have two forms:

    - ``only_*`` filters cannot be reversed (ex: ``only_synthesis``).
    - ``is_*`` filters can be reversed (ex: ``is_unread=true`` returns only
      unread messages, ``is_unread=false`` returns only read messages).

    ``order`` can be ``chronological``, ``reverse_chronological`` or
    ``score`` (``score`` requires ``text_search``).

    Returns a dict with pagination info (``page``, ``total``, ``unread``,
    ``maxPage``, ``startIndex``, ``endIndex``) and the serialized ``posts``.

    :raises HTTPNotFound: if the discussion does not exist.
    :raises HTTPBadRequest: for unsupported filter combinations, or when an
        anonymous user asks for read posts.
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score')
    if order == 'score':
        # Scoring comes from the full-text index, so it needs a search term.
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), \
            "Unable to find agent profile with id " + post_author_id

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), \
            "Unable to find agent profile with id " + post_replies_to

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    # Parallel query used only to prefetch idea-content links per post id;
    # it must receive the same filters as the main posts query.
    ideaContentLinkQuery = discussion.db.query(
        PostClass.id, PostClass.idea_content_links_above_post)
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    )
    ideaContentLinkQuery = ideaContentLinkQuery.filter(
        PostClass.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = text(Idea._get_orphan_posts_statement(),
                       bindparams=[bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            orphans, PostClass.id == orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))

    # "true" means hidden only, "false" (default) means visible only.
    # "any" means both.
    hidden = request.GET.get('hidden_messages', "false")
    if hidden != 'any':
        posts = posts.filter(PostClass.hidden == asbool(hidden))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.hidden == asbool(hidden))

    if root_idea_id:
        related = text(Idea._get_related_posts_statement(),
                       bindparams=[bindparam('root_idea_id', root_idea_id),
                                   bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('related')
        #Virtuoso bug: This should work...
        #posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.join(related, PostClass.id == related.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            related, PostClass.id == related.c.post_id)
    if root_post_id:
        root_post = Post.get(root_post_id)
        # Subtree of root_post (ancestry is a materialized path), plus the
        # root post itself.
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id)
        )
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        # Subtree of the post, the post itself, and all its ancestors.
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids))
        )
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids))
        )
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(
                PostClass.creation_date >= posted_after_date)
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(
                PostClass.creation_date <= posted_before_date)
            # BUGFIX: this used to read
            #   ideaContentLinkQuery = posts.filter(
            #       ideaContentLinkQuery.creation_date <= posted_before_date)
            # which clobbered ideaContentLinkQuery with the posts query and
            # raised AttributeError (Query has no .creation_date). Mirror the
            # posted_after_date branch instead.
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            parent_alias, PostClass.parent)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
            ViewPost.tombstone_condition(),
            ViewPost.actor_id == user_id,
            *ViewPost.get_discussion_conditions(discussion_id))}
        liked_posts = {l.post_id: l.id for l in discussion.db.query(
            LikedPost).filter(
            LikedPost.tombstone_condition(),
            LikedPost.actor_id == user_id,
            *LikedPost.get_discussion_conditions(discussion_id))}
        if is_unread is not None:
            # NOTE: "== None" below is SQLAlchemy's IS NULL idiom, not a
            # Python comparison; do not "fix" it to "is None".
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = user_pref_as_translation_table(user, service)
    else:
        #If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            Post.body_text_index.contains(
                text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        # Eager-load everything the serializer touches to avoid N+1 queries.
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    # post id -> idea_content_links_above_post, for the serialization loop.
    ideaContentLinkCache = dict(ideaContentLinkQuery.all())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    else:
        posts = posts.order_by(Content.id)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        # With order == 'score' each row is (post, score); otherwise a bare
        # post. Normalize to a sequence.
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None
                and root_post.id == post.id):
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post
            )
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (
            LikedPost.uri_generic(likedpost) if likedpost else False)
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    #handling of pagination. Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
def create_post(request):
    """
    Create a new post in this discussion.

    We use post, not put, because we don't know the id of the post.

    Reads a JSON body with optional keys: body, html, reply_id, idea_id,
    subject, publishes_synthesis_id, message_classifier. Returns the new
    post serialized with the 'default' view_def.

    :raises HTTPUnauthorized: if the request has no authenticated user.
    :raises HTTPBadRequest: if the message body is empty (and the post does
        not publish a synthesis).
    :raises HTTPNotFound: if the discussion does not exist.
    """
    localizer = request.localizer
    request_body = json.loads(request.body)
    user_id = request.authenticated_userid
    if not user_id:
        raise HTTPUnauthorized()

    user = Post.default_db.query(User).filter_by(id=user_id).one()

    body = request_body.get('body', None)
    html = request_body.get('html', None)  # BG: Is this used now? I cannot see it.
    reply_id = request_body.get('reply_id', None)
    idea_id = request_body.get('idea_id', None)
    subject = request_body.get('subject', None)
    publishes_synthesis_id = request_body.get('publishes_synthesis_id', None)
    message_classifier = request_body.get('message_classifier', None)

    if not body and not publishes_synthesis_id:
        # Should we allow empty messages otherwise?
        raise HTTPBadRequest(localizer.translate(
            _("Your message is empty")))

    if reply_id:
        in_reply_to_post = Post.get_instance(reply_id)
    else:
        in_reply_to_post = None

    if idea_id:
        in_reply_to_idea = Idea.get_instance(idea_id)
    else:
        in_reply_to_idea = None

    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get_instance(discussion_id)

    if not discussion:
        raise HTTPNotFound(localizer.translate(_(
            "No discussion found with id=%s")) % (discussion_id,))

    ctx = DummyContext({Discussion: discussion})
    # Build the body LangString: sanitized html, sanitized multilingual
    # entries, or an empty LangString (synthesis-only post).
    if html:
        log.warning("Still using html")
        # how to guess locale in this case?
        body = LangString.create(sanitize_html(html))
        # TODO: AssemblPosts are pure text right now.
        # Allowing HTML requires changes to the model.
    elif body:
        # TODO: Accept HTML body.
        for e in body['entries']:
            e['value'] = sanitize_text(e['value'])
        body = LangString.create_from_json(
            body, context=ctx, user_id=user_id)
    else:
        body = LangString.EMPTY(discussion.db)

    if subject:
        # Caller provided an explicit multilingual subject.
        for e in subject['entries']:
            e['value'] = sanitize_text(e['value'])
        subject = LangString.create_from_json(
            subject, context=ctx, user_id=user_id)
    else:
        # No subject given: derive one from the parent post, the parent
        # idea, or the discussion topic, in that order of preference.
        from assembl.models import Locale
        locale = Locale.UNDEFINED
        # print(in_reply_to_post.subject, discussion.topic)
        if in_reply_to_post and in_reply_to_post.get_title():
            original_subject = in_reply_to_post.get_title().first_original()
            if original_subject:
                locale = original_subject.locale_code
                # NOTE(review): by operator precedence this parses as
                # `original_subject.value or ('' if ... else '')`; the inner
                # condition is already known truthy here, so the effect is
                # `original_subject.value or ''` — confirm intent.
                subject = (
                    original_subject.value or ''
                    if in_reply_to_post.get_title() else '')
        elif in_reply_to_idea:
            # TODO: THis should use a cascade like the frontend
            # also, some ideas have extra langstring titles
            subject = (in_reply_to_idea.short_title
                       if in_reply_to_idea.short_title else '')
            locale = discussion.main_locale
        else:
            subject = discussion.topic if discussion.topic else ''
            locale = discussion.main_locale
        # print subject
        if subject is not None and len(subject):
            # Prefix with "Re: " after stripping any existing reply prefix.
            new_subject = "Re: " + restrip_pat.sub('', subject).strip()
            if (in_reply_to_post and new_subject == subject and
                    in_reply_to_post.get_title()):
                # reuse subject and translations
                subject = in_reply_to_post.get_title().clone(discussion.db)
            else:
                # how to guess locale in this case?
                subject = LangString.create(new_subject, locale)
        else:
            capture_message(
                "A message is about to be written to the database with an "
                "empty subject. This is not supposed to happen.")
            subject = LangString.EMPTY(discussion.db)

    post_constructor_args = {
        'discussion': discussion,
        'creator_id': user_id,
        'message_classifier': message_classifier,
        'subject': subject,
        'body': body
    }

    if publishes_synthesis_id:
        published_synthesis = Synthesis.get_instance(publishes_synthesis_id)
        post_constructor_args['publishes_synthesis'] = published_synthesis
        new_post = SynthesisPost(**post_constructor_args)
        new_post.finalize_publish()
    else:
        new_post = AssemblPost(**post_constructor_args)
    new_post.guess_languages()

    # Flush before set_parent so the post has a database id.
    discussion.db.add(new_post)
    discussion.db.flush()

    if in_reply_to_post:
        new_post.set_parent(in_reply_to_post)

    if in_reply_to_idea:
        idea_post_link = IdeaRelatedPostLink(
            creator_id=user_id,
            content=new_post,
            idea=in_reply_to_idea
        )
        discussion.db.add(idea_post_link)

        # Notify the idea and each of its ancestors (first parent only at
        # each level) that their post counts may have changed.
        idea = in_reply_to_idea
        while idea:
            idea.send_to_changes()
            parents = idea.get_parents()
            idea = next(iter(parents)) if parents else None
    else:
        discussion.root_idea.send_to_changes()

    # Fan the new post out to any discussion source that supports it.
    for source in discussion.sources:
        if 'send_post' in dir(source):
            source.send_post(new_post)
    permissions = get_permissions(user_id, discussion_id)

    return new_post.generic_json('default', user_id, permissions)