def test_extracts_on_post(admin_user, graphql_request, discussion, top_post_in_thread_phase):
    """A Post node must expose its extracts (body + important flag) via GraphQL."""
    from graphene.relay import Node
    from assembl.models import Extract, Post

    # Resolve the relay global id back to the database primary key.
    post_pk = int(Node.from_global_id(top_post_in_thread_phase)[1])
    post = Post.get(post_pk)

    # Attach one ordinary and one "important" extract, in this order.
    for body, important, extract_hash in (
            (u"super quote", False, u"extract1"),
            (u"super important quote", True, u"extract2")):
        post.extracts.append(
            Extract(body=body, important=important, creator=admin_user,
                    owner=admin_user, discussion=discussion,
                    extract_hash=extract_hash))
    post.db.flush()

    result = schema.execute(u"""
query Post($id: ID!) {
  post: node(id: $id) {
    ... on Post {
      extracts {
        body
        important
      }
    }
  }
}
""", context_value=graphql_request, variable_values={
        "id": top_post_in_thread_phase,
    })
    # The json round-trip normalizes OrderedDicts to plain dicts for comparison.
    assert json.loads(json.dumps(result.data)) == {
        u'post': {
            u'extracts': [
                {u'body': u'super quote', u'important': False},
                {u'body': u'super important quote', u'important': True},
            ]
        }
    }
def extract_submitted_in_post_related_to_sub_idea_1_1_1(
        request, participant2_user, post_related_to_sub_idea_1_1_1,
        subidea_1_1, discussion, test_session):
    """ Create an extract in a post related to an idea.

    The extract is flushed to the test session and removed again by a
    pytest finalizer.
    """
    from assembl.models import Extract
    from assembl.models.idea_content_link import (
        ExtractNatureVocabulary, ExtractActionVocabulary)
    new_extract = Extract(
        discussion_id=discussion.id,
        body=u"Commodi maiores magni rerum. Sint natus corporis in qui in ut dignissimos cumque repellendus. Reprehenderit nihil illum.",
        creator=participant2_user,
        owner=participant2_user,
        content=post_related_to_sub_idea_1_1_1,
        extract_hash=u'extract_submitted_in_post_related_to_sub_idea_1_1_1',
        extract_nature=ExtractNatureVocabulary.Enum.actionable_solution,
        extract_action=ExtractActionVocabulary.Enum.give_examples)
    test_session.add(new_extract)
    test_session.flush()

    def fin():
        # Fixed copy-paste bug: the message previously named a different
        # fixture (extract_with_range_submitted_in_reply_post_1).
        print("finalizer extract_submitted_in_post_related_to_sub_idea_1_1_1")
        test_session.delete(new_extract)
        test_session.flush()
    request.addfinalizer(fin)
    return new_extract
def extract_post_1_to_subidea_1_1(request, participant2_user, reply_post_1, subidea_1_1, discussion, test_session):
    """ Links reply_post_1 to subidea_1_1 """
    from assembl.models import Extract, Keyword
    from assembl.models.idea_content_link import (
        ExtractNatureVocabulary, ExtractActionVocabulary)

    extract = Extract(
        body=u"body",
        creator=participant2_user,
        owner=participant2_user,
        content=reply_post_1,
        idea_id=subidea_1_1.id,  # strange bug: Using idea directly fails
        discussion=discussion,
        extract_hash=u'extract_post_1_to_subidea_1_1',
        extract_nature=ExtractNatureVocabulary.Enum.actionable_solution,
        extract_action=ExtractActionVocabulary.Enum.give_examples)
    # Tag the extract with both freshly created and pre-existing keywords.
    tag_lookup = Keyword.get_tags(['foo', 'bar'], discussion.id, test_session)
    extract.tags = tag_lookup['new_tags'] + tag_lookup['tags']
    test_session.add(extract)
    test_session.flush()

    def fin():
        print("finalizer extract_post_1_to_subidea_1_1")
        # Detach the tags before deleting them, then remove the extract itself.
        owned_tags = extract.tags
        extract.tags = []
        for tag in owned_tags:
            test_session.delete(tag)
        test_session.delete(extract)
        test_session.flush()
    request.addfinalizer(fin)
    return extract
def extract_with_range_submitted_in_reply_post_1(request, discussion_admin_user, reply_post_1, subidea_1_1, discussion, test_session):
    """ Create an extract of a given range of text in a message """
    from assembl.models import Extract, TextFragmentIdentifier, ExtractStates

    extract_body = "variable-temperature spectra indicate the onset of oxide-ion motion involving the interstitials at 130 °C, which is linked to an orthorhombic−tetragonal phase transition. For the V-doped phases, an oxide-ion conduction mechanism is observed that involves oxygen exchange between the Bi-O sublattice and rapidly rotating VO4 tetrahedral units. The more poorly conducting P-doped phase exhibits only vacancy conduction with no evidence of sublattice exchange, a result ascribed to the differing propensities of the dopants to undergo variable oxygen coordination. So I think it would be a very bad idea to allow hot beverages in coworking spaces."

    # The range starts and ends in the same DOM node of the message body.
    xpath = u"//div[@id='message-body-local:Content/%s']/" % reply_post_1.id
    start_offset = 314
    end_offset = 958
    lang = 'en'
    extract_hash = Extract.get_extract_hash(
        lang, xpath, xpath, start_offset, end_offset, reply_post_1.id)

    new_extract = Extract(
        creator_id=discussion_admin_user.id,
        owner_id=discussion_admin_user.id,
        discussion_id=discussion.id,
        body=extract_body,
        important=True,
        content=reply_post_1,
        extract_state=ExtractStates.SUBMITTED.value,
        extract_hash=extract_hash)
    new_extract.lang = lang
    test_session.add(new_extract)

    new_range = TextFragmentIdentifier(
        extract=new_extract,
        xpath_start=xpath, offset_start=start_offset,
        xpath_end=xpath, offset_end=end_offset)
    test_session.add(new_range)
    test_session.flush()

    def fin():
        print("finalizer extract_with_range_submitted_in_reply_post_1")
        test_session.delete(new_range)
        test_session.delete(new_extract)
        test_session.flush()
    request.addfinalizer(fin)
    return new_extract
def extract_post_1_to_subidea_1_1(
        request, participant2_user, reply_post_1, subidea_1_1, discussion,
        test_session):
    """ Links reply_post_1 to subidea_1_1 """
    from assembl.models import Extract

    link = Extract(
        body=u"body",
        creator=participant2_user,
        owner=participant2_user,
        content=reply_post_1,
        idea_id=subidea_1_1.id,  # strange bug: Using idea directly fails
        discussion=discussion)
    test_session.add(link)
    test_session.flush()

    def fin():
        print("finalizer extract_post_1_to_subidea_1_1")
        test_session.delete(link)
        test_session.flush()
    request.addfinalizer(fin)
    return link
def post_extract(request):
    """ Create a new extract from a POSTed annotator payload.

    The target is either an internal Post (``target['@id']``) or an
    external Webpage (``uri`` / ``target['url']``); the latter is created
    on the fly, along with the per-discussion 'Annotator' source.
    Returns ``{'ok': True, '@id': <uri of the new extract>}``.

    Raises HTTPForbidden (no P_ADD_EXTRACT, or idea association denied),
    HTTPServerError (anonymous user), HTTPBadRequest (missing target or
    cross-discussion idea), HTTPNotFound (unknown post).
    """
    extract_data = json.loads(request.body)
    discussion = request.context
    db = discussion.db
    user_id = authenticated_userid(request)
    if not user_id:
        # Straight from annotator: authenticate via the annotator token header.
        token = request.headers.get('X-Annotator-Auth-Token')
        if token:
            token = decode_token(
                token, request.registry.settings['session.secret'])
            if token:
                user_id = token['userId']
        user_id = user_id or Everyone
        # Fixed: was get_permissions(user_id, discussion_id) with an
        # undefined name `discussion_id`.
        permissions = get_permissions(user_id, discussion.id)
    else:
        permissions = request.permissions
    if P_ADD_EXTRACT not in permissions:
        # TODO: maparent: restore this code once it works:
        # raise HTTPForbidden(result=ACLDenied(permission=P_ADD_EXTRACT))
        raise HTTPForbidden()
    if not user_id or user_id == Everyone:
        # TODO: Create an anonymous user.
        raise HTTPServerError("Anonymous extracts are not implemented yet.")
    content = None
    uri = extract_data.get('uri')
    important = extract_data.get('important', False)
    annotation_text = extract_data.get('text')
    target = extract_data.get('target')
    if not uri:
        # Extract from an internal post
        if not target:
            raise HTTPBadRequest("No target")
        target_class = sqla.get_named_class(target.get('@type'))
        if issubclass(target_class, Post):
            post_id = target.get('@id')
            post = Post.get_instance(post_id)
            if not post:
                raise HTTPNotFound("Post with id '%s' not found." % post_id)
            content = post
        elif issubclass(target_class, Webpage):
            uri = target.get('url')
    if uri and not content:
        content = Webpage.get_instance(uri)
    if not content:
        # TODO: maparent: This is actually a singleton pattern, should be
        # handled by the AnnotatorSource now that it exists...
        source = db.query(AnnotatorSource).filter_by(
            discussion=discussion).filter(
            cast(AnnotatorSource.name, Unicode) == 'Annotator').first()
        if not source:
            source = AnnotatorSource(name='Annotator', discussion=discussion)
            db.add(source)
        content = Webpage(url=uri, discussion=discussion)
        db.add(content)
    extract_body = extract_data.get('quote', None)
    idea_id = extract_data.get('idIdea', None)
    if idea_id:
        idea = Idea.get_instance(idea_id)
        if idea.discussion.id != discussion.id:
            # Fixed: previously formatted `extract.get_discussion_id()`,
            # but no `extract` exists yet at this point (NameError).
            raise HTTPBadRequest(
                "Extract from discussion %s cannot be associated with an idea from a different discussion." % discussion.id)
        if not idea.has_permission_req(P_ASSOCIATE_EXTRACT):
            raise HTTPForbidden("Cannot associate extract with this idea")
    else:
        idea = None
    # The quoted text belongs on the Extract itself (cf. the range fixture
    # elsewhere in the test suite); it was previously mispassed to each
    # TextFragmentIdentifier.
    new_extract = Extract(
        creator_id=user_id,
        owner_id=user_id,
        discussion=discussion,
        idea=idea,
        important=important,
        annotation_text=annotation_text,
        body=extract_body,
        content=content)
    db.add(new_extract)
    for range_data in extract_data.get('ranges', []):
        range = TextFragmentIdentifier(
            extract=new_extract,
            xpath_start=range_data['start'],
            offset_start=range_data['startOffset'],
            xpath_end=range_data['end'],
            offset_end=range_data['endOffset'])
        db.add(range)
    db.flush()
    return {'ok': True, '@id': new_extract.uri()}