def paths_filtered_by_status(request, paths, exclude=("deleted", "replaced"), include=None): if include is not None: return [ path for path in paths if traverse(request.root, path)["context"].__json__(request).get("status") in include ] else: return [ path for path in paths if traverse(request.root, path)["context"].__json__(request).get("status") not in exclude ]
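# pyramid.traversal.traverse() returns a dict with the keys 'context', 'root',
# 'view_name', 'subpath', 'traversed', 'virtual_root', and 'virtual_root_path';
# the snippets in this collection mostly index ['context'] to get the resolved
# resource. A hedged usage sketch for the helper above (the `request` and
# `rev_paths` names are hypothetical):
#
#   visible = paths_filtered_by_status(request, rev_paths)
#   released = paths_filtered_by_status(request, rev_paths, include=('released',))
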
def __getitem__(self, key):
    if not isinstance(key, int) and "*" in key:
        # Glob pattern: collect every matching entry in this directory.
        # (The original had a stray bare `raise`, a dead `things = self.items`
        # assignment, and referenced an undefined `subpath`; the digit check
        # also ran first and made this branch unreachable.)
        pattern = re.compile(fnmatch.translate(key))
        contexts = [(f, traverse(self, f)["context"])
                    for f in listdir(self.path) if pattern.match(f)]
        return MultipleTraverser.from_parent(self, key, contexts)
    if not isinstance(key, int) and not key.isdigit():
        return
    b = self.basket[int(key)]
    return traverse(HomeResource(self.request), b['path'])["context"]

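# The glob branch above leans on the stdlib fnmatch.translate(), which turns a
# shell-style wildcard into a regular-expression string. A minimal, runnable
# illustration (pattern and names invented):

import fnmatch
import re

pattern = re.compile(fnmatch.translate("h_*"))  # glob -> regex
assert pattern.match("h_mass")                  # matches the wildcard
assert not pattern.match("tree_events")         # no "h_" prefix, no match
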
def analysis_step_version(self, request, root, step_run=None):
    if step_run is None:
        return
    step_run_obj = traverse(root, step_run)['context']
    step_version_uuid = step_run_obj.__json__(request).get('analysis_step_version')
    if step_version_uuid is not None:
        return request.resource_path(root[step_version_uuid])

def fill_slot(self, index, value):
    """Fill the `index`th slot of the URL with `value`."""
    fragments = list(reversed(list(lineage(self))))
    fillers = self.slot_fillers
    assert index < len(fillers)
    # Index into the path for filling the `index`th slot and a function
    # which fills it
    to_fill = self.ordering[index]
    filler_index, filler_function = fillers[to_fill]
    # Get the (as yet incomplete) resource with the slot filled
    filled_traverser = filler_function(fragments[filler_index], value)
    assert filled_traverser
    # Get the path which needs to be appended to this traverser
    remaining_fragments = [f.__name__ for f in fragments[filler_index + 1:]]
    remaining_fragments = transpose_fragments_fixup(remaining_fragments, to_fill)
    # Traverse any remaining parts of the path, if they exist
    remaining_path = "/".join(remaining_fragments)
    if remaining_path:
        filled_traverser = traverse(filled_traverser, remaining_path)["context"]
    return filled_traverser

def main():
    parser = OptionParser(description=__doc__)
    parser.add_option('-s', '--source', dest='source', action="store",
                      default='/', metavar='ZODB-PATH',
                      help="The ZODB source path to dump (e.g. /foo/bar or /)")
    parser.add_option('-d', '--dest', dest='dest', action="store",
                      default='dump', metavar='FILESYSTEM-PATH',
                      help="The destination filesystem path to dump to.")
    options, args = parser.parse_args()
    if args:
        config_uri = args[0]
    else:
        parser.error("Requires a config_uri as an argument")
    source = options.source
    dest = os.path.expanduser(os.path.normpath(options.dest))
    setup_logging(config_uri)
    env = bootstrap(config_uri)
    root = env['root']
    source = traverse(root, source)['context']
    dump(source, dest)

def biological_replicates(self, request, registry, root, bioreplicate=None):
    if bioreplicate is not None:
        replicate_obj = traverse(root, bioreplicate)['context']
        replicate_biorep = replicate_obj.__json__(request)['biological_replicate_number']
        return [replicate_biorep]
    conn = registry[CONNECTION]
    derived_from_closure = property_closure(request, 'derived_from', self.uuid)
    dataset_uuid = self.__json__(request)['biodataset']
    obj_props = (conn.get_by_uuid(uuid).__json__(request)
                 for uuid in derived_from_closure)
    replicates = {
        props['bioreplicate']
        for props in obj_props
        if props['biodataset'] == dataset_uuid and 'bioreplicate' in props
    }
    bioreps = {
        conn.get_by_uuid(uuid).__json__(request)['biological_replicate_number']
        for uuid in replicates
    }
    return sorted(bioreps)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("config_uri", help="Paster ini file to load settings from")
    parser.add_argument("path", help="from which path to clear likes (meeting or agenda item)")
    args = parser.parse_args()
    env = bootstrap(args.config_uri)
    root = env['root']
    request = env['request']
    context = traverse(root, args.path).get('context')
    if IMeeting.providedBy(context) or IAgendaItem.providedBy(context):
        print('Clearing likes on {}'.format(context.title))
        path_query = query.Eq('path', args.path)
        cleared = False
        for type_name in ('Proposal', 'DiscussionPost'):
            count, docids = root.catalog.query(path_query & query.Eq('type_name', type_name))
            response = input('Found {} {} on {}. Do you want to clear likes on these? (y/N) '.format(
                count, type_name, context.title).encode('utf8'))
            if response.lower() in ('y', 'yes', 'j', 'ja'):
                cleared = True
                for obj in request.resolve_docids(docids, perm=None):
                    like = request.registry.getAdapter(obj, IUserTags, name='like')
                    like.storage.clear()
                    like._notify()
        if cleared:
            transaction.commit()
        env['closer']()
    else:
        print('Path does not match a meeting or agenda item')

def get_filtered_rev_links(self, request, name):
    """
    Run get_rev_links, but only return items that do not have a status in
    self.filtered_rev_statuses (a tuple defined on the Item).
    If we are indexing, add rev_link info to _rev_linked_uuids_by_item.

    Args:
        request: current Request
        name (str): name of the rev (must be in self.rev)

    Returns:
        list of str uuids of the given rev_link, filtered by status
    """
    # Consider caching rev links on the request? Would save DB requests.
    # May not be worth it because they are quite fast.
    rev_uuids = self.get_rev_links(request, name)
    filtered_uuids = [
        str(rev_id) for rev_id in rev_uuids
        if traverse(request.root, str(rev_id))['context'].__json__(request).get('status')
        not in self.filtered_rev_statuses
    ]
    if getattr(request, '_indexing_view', False) is True:
        to_update = {name: filtered_uuids}
        if str(self.uuid) in request._rev_linked_uuids_by_item:
            request._rev_linked_uuids_by_item[str(self.uuid)].update(to_update)
        else:
            request._rev_linked_uuids_by_item[str(self.uuid)] = to_update
    return filtered_uuids

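# Hedged usage sketch for get_filtered_rev_links. It assumes an Item subclass
# declaring a rev map in the snovault style, e.g.
# rev = {'reverse': ('TestingLinkSourceSno', 'target')}, and filtered statuses
# such as ('deleted', 'replaced'):
#
#   uuids = item.get_filtered_rev_links(request, 'reverse')
#   # -> str uuids of rev-linked items whose status survives the filter
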
def technical_replicates(self, request, registry, root, replicate=None):
    if replicate is not None:
        replicate_obj = traverse(root, replicate)['context']
        replicate_biorep = replicate_obj.__json__(request)['biological_replicate_number']
        replicate_techrep = replicate_obj.__json__(request)['technical_replicate_number']
        tech_rep_string = str(replicate_biorep) + "_" + str(replicate_techrep)
        return [tech_rep_string]
    conn = registry[CONNECTION]
    derived_from_closure = property_closure(request, 'derived_from', self.uuid)
    dataset_uuid = self.__json__(request)['dataset']
    obj_props = (conn.get_by_uuid(uuid).__json__(request)
                 for uuid in derived_from_closure)
    replicates = {
        props['replicate']
        for props in obj_props
        if props['dataset'] == dataset_uuid and 'replicate' in props
    }
    techreps = {
        str(conn.get_by_uuid(uuid).__json__(request)['biological_replicate_number'])
        + '_' +
        str(conn.get_by_uuid(uuid).__json__(request)['technical_replicate_number'])
        for uuid in replicates
    }
    return sorted(techreps)

def _get_user_home_path(context, request):
    """If the currently authenticated user has a 'home_path' set, return the
    traversal target and subpath for that path, so the caller can redirect
    there. Otherwise return (None, None).
    """
    userid = authenticated_userid(request)
    if userid is None:
        return None, None
    site = find_site(context)
    profiles = find_profiles(site)
    profile = profiles.get(userid, None)
    if profile is None:
        return None, None
    home_path = getattr(profile, 'home_path', None)
    if home_path:
        # OSI sets this to a single space to mean None
        home_path = home_path.strip()
    if not home_path:
        return None, None
    tdict = traverse(site, home_path)
    target = tdict['context']
    view_name = tdict['view_name']
    subpath = list(tdict['subpath'])
    if view_name:
        subpath.insert(0, view_name)
    return target, subpath

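# A sketch of how the (target, subpath) pair above is typically consumed,
# assuming a pyramid/repoze.bfg resource_url-style helper (the caller shown
# here is hypothetical):
#
#   target, extra_path = _get_user_home_path(context, request)
#   if target is not None:
#       return HTTPFound(location=resource_url(target, request, *extra_path))
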
def evolve(context):
    for path in exceptions:
        d = traverse(context, path)
        ob = d['context']
        if model_path(ob) == path:
            if hasattr(ob, '__acl__'):
                ob.__custom_acl__ = ob.__acl__
    reset_security_workflow(context)

def paths_filtered_by_status(request, paths, exclude=('deleted', 'replaced'), include=None):
    """
    This function has been deprecated in Fourfront, but is still used by the
    access_keys calc property in types/user.py (only for snowflakes).
    Filter out statuses that shouldn't be visible. Also convert each path to
    str, since functions like rev_links return uuids.
    """
    if include is not None:
        return [
            path for path in paths
            if traverse(request.root, str(path))['context'].__json__(request).get('status') in include
        ]
    else:
        return [
            path for path in paths
            if traverse(request.root, str(path))['context'].__json__(request).get('status') not in exclude
        ]

def _rewrite_link(site, path, link):
    url = urlparse.urlparse(link)
    scheme = url.scheme
    if scheme == 'mailto':
        # We can ignore mailto links
        return link
    if scheme == 'file':
        log.error("User linked to local filesystem at %s: %s", path, link)
        return link
    if scheme and scheme not in ('http', 'https'):
        unknown_schemes.add(scheme)
        log.warn("Unknown scheme in link at %s: %s", path, link)
        return link
    if url.hostname and url.hostname != 'karl.soros.org':
        # Skip external links
        return link
    link = url.path
    if link.startswith("#"):
        # Skip anchors
        return link
    log.debug("Check link: %s", link)
    if not link.startswith('/'):
        # Try to resolve relative url
        link = urlparse.urljoin(path, link)
    # Some links still have '..' even after rewriting
    # These kill traverse
    link = link.replace("../", "")
    if isinstance(link, unicode):
        link = link.encode('utf-8')
    # Attempt BFG traversal and see what that gets us
    traversal = traverse(site, link)
    context = traversal["context"]
    view_name = traversal["view_name"]
    if view_name is None:
        # Path led right up to an existing object. Awesome. Next.
        return link
    if not _check_view(context, view_name):
        # We didn't end up at a leaf node with a view.
        # Need to see if we can fix this broken link.
        return _broken_link(site, path, link)
    return link

def test_traversal__path_type_view_name(path, resource_type, view_name):
    """
    Ensure that traversing the ``path`` results in a resource of type
    ``resource_type`` with view name ``view_name``.
    """
    from pyramid.traversal import traverse
    root_resource = root_resource_factory()
    t = traverse(root_resource, path)
    assert isinstance(t['context'], resource_type)
    assert t['view_name'] == view_name

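# The test above expects to be driven by pytest parametrization; a plausible
# wiring (paths and resource types here are invented, only the decorator shape
# is the point):
#
#   @pytest.mark.parametrize('path, resource_type, view_name', [
#       ('/', RootResource, ''),
#       ('/users/42/edit', UserResource, 'edit'),
#   ])
#   def test_traversal__path_type_view_name(path, resource_type, view_name):
#       ...
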
def traverse(self, path):
    if path.startswith('/'):
        context = self.root
        path = path[1:]
    else:
        context = self
    result = traverse(context, path)
    if result['view_name']:
        raise KeyError(result['view_name'])
    return result['context']

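# Usage sketch for the traverse() wrapper above (resource names invented). It
# follows pyramid semantics: a non-empty view_name means the path ran past the
# last resource that actually exists, which the wrapper turns into a KeyError:
#
#   docs = folder.traverse('/docs/2021')    # absolute: resolved from self.root
#   logo = folder.traverse('images/logo')   # relative: resolved from folder
#   folder.traverse('/docs/missing')        # raises KeyError('missing')
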
def test_resource_form_traversal():
    result = traverse(_root, "/rest/schools/%s/@view" % SCHOOL_ID)
    context = result['context']
    request = DummyRequest(params={})
    request.context = context
    from webapp.views.rest import json_rest_get_f
    result = json_rest_get_f(context, request)
    assert result['id'] == SCHOOL_ID

def pipeline(self, root, request, step_run=None):
    if step_run is None:
        return
    workflow_uuid = traverse(root, step_run)['context'].__json__(request).get('workflow_run')
    if workflow_uuid is None:
        return
    pipeline_uuid = root[workflow_uuid].__json__(request).get('pipeline')
    if pipeline_uuid is None:
        return
    return request.resource_path(root[pipeline_uuid])

def test_traversal__path_resource_attribute(path, attribute_name, value):
    """
    Ensure that traversing the ``path`` results in a resource having the
    attribute ``attribute_name`` set to ``value``.
    """
    from pyramid.traversal import traverse
    root_resource = root_resource_factory()
    t = traverse(root_resource, path)
    context = t['context']
    assert getattr(context, attribute_name) == value

def resolve_uri(self, uri, exact=False):
    url = urlparse(uri)
    url_loc = url.netloc
    if not url.port:
        url_loc += ':80'
    if url_loc != self.req.host:
        raise ValueError('Alien URL supplied.')
    path = url.path.strip('/').split('/')[1:]
    path = [unquote(n) for n in path]
    tr = traverse(self, path)
    if exact and (tr['view_name'] or (len(tr['subpath']) > 0)):
        raise ValueError('Object not found.')
    return tr

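# Hedged example of calling resolve_uri (host and paths invented). Note the
# normalization: a URL without an explicit port is compared as ':80', so
# self.req.host is assumed to carry its port too, and the first path segment
# (the app prefix) is dropped before traversal:
#
#   tr = node.resolve_uri('http://example.com:80/app/albums/2021')
#   obj = tr['context']
#   node.resolve_uri('http://other.org/app/x')  # ValueError: Alien URL supplied.
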
def get_paths(self):
    paths = set()
    root = self.registry['root']
    request = _make_request('/', registry=self.registry)
    if root:
        excludes = self.siteconfig['site'].get('excludes', '').split('\n')
        excludes.extend([
            '.*',
            '/config.py*',
            '/site.cfg',
            '/%s' % self.siteconfig['site']['outpath']])
        relpaths = dirtools.Dir(root.abspath, excludes=excludes).files()
        for relpath in relpaths:
            traverse(root, relpath)
            if root:
                paths.add('/%s' % relpath)
    visited_routes = set()
    info = self.registry.queryUtility(IStaticURLInfo)
    if info:
        for (url, spec, route_name) in info._get_registrations(self.registry):
            visited_routes.add(route_name)
            path = abspath_from_resource_spec(spec)
            relpaths = dirtools.Dir(path).files()
            for relpath in relpaths:
                paths.add(request.route_path(route_name, subpath=relpath))
    routelist = self.site.config.config.get_routes_mapper().routelist
    for route in routelist:
        if route.factory is not None:
            matches = route.factory.matches(self.registry)
            paths = paths.union(route.generate(x) for x in matches)
        elif route.name not in visited_routes:
            paths.add(route.generate({}))
            visited_routes.add(route.name)
    return list(sorted(paths))

def resolve_uri(self, uri, exact=False, accept_path=True):
    url = urlparse(uri)
    url_loc = url.netloc
    if accept_path and (uri[0] == '/') and (url_loc == ''):
        url_loc = self.req.host
    elif not url.port:
        url_loc += ':80'
    if url_loc != self.req.host:
        raise ValueError('Alien URL supplied.')
    path = url.path.strip('/').split('/')[1:]
    path = [unquote(n) for n in path]
    tr = traverse(self, path)
    if exact and (tr['view_name'] or (len(tr['subpath']) > 0)):
        raise ValueError('Object not found.')
    return tr

def main():
    parser = OptionParser(description=__doc__)
    parser.add_option('-d', '--dry-run', dest='dry_run',
                      action="store_true", default=False,
                      help="Don't commit the transactions")
    parser.add_option('-i', '--interval', dest='commit_interval',
                      action="store", default=200,
                      help="Commit every N transactions")
    parser.add_option('-p', '--path', dest='path',
                      action="store", default=None, metavar='EXPR',
                      help="Reindex only objects whose path matches a regular expression")
    parser.add_option('-n', '--index', dest='indexes', action="append",
                      help="Reindex only the given index (can be repeated)")
    parser.add_option('-s', '--site', dest='site',
                      action="store", default=None, metavar='PATH')
    options, args = parser.parse_args()
    if args:
        config_uri = args[0]
    else:
        parser.error("Requires a config_uri as an argument")
    commit_interval = int(options.commit_interval)
    if options.path:
        path_re = re.compile(options.path)
    else:
        path_re = None
    kw = {}
    if options.indexes:
        kw['indexes'] = options.indexes
    setup_logging(config_uri)
    env = bootstrap(config_uri)
    site = env['root']
    if options.site:
        # traverse() returns a dict; we want the resolved resource
        site = traverse(site, options.site)['context']
    catalog = find_service(site, 'catalog')
    if catalog is None:
        # original format string was missing the %s placeholder
        raise KeyError('No catalog service found at %s' % resource_path(site))
    catalog.reindex(path_re=path_re, commit_interval=commit_interval,
                    dry_run=options.dry_run, **kw)

def __getitem__(self, key):
    if self.selection:
        if "*" in key:
            keys = sorted([k.GetName() for k in self.rootfile.GetListOfKeys()
                           if k.GetClassName() == self.selection])
            pattern = re.compile(fnmatch.translate(key))
            contexts = [(f, traverse(self, f)["context"])
                        for f in keys if pattern.match(f)]
            return MultipleTraverser.from_parent(self, key, contexts)
        try:
            (k for k in self.keys
             if k.GetName() == key and k.GetClassName() == self.selection).next()
        except StopIteration:
            return
        else:
            return super(SelectClass, self).__getitem__(key)
    else:
        return self.from_parent(self, key, self.rootfile, key)

def biological_replicates(self, request, registry, root, replicate=None):
    if replicate is not None:
        replicate_obj = traverse(root, replicate)['context']
        replicate_biorep = replicate_obj.__json__(request)['biological_replicate_number']
        return [replicate_biorep]
    conn = registry[CONNECTION]
    derived_from_closure = property_closure(request, 'derived_from', self.uuid)
    dataset_uuid = self.__json__(request)['dataset']
    obj_props = (conn.get_by_uuid(uuid).__json__(request)
                 for uuid in derived_from_closure)
    replicates = {
        props['replicate']
        for props in obj_props
        if props['dataset'] == dataset_uuid and 'replicate' in props
    }
    bioreps = {
        conn.get_by_uuid(uuid).__json__(request)['biological_replicate_number']
        for uuid in replicates
    }
    return sorted(bioreps)

def get_breadcrumbs(request, context=None):
    breadcrumbs = []
    req = request
    if not context:
        context = request.context
    path_items = resource_path_tuple(context)
    resources = []
    t = request.root
    for i, item in enumerate(path_items):
        t = traverse(t, item)['context']
        resources.append((i, t, item))
    end = len(resources)
    for i, resource, item in resources:
        infos = get_nav_infos(resource, req, item)
        if i == 0:
            infos['class'] = 'start'
        if 0 < i < end - 1:
            infos['class'] = 'middle'
        if i == end - 1:
            infos['class'] = 'end'
        breadcrumbs.append(infos)
    return breadcrumbs

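# The breadcrumb dicts built above always carry a 'class' key of 'start',
# 'middle', or 'end'; any other keys come from get_nav_infos, whose shape is
# assumed here. A hedged consumption sketch:
#
#   for crumb in get_breadcrumbs(request):
#       print(crumb['class'], crumb.get('title'), crumb.get('url'))
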
def test_upload_traversal(self):
    root = Root(None)
    root.__parent__ = None
    out = traverse(root, '/essence/20111227')
    context = out['context']
    self.assertEqual(context.filename, u'20111227')

def batch_images(context, request,
                 get_image_info=get_image_info,       # unittest
                 get_images_batch=get_images_batch):  # unittest
    include_image_url = request.params.get('include_image_url', None)
    # include_image_url is a special case.
    include_info = None
    if include_image_url is not None:
        # Note, we must use the path only, as IE submits the full domain
        # and without the next line IE would fail.
        path = urlparse.urlparse(include_image_url)[2]
        include_context = traverse(context, path)['context']
        if IImage.providedBy(include_context):
            # We have a good image to include.
            include_info = get_image_info(include_context, request)

    # Find query parameters based on the 'source' param,
    # which signifies the selection index of the source button
    # in the imagedrawer dialog.
    source = request.params.get('source')
    assert source in ('myrecent', 'thiscommunity', 'allkarl')
    if source == 'myrecent':
        creator = authenticated_userid(request)
        community_path = None
    elif source == 'thiscommunity':
        creator = None
        community = find_community(context)
        # batching api requires the community path
        community_path = resource_path(community)
    else:  # All Karl
        creator = None
        community_path = None

    # batching
    # Decide start and size here, don't let the lower levels
    # apply their default. This allows us to enforce
    # a MINIMAL_BATCH size.
    batch_start = int(request.params.get('start', '0'))
    batch_size = int(request.params.get('limit', '0'))
    # there is a minimal batch size to enforce, if the client
    # does not ask for one
    # Just pass the values to lower levels where sensible
    # defaults will be applied.
    sort_index = request.params.get('sort_on', None)
    reverse = request.params.get('reverse', None)

    # XXX include_image will now be inserted in the first
    # position, as extra image.
    insert_extra = False
    if include_info is not None:
        if batch_start == 0:
            batch_size -= 1
            insert_extra = True
        else:
            batch_start -= 1
    # Enforce the minimal batch size
    batch_size = max(batch_size, MINIMAL_BATCH)

    search_params = dict(
        creator=creator,
        community=community_path,
        batch_start=batch_start,
        batch_size=batch_size,
    )
    if sort_index:
        search_params['sort_index'] = sort_index
    if reverse:
        search_params['reverse'] = bool(int(reverse))
    batch_info = get_images_batch(context, request, **search_params)

    records = [get_image_info(image, request)
               for image in batch_info['entries']]
    start = batch_info['batch_start']
    totalRecords = batch_info['total']

    # add the fake included image
    if include_info is not None:
        totalRecords += 1
        if insert_extra:
            records.insert(0, include_info)
        else:
            start += 1

    return dict(
        records=records,
        start=start,
        totalRecords=totalRecords,
    )

def _callFUT(self, context, name):
    from pyramid.traversal import traverse
    return traverse(context, name)

def test_subcollection():
    result = traverse(_root, "/rest")
    assert isinstance(result['context'], RestRootCollection)

def test_resource():
    result = traverse(_root, "/rest/schools/%s" % SCHOOL_ID)
    context = result['context']
    assert isinstance(context, SchoolResource)
    assert isinstance(context.model, School)
    assert context.model.id == SCHOOL_ID

def test_search_traversal(self):
    root = Root(None)
    root.__parent__ = None
    out = traverse(root, '/search')
    context = out['context']
    self.assertTrue(isinstance(context, Search))

def test_resource_collection():
    result = traverse(_root, "/rest/schools/%s/students" % SCHOOL_ID)
    context = result['context']
    assert isinstance(context, webapp.RestCollection)
    assert context.subitems_source == 'students'

def test_queue_indexing_with_linked(app, testapp, indexer_testapp, dummy_request):
    """
    Test a whole bunch of things here:
    - posting/patching invalidates rev linked items
    - check linked_uuids/rev_link_names/rev_linked_to_me fields in ES
    - test indexer_utils.find_uuids_for_indexing fxn
    - test check_es_and_cache_linked_sids & validate_es_content
    - test purge functionality before and after removing links to an item
    """
    import webtest
    from snovault import util
    from pyramid.traversal import traverse
    from snovault.tests.testing_views import TestingLinkSourceSno
    es = app.registry[ELASTIC_SEARCH]
    indexer_queue = app.registry[INDEXER_QUEUE]
    # first, run create mapping with the indices we will use
    create_mapping.run(
        app,
        collections=['testing_link_target_sno', 'testing_link_source_sno'],
        skip_indexing=True
    )
    ppp_res = testapp.post_json(TEST_COLL, {'required': ''})
    ppp_uuid = ppp_res.json['@graph'][0]['uuid']
    target = {'name': 'one', 'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f'}
    source = {
        'name': 'A',
        'target': '775795d3-4410-4114-836b-8eeecf1d0c2f',
        'ppp': ppp_uuid,
        'uuid': '16157204-8c8f-4672-a1a4-14f4b8021fcd',
        'status': 'current',
    }
    target_res = testapp.post_json('/testing-link-targets-sno/', target, status=201)
    res = indexer_testapp.post_json('/index', {'record': True})
    time.sleep(2)  # wait for the first item to index
    doc_count_target = es.count(index='testing_link_target_sno',
                                doc_type='testing_link_target_sno').get('count')
    doc_count_ppp = es.count(index=TEST_TYPE, doc_type=TEST_TYPE).get('count')
    tries = 0
    while (doc_count_target < 1 or doc_count_ppp < 1) and tries < 5:
        time.sleep(4)
        doc_count_target = es.count(index='testing_link_target_sno',
                                    doc_type='testing_link_target_sno').get('count')
        doc_count_ppp = es.count(index=TEST_TYPE, doc_type=TEST_TYPE).get('count')
        tries += 1
    assert doc_count_target == 1
    assert doc_count_ppp == 1
    # indexing the source will also reindex the target, since it has a reverse link
    source_res = testapp.post_json('/testing-link-sources-sno/', source, status=201)
    source_uuid = source_res.json['@graph'][0]['uuid']
    time.sleep(2)
    res = indexer_testapp.post_json('/index', {'record': True})
    assert res.json['indexing_count'] == 2
    time.sleep(2)  # wait for them to index
    doc_count = es.count(index='testing_link_source_sno',
                         doc_type='testing_link_source_sno').get('count')
    tries = 0
    while doc_count < 1 and tries < 5:
        time.sleep(4)
        doc_count = es.count(index='testing_link_source_sno',
                             doc_type='testing_link_source_sno').get('count')
    assert doc_count == 1
    # patching json will not queue the embedded ppp;
    # the target will be indexed though, since it has a linkTo back to the source
    testapp.patch_json('/testing-link-sources-sno/' + source_uuid, {'name': 'ABC'})
    time.sleep(2)
    res = indexer_testapp.post_json('/index', {'record': True})
    assert res.json['indexing_count'] == 2
    time.sleep(3)
    # check some stuff on the es results for source and target
    es_source = es.get(index='testing_link_source_sno',
                       doc_type='testing_link_source_sno', id=source['uuid'])
    uuids_linked_emb = [link['uuid'] for link in es_source['_source']['linked_uuids_embedded']]
    uuids_linked_obj = [link['uuid'] for link in es_source['_source']['linked_uuids_object']]
    assert set(uuids_linked_emb) == {target['uuid'], source['uuid'], ppp_uuid}
    assert uuids_linked_obj == [source['uuid']]
    assert es_source['_source']['rev_link_names'] == {}
    assert es_source['_source']['rev_linked_to_me'] == [target['uuid']]
    es_target = es.get(index='testing_link_target_sno',
                       doc_type='testing_link_target_sno', id=target['uuid'])
    # just the source uuid itself in the linked uuids for the object view
    uuids_linked_emb2 = [link['uuid'] for link in es_target['_source']['linked_uuids_embedded']]
    uuids_linked_obj2 = [link['uuid'] for link in es_target['_source']['linked_uuids_object']]
    assert set(uuids_linked_emb2) == {target['uuid'], source['uuid']}
    assert uuids_linked_obj2 == [target['uuid']]
    assert es_target['_source']['rev_link_names'] == {'reverse': [source['uuid']]}
    assert es_target['_source']['rev_linked_to_me'] == []
    # test find_uuids_for_indexing
    to_index = indexer_utils.find_uuids_for_indexing(app.registry, {target['uuid']})
    assert to_index == {target['uuid'], source['uuid']}
    to_index = indexer_utils.find_uuids_for_indexing(app.registry, {ppp_uuid})
    assert to_index == {ppp_uuid, source['uuid']}
    # this will return the target uuid, since it has an indexed rev link
    to_index = indexer_utils.find_uuids_for_indexing(app.registry, {source['uuid']})
    assert to_index == {target['uuid'], source['uuid']}
    # now use a made-up uuid; only result should be itself
    fake_uuid = str(uuid.uuid4())
    to_index = indexer_utils.find_uuids_for_indexing(app.registry, {fake_uuid})
    assert to_index == {fake_uuid}
    # test @@links functionality
    source_links_res = testapp.get('/' + source['uuid'] + '/@@links', status=200)
    linking_uuids = source_links_res.json.get('uuids_linking_to')
    assert linking_uuids and len(linking_uuids) == 1
    assert linking_uuids[0]['uuid'] == target['uuid']  # rev_link from target
    # test check_es_and_cache_linked_sids and validate_es_content;
    # must get the context object through request traversal
    dummy_request.datastore = 'database'
    assert dummy_request._sid_cache == {}
    source_ctxt = traverse(dummy_request.root, source_res.json['@graph'][0]['@id'])['context']
    target_ctxt = traverse(dummy_request.root, target_res.json['@graph'][0]['@id'])['context']
    # first check frame=object for target
    tar_es_res_obj = util.check_es_and_cache_linked_sids(target_ctxt, dummy_request, 'object')
    assert tar_es_res_obj['uuid'] == target['uuid']
    assert set(uuids_linked_obj2) == set(dummy_request._sid_cache)
    # frame=embedded for source
    src_es_res_emb = util.check_es_and_cache_linked_sids(source_ctxt, dummy_request, 'embedded')
    assert src_es_res_emb['uuid'] == source['uuid']
    assert set(uuids_linked_emb) == set(dummy_request._sid_cache)
    # make sure everything in _sid_cache is present and up to date
    for rid in dummy_request._sid_cache:
        found_sid = dummy_request.registry[STORAGE].write.get_by_uuid(rid).sid
        assert dummy_request._sid_cache.get(rid) == found_sid
    # test validate_es_content with the correct sids and then an incorrect one
    valid = util.validate_es_content(source_ctxt, dummy_request, src_es_res_emb, 'embedded')
    assert valid is True
    # lastly, test purge_uuid and delete functionality
    with pytest.raises(webtest.AppError) as excinfo:
        del_res0 = testapp.delete_json('/' + source['uuid'] + '/?purge=True')
    assert 'Item status must equal deleted before purging' in str(excinfo.value)
    del_res1 = testapp.delete_json('/' + source['uuid'])
    assert del_res1.json['status'] == 'success'
    # this item will still have items linking to it until indexing occurs
    with pytest.raises(webtest.AppError) as excinfo:
        del_res2 = testapp.delete_json('/' + source['uuid'] + '/?purge=True')
    assert 'Cannot purge item as other items still link to it' in str(excinfo.value)
    # the source should fail due to outdated sids;
    # must manually update _sid_cache on dummy_request for source
    src_sid = dummy_request.registry[STORAGE].write.get_by_uuid(source['uuid']).sid
    dummy_request._sid_cache[source['uuid']] = src_sid
    valid2 = util.validate_es_content(source_ctxt, dummy_request, src_es_res_emb, 'embedded')
    assert valid2 is False
    # the target should fail due to outdated rev_links (at least frame=object);
    # need to get the target context again, otherwise we get a sqlalchemy error
    target_ctxt2 = traverse(dummy_request.root, target_res.json['@graph'][0]['@id'])['context']
    valid3 = util.validate_es_content(target_ctxt2, dummy_request, tar_es_res_obj, 'object')
    assert valid3 is False
    res = indexer_testapp.post_json('/index', {'record': True})
    del_res3 = testapp.delete_json('/' + source['uuid'] + '/?purge=True')
    assert del_res3.json['status'] == 'success'
    assert del_res3.json['notification'] == 'Permanently deleted ' + source['uuid']
    time.sleep(3)
    # make sure everything has updated on ES
    check_es_source = es.get(index='testing_link_source_sno',
                             doc_type='testing_link_source_sno',
                             id=source['uuid'], ignore=[404])
    assert check_es_source['found'] == False
    # source uuid removed from the target uuid
    check_es_target = es.get(index='testing_link_target_sno',
                             doc_type='testing_link_target_sno', id=target['uuid'])
    uuids_linked_emb2 = [link['uuid'] for link in check_es_target['_source']['linked_uuids_embedded']]
    assert source['uuid'] not in uuids_linked_emb2
    # the source is now purged
    testapp.get('/' + source['uuid'], status=404)
    # make sure check_es_and_cache_linked_sids fails for the purged item
    es_res_emb2 = util.check_es_and_cache_linked_sids(source_ctxt, dummy_request, 'embedded')
    assert es_res_emb2 is None

def test_resource_collection_resource():
    result = traverse(_root, "/rest/schools/%s/students/234" % SCHOOL_ID)
    context = result['context']
    assert isinstance(context, StudentResource)
    assert isinstance(context.model, Student)
    assert context.model.id == 234