def _get_indexes(context) -> list:
    indexes = []
    system = find_catalog(context, 'system') or {}
    indexes.extend(system.values())
    adhocracy = find_catalog(context, 'adhocracy') or {}
    indexes.extend(adhocracy.values())
    return indexes

def ctest(context, request):
    demo_catalog = find_catalog(context, 'sdidemo')
    system_catalog = find_catalog(context, 'system')
    q = (demo_catalog['title'].eq('fred')
         & system_catalog['content_type'].eq('Document'))
    resultset = q.execute().all()
    request.response.content_type = 'text/plain'
    request.response.body = str(list(resultset))
    return request.response

def comments(self):
    context = self.context
    system_catalog = find_catalog(context, 'system')
    blog_catalog = find_catalog(context, 'blog')
    content_type = system_catalog['content_type']
    path = system_catalog['path']
    comments_path = self.request.resource_path(context['comments'])
    query = content_type.eq('Comment') & path.eq(comments_path)
    query_result = query.execute().sort(blog_catalog['pubdate'])
    return query_result

def show_blog(self):
    blog = self.context
    request = self.request
    system_catalog = find_catalog(blog, 'system')
    path = system_catalog['path']
    allowed = system_catalog['allowed']
    q = (path.eq(blog, depth=1, include_origin=False)
         & allowed.allows(request, 'view'))
    limit = self.posts_per_page
    year = request.params.get('year')
    month = request.params.get('month')
    if year and month:
        year, month = int(year), int(month)
        limit = None
        start = datetime.datetime(year, month, 1, 0, 0, tzinfo=UTC)
        if month == 12:
            end = datetime.datetime(year + 1, 1, 1, 0, 0, tzinfo=UTC)
        else:
            end = datetime.datetime(year, month + 1, 1, 0, 0, tzinfo=UTC)
        catalog = find_catalog(blog, 'navel')
        pub_date = catalog['pub_date']
        q &= query.InRange(pub_date, start, end)
    results = q.execute()
    results = pub_date_sorter(blog, results, reverse=True)
    if limit and len(results) > limit:
        offset = int(request.params.get('offset', 0))
        last = offset + limit
        ids = itertools.islice(results.ids, offset, last)
        pager = [
            {'title': 'Older',
             'url': request.resource_url(blog, query={'offset': last}),
             'disabled': " disabled" if last >= len(results) else ""},
            {'title': 'Newer',
             'url': request.resource_url(blog, query={'offset': offset - limit}),
             'disabled': " disabled" if offset <= 0 else ""},
        ]
    else:
        pager = None
        ids = results.ids
    objectmap = find_objectmap(blog)
    entries = map(self.get_info, map(objectmap.object_for, ids))
    return {'entries': entries, 'pager': pager}

def test_create_root_with_initial_content(self, registry):
    from adhocracy_core.resources.root import IRootPool
    from adhocracy_core.utils import find_graph
    from substanced.util import find_objectmap
    from substanced.util import find_catalog
    from substanced.util import find_service
    inst = registry.content.create(IRootPool.__identifier__)
    assert IRootPool.providedBy(inst)
    assert find_objectmap(inst) is not None
    assert find_graph(inst) is not None
    assert find_graph(inst)._objectmap is not None
    assert find_catalog(inst, 'system') is not None
    assert find_catalog(inst, 'adhocracy') is not None
    assert find_service(inst, 'principals', 'users') is not None
    assert find_service(inst, 'locations') is not None

def response(self):
    page = int(self.request.params.get('page') or 0)
    description_only = self.request.params.get('description_only', False)
    catalog = find_catalog(self.context, 'system')
    content_type = catalog['content_type']
    query = content_type.eq(self.context.target_content_type)
    if self.context.path is not None:
        path = catalog['path']
        query &= path.eq(self.context.path, include_origin=False)
    resultset = [i for i in query.execute()]
    resultset.sort(
        key=attrgetter(self.context.sort_field),
        reverse=self.context.sort_inverse)
    if description_only:
        resultset = [
            {'title': e.title,
             'name': e.name,
             'description': e.short_description}
            for e in resultset]
        page_to_show = slice(None)
    else:
        resultset = [str(render_view(e, self.request)) for e in resultset]
        page_to_show = slice(page, page + self.context.total_results or None)
    return {
        'title': self.context.title,
        'text': self.context.text,
        'items': resultset[page_to_show],
        'page': page,
        'pages': len(resultset),
    }

def evolve1_add_ititle_sheet_to_proposals(root):  # pragma: no cover
    """Migrate title value from old IIntroduction sheet to ITitle sheet."""
    registry = get_current_registry()
    catalog = find_catalog(root, 'system')
    path = catalog['path']
    interfaces = catalog['interfaces']
    query = path.eq('/mercator') \
        & interfaces.eq(IMercatorProposalVersion) \
        & interfaces.noteq(ITitle)
    proposals = query.execute()
    catalogs = find_service(root, 'catalogs')
    for proposal in proposals:
        logger.info('updating {0}'.format(proposal))
        introduction = get_sheet_field(proposal, IMercatorSubResources,
                                       'introduction')
        if introduction == '' or introduction is None:
            continue
        alsoProvides(proposal, ITitle)
        catalogs.reindex_index(proposal, 'interfaces')
        sheet = registry.content.get_sheet(introduction, IIntroduction)
        if 'title' not in sheet.get().keys():
            continue
        value = sheet.get()['title']
        title = registry.content.get_sheet(proposal, ITitle)
        title.set({'title': value})
        sheet.delete_field_values(['title'])

def evolve1_add_ititle_sheet_to_proposals(root, registry):  # pragma: no cover
    """Migrate title value from old IIntroduction sheet to ITitle sheet."""
    catalog = find_catalog(root, 'system')
    path = catalog['path']
    interfaces = catalog['interfaces']
    query = path.eq('/mercator') \
        & interfaces.eq(IMercatorProposalVersion) \
        & interfaces.noteq(ITitle)
    proposals = query.execute()
    catalogs = find_service(root, 'catalogs')
    for proposal in proposals:
        logger.info('updating {0}'.format(proposal))
        introduction = registry.content.get_sheet_field(proposal,
                                                        IMercatorSubResources,
                                                        'introduction')
        if introduction == '' or introduction is None:
            continue
        alsoProvides(proposal, ITitle)
        catalogs.reindex_index(proposal, 'interfaces')
        sheet = registry.content.get_sheet(introduction, IIntroduction)
        if 'title' not in sheet.get().keys():
            continue
        value = sheet.get()['title']
        title = registry.content.get_sheet(proposal, ITitle)
        title.set({'title': value})
        sheet.delete_field_values(['title'])

def list_resource_with_descendants(resource: IResource) -> Iterable:
    """List all descendants of a resource, including the resource itself."""
    system_catalog = find_catalog(resource, 'system')
    if system_catalog is None:  # ease testing
        return []
    path_index = system_catalog['path']
    query = path_index.eq(resource_path(resource), include_origin=True)
    return query.execute()

def has_services(context, request):
    catalog = find_catalog(context, 'system')
    ifaces = catalog['interfaces']
    path = catalog['path']
    q = (path.eq(context, depth=1, include_origin=False)
         & ifaces.any([IService]))
    result = bool(len(q.execute()))
    return result

def _ensure_rate_is_unique(self, node, value, request):
    # Other rates with the same subject and object may occur below the
    # current context (earlier versions of the same rate item).
    # If they occur elsewhere, an error is thrown.
    adhocracy_catalog = find_catalog(request.context, 'adhocracy')
    index = adhocracy_catalog['reference']
    reference_subject = Reference(None, IRate, 'subject', value['subject'])
    query = index.eq(reference_subject)
    reference_object = Reference(None, IRate, 'object', value['object'])
    query &= index.eq(reference_object)
    system_catalog = find_catalog(request.context, 'system')
    path_index = system_catalog['path']
    query &= path_index.noteq(resource_path(request.context), depth=None)
    elements = query.execute(resolver=None)
    if elements:
        err = colander.Invalid(node)
        err['object'] = 'Another rate by the same user already exists'
        raise err

def _get_feed_info(self):
    context = self.context
    request = self.request
    feed = {
        "rss_url": request.application_url + "/rss.xml",
        "atom_url": request.application_url + "/index.atom",
        "blog_url": request.application_url,
        "title": context.sdi_title,
        "description": context.description,
    }

    def _add_updated_strings(updated, info):
        if getattr(updated, 'now', None) is None:
            y, mo, d, h, mi, s = updated.timetuple()[:6]
            updated = datetime.datetime(y, mo, d, h, mi, s, tzinfo=pytz.utc)
        info['updated_atom'] = updated.isoformat()
        info['updated_rss'] = updated.strftime('%a, %d %b %Y %H:%M:%S %z')

    system_catalog = find_catalog(context, 'system')
    blog_catalog = find_catalog(context, 'blogentry')
    content_type = system_catalog['content_type']
    query = content_type.eq('Blog Entry')
    result = query.execute().sort(blog_catalog['pubdate'], reverse=True)
    blogentries = []
    for blogentry in result:
        if request.registry.content.istype(blogentry, 'Blog Entry'):
            updated = blogentry.pubdate
            info = {'url': resource_url(blogentry, request),
                    'title': blogentry.title,
                    'body': _getentrybody(blogentry.format, blogentry.entry),
                    'created': updated,
                    'pubdate': updated,
                    }
            _add_updated_strings(updated, info)
            blogentries.append((updated, info))
    blogentries.sort(key=lambda x: x[0].isoformat())
    blogentries = [entry[1] for entry in reversed(blogentries)][:15]
    updated = blogentries and blogentries[0]['pubdate'] or self._nowtz()
    _add_updated_strings(updated, feed)
    return feed, blogentries

def blogview(context, request):
    system_catalog = find_catalog(context, 'system')
    blog_catalog = find_catalog(context, 'blog')
    content_type = system_catalog['content_type']
    query = content_type.eq('Blog Entry')
    query_result = query.execute().sort(blog_catalog['pubdate'], reverse=True)
    blogentries = []
    for blogentry in query_result:
        blogentries.append(
            {'url': resource_url(blogentry, request),
             'title': blogentry.title,
             'body': _getentrybody(blogentry.format, blogentry.entry),
             'pubdate': blogentry.pubdate,
             'attachments': [
                 {'name': a.__name__,
                  'url': resource_url(a, request, 'download')}
                 for a in blogentry['attachments'].values()],
             'numcomments': len(blogentry['comments'].values()),
             })
    return dict(blogentries=blogentries)

def blogentry_search(context, request):
    search_text = request.GET.get('q')
    if search_text:
        catalog = find_catalog(context, 'blogentry')
        entry = catalog['titleentry']
        q = entry.contains(search_text)
        matched = q.execute().sort(catalog['pubdate'], reverse=True)
    else:
        matched = []
    return {'searchtext': search_text, 'matchedentries': matched}

def blogview(context, request):
    system_catalog = find_catalog(context, 'system')
    blog_catalog = find_catalog(context, 'blogentry')
    content_type = system_catalog['content_type']
    query = content_type.eq('Blog Entry')
    blogentries = []
    result = query.execute().sort(blog_catalog['pubdate'], reverse=True)
    for blogentry in itertools.islice(result, 10):
        blogentries.append({
            'url': resource_url(blogentry, request),
            'title': blogentry.title,
            'body': _getentrybody(blogentry.format, blogentry.entry),
            'pubdate': blogentry.pubdate,
            'attachments': [{'name': a.__name__,
                             'url': resource_url(a, request, 'download')}
                            for a in blogentry['attachments'].values()],
            'numcomments': len(blogentry['comments'].values()),
            'tags': [{'name': tag.name, 'url': resource_url(tag, request)}
                     for tag in blogentry.tags],
        })
    blogentries.sort(key=lambda x: x['pubdate'].isoformat())
    blogentries.reverse()
    return dict(blogentries=blogentries)

def blog_toc(blog, request):
    current_url = request.url

    def info(yearmonth):
        year, month = yearmonth
        dt = datetime.date(year, month, 1)
        url = request.resource_url(blog, query={'year': year, 'month': month})
        active = current_url == url
        return {
            'title': dt.strftime("%B %Y"),
            'class': 'active' if active else '',
            'url': url}

    catalog = find_catalog(blog, 'navel')
    months = sorted(set([(dt.year, dt.month)
                         for dt in catalog['pub_date']._fwd_index.keys()]),
                    reverse=True)
    return {'contents': map(info, months)}

def _find_index_if_arbitrary_filter_node(name: str,
                                         context: IResource) -> SDIndex:
    """Find the referenced index if `name` refers to an arbitrary catalog index.

    Throws an exception otherwise. If there are no catalogs, `None` is
    returned to facilitate testing.
    """
    catalog = find_catalog(context, 'adhocracy')
    if not catalog:
        return None
    if name in catalog and not name.startswith('private_'):
        return catalog[name]
    else:
        raise_colander_style_error(None, name, 'No such catalog')

def get(self):
    """Return a list of instances available to the current user."""
    catalog = find_catalog(self.request.context, 'system')
    content_type = catalog['content_type']
    allowed = catalog['allowed']
    name = catalog['name']
    q = content_type.eq('JIRA Instance') & allowed.allows(
        self.request, 'sdi.view')
    results = q.execute()
    # sort() returns a new resultset rather than sorting in place
    results = results.sort(name)
    return [self.instance_json(instance) for instance in results]

def find_instance(context, request, instance_id, permission='sdi.view'):
    """Use a catalog search to find the instance with the given id."""
    catalog = find_catalog(context, 'system')
    content_type = catalog['content_type']
    allowed = catalog['allowed']
    name = catalog['name']
    q = (content_type.eq('JIRA Instance')
         & allowed.allows(request, permission)
         & name.eq(instance_id))
    results = q.execute()
    if len(results) == 0:
        raise HTTPNotFound(instance_id)
    if len(results) > 1:
        raise HTTPInternalServerError(
            "More than one record matches %s" % instance_id)
    return results.one()

def evolve1_add_ititle_sheet_to_proposals(root):  # pragma: no cover
    """Migrate title value from old IIntroduction sheet to ITitle sheet."""
    registry = get_current_registry()
    catalog = find_catalog(root, 'system')
    path = catalog['path']
    interfaces = catalog['interfaces']
    query = path.eq('/mercator') \
        & interfaces.eq(IMercatorProposalVersion) \
        & interfaces.noteq(ITitle)
    proposals = query.execute()
    for proposal in proposals:
        logger.info('updating {0}'.format(proposal))
        introduction = get_sheet_field(proposal, IMercatorSubResources,
                                       'introduction')
        if introduction == '' or introduction is None:
            continue
        alsoProvides(proposal, ITitle)
        if 'title' not in introduction._sheets[IIntroduction.__identifier__]:
            continue
        value = introduction._sheets[IIntroduction.__identifier__]['title']
        title = registry.content.get_sheet(proposal, ITitle)
        title.set({'title': value})
        del introduction._sheets[IIntroduction.__identifier__]['title']

def system_catalog(self):
    return find_catalog(self.context, 'system')

def get_all_documents(context, request):
    catalog = find_catalog(context, 'system')
    interfaces = catalog['interfaces']
    docs = interfaces.eq(IDemoContent).execute().all()
    return map(lambda x: (get_oid(x), x.name), docs)

def find_index(context, catalog_name, index_name):
    catalog = find_catalog(context, catalog_name)
    return catalog[index_name]

def forms(self):
    catalog = find_catalog(self, 'system')
    content_type = catalog['content_type']
    path = catalog['path']
    q = content_type.eq('Form') & path.eq(self.path)
    return q.execute()

def pub_date_sorter(resource, resultset, limit=None, reverse=False):
    catalog = find_catalog(resource, 'navel')
    index = catalog['pub_date']
    resultset = resultset.sort(index, limit=limit, reverse=reverse)
    return resultset