def count(node):
    """Recursively tally content statistics for ``node`` and its subtree.

    Increments the enclosing-scope ``stats`` counters by content type and
    records creators of recently-created content (within ``THIRTY_DAYS`` of
    ``now``) in the enclosing ``active_users`` mapping.  Deactivates each
    persistent node after visiting it to keep memory bounded.

    NOTE(review): the original imported ``pyramid.traversal.resource_path``
    but never used it; the dead import has been removed.
    """
    # First interface match wins; each node counts under at most one type.
    if IWikiPage.providedBy(node):
        stats['wiki_pages'] += 1
    elif IBlogEntry.providedBy(node):
        stats['blog_entries'] += 1
    elif IComment.providedBy(node):
        stats['comments'] += 1
    elif ICommunityFile.providedBy(node):
        stats['files'] += 1
    elif ICalendarEvent.providedBy(node):
        stats['calendar_events'] += 1

    created = getattr(node, 'created', None)
    if created is not None and now - created < THIRTY_DAYS:
        creator = getattr(node, 'creator', None)
        if creator is not None:
            # Idiomatic occurrence count; no need to pre-seed the key.
            active_users[creator] = active_users.get(creator, 0) + 1

    # Recurse only into container-like nodes.
    if hasattr(node, '__getitem__') and hasattr(node, 'values'):
        for child in node.values():
            count(child)

    # Ghostify the persistent (ZODB) object to release its state.
    if hasattr(node, '_p_deactivate'):
        node._p_deactivate()
def count(node):
    """Recursively tally content statistics for ``node`` and its subtree.

    Increments the enclosing-scope ``stats`` counters by content type and
    records creators of recently-created content (within ``THIRTY_DAYS`` of
    ``now``) in the enclosing ``active_users`` mapping.  Deactivates each
    persistent node after visiting it to keep memory bounded.

    NOTE(review): the original imported ``repoze.bfg.traversal.model_path``
    but never used it; the dead import has been removed.
    """
    # First interface match wins; each node counts under at most one type.
    if IWikiPage.providedBy(node):
        stats['wiki_pages'] += 1
    elif IBlogEntry.providedBy(node):
        stats['blog_entries'] += 1
    elif IComment.providedBy(node):
        stats['comments'] += 1
    elif ICommunityFile.providedBy(node):
        stats['files'] += 1
    elif ICalendarEvent.providedBy(node):
        stats['calendar_events'] += 1

    created = getattr(node, 'created', None)
    if created is not None and now - created < THIRTY_DAYS:
        creator = getattr(node, 'creator', None)
        if creator is not None:
            # Idiomatic occurrence count; no need to pre-seed the key.
            active_users[creator] = active_users.get(creator, 0) + 1

    # Recurse only into container-like nodes.
    if hasattr(node, '__getitem__') and hasattr(node, 'values'):
        for child in node.values():
            count(child)

    # Ghostify the persistent (ZODB) object to release its state.
    if hasattr(node, '_p_deactivate'):
        node._p_deactivate()
def show_history(context, request, tz=None):
    """Build the template data for the version-history view of ``context``.

    Returns a dict with the rendered history records (newest to oldest),
    the page API, ajax flag, back-link data and lock info.
    """
    repo = find_repo(context)
    profiles = find_profiles(context)

    # Downloading files via ajax triggers the information bar in IE, so
    # when the context is a file we disable ajax and use the download view.
    if ICommunityFile.providedBy(context):
        use_ajax = False
        preview_view = 'download_preview'
    else:
        use_ajax = True
        preview_view = 'preview.html'

    def display_record(record):
        editor = profiles[record.user]
        version_query = {'version_num': str(record.version_num)}
        return {
            'date': format_local_date(record.archive_time, tz),
            'editor': {
                'name': editor.title,
                'url': resource_url(editor, request),
            },
            'preview_url': resource_url(
                context, request, preview_view, query=version_query),
            'restore_url': resource_url(
                context, request, 'revert', query=version_query),
            'is_current': record.current_version == record.version_num,
        }

    # newest to oldest
    history = [display_record(record) for record in repo.history(context.docid)]

    page_title = 'History for %s' % context.title
    return {
        'api': TemplateAPI(context, request, page_title),
        'history': history,
        'use_ajax': use_ajax,
        'backto': {
            'href': resource_url(context, request),
            'title': context.title,
        },
        'lock_info': lock_info_for_view(context, request),
    }
def visit(self, context):
    """Accumulate per-community statistics while walking the content tree.

    A community node starts a fresh ``self.row``; any other node updates
    the counters of the most recently seen community.  Nodes visited
    before any community are ignored.  Tags are collected for community
    and content nodes alike.
    """
    if ICommunity.providedBy(context):
        self.community = context
        self.row = {
            'community': context.title,
            'id': context.__name__,
            'is_private': is_private(context),
            'members': len(context.member_names),
            'moderators': len(context.moderator_names),
            'last_activity': context.content_modified,
            'create_date': context.created,
            'wiki_pages': 0,
            'blog_entries': 0,
            'blog_comments': 0,
            'files': 0,
            'calendar_events': 0,
            'community_tags': set(),
            'hits_this_month': 'Unknown',
            'percent_engaged': 'Unknown',
        }
    else:
        if self.community is None:
            # Content outside any community: nothing to attribute it to.
            return
        last_activity = getattr(context, 'content_modified', None)
        if (last_activity is not None
                and last_activity > self.row['last_activity']):
            self.row['last_activity'] = last_activity
        # First matching interface wins; a node bumps at most one counter.
        counters = (
            (IWikiPage, 'wiki_pages'),
            (IBlogEntry, 'blog_entries'),
            (IComment, 'blog_comments'),
            (ICommunityFile, 'files'),
            (ICalendarEvent, 'calendar_events'),
        )
        for iface, key in counters:
            if iface.providedBy(context):
                self.row[key] += 1
                break

    tags = find_tags(context)
    docid = getattr(context, 'docid', None)
    if docid is not None:
        for tag in tags.getTags([docid]):
            self.row['community_tags'].add(tag)
def show_history(context, request, tz=None):
    """Return template data for the version-history page of ``context``.

    Produces history records newest-to-oldest plus the page API, ajax
    flag, back-link and lock information.
    """
    repo = find_repo(context)
    profiles = find_profiles(context)

    # IE shows an information bar when files are downloaded via ajax, so
    # files get a non-ajax download view instead of the inline preview.
    is_file = ICommunityFile.providedBy(context)
    use_ajax = not is_file
    preview_view = 'download_preview' if is_file else 'preview.html'

    def display_record(record):
        editor = profiles[record.user]
        query = {'version_num': str(record.version_num)}
        return {
            'date': format_local_date(record.archive_time, tz),
            'editor': {
                'name': editor.title,
                'url': resource_url(editor, request),
            },
            'preview_url': resource_url(
                context, request, preview_view, query=query),
            'restore_url': resource_url(
                context, request, 'revert', query=query),
            'is_current': record.current_version == record.version_num,
        }

    # newest to oldest
    history = [display_record(r) for r in repo.history(context.docid)]

    page_title = 'History for %s' % context.title
    backto = {
        'href': resource_url(context, request),
        'title': context.title,
    }
    return {
        'api': TemplateAPI(context, request, page_title),
        'history': history,
        'use_ajax': use_ajax,
        'backto': backto,
        'lock_info': lock_info_for_view(context, request),
    }
def _get_viewall(context, request, api):
    """Get the nested data used by ZPT for showing the refman TOC.

    Walks the sections in ``context`` (in ``ordering`` order) and, for
    each, collects its entries: pages contribute their text, community
    files an inline-file rendering, anything else a placeholder.
    """
    # Be defensive: make sure the ordering agrees with the actual keys.
    context.ordering.sync(context.keys())

    sections = []
    for section_name in context.ordering.items():
        section = context.get(section_name)
        section.ordering.sync(section.keys())

        entries = []
        for entry_name in section.ordering.items():
            entry = section.get(entry_name)
            # Pages render their own text; files are rendered through the
            # inline-file template; anything else gets a placeholder.
            if IPage.providedBy(entry):
                html = entry.text
            elif ICommunityFile.providedBy(entry):
                fileinfo = getMultiAdapter((entry, request), IFileInfo)
                html = render_template(
                    'templates/inline_file.pt',
                    api=api,
                    fileinfo=fileinfo,
                )
            else:
                html = '<p>Unknown type</p>'
            entries.append({
                'name': entry_name,
                'title': entry.title,
                'html': html,
            })

        sections.append({
            'name': section_name,
            'title': section.title,
            'html': '<p>%s</p>' % section.description,
            'items': entries,
        })

    return sections
def export_blobs(context, parentid, xml):
    """Export every ICommunityFile child of ``context`` to disk.

    Writes the blobs under ``<community dir>/attachments/<parentid>/`` and
    emits an ``<attachments>`` element (one ``<attachment>`` per file) to
    the ``xml`` writer.  Emits nothing when there are no file children.
    """
    attachments = [child for child in context.values()
                   if ICommunityFile.providedBy(child)]
    if not attachments:
        return

    xml.startElement('attachments', {})
    community = find_community(context)
    parentpath = os.path.join(
        get_community_path(community), 'attachments', parentid)
    ensure_dir(parentpath)
    for attachment in attachments:
        export_file(attachment, parentpath)
        simple_element('attachment', attachment.__name__, xml)
    xml.endElement('attachments')
def vocabulary_view(context, request):
    """Query the catalog (or resolve explicit UIDs) and return a JSON-able
    ``{'results': [...], 'total': n}`` payload for vocabulary widgets.

    Request parameters used:
      - ``attributes``: JSON list of attribute names to include per item
        (default ``["title", "id"]``; ``UID`` is always included).
      - ``batch``: JSON dict with 1-based ``page`` and ``size`` keys.
      - ``query``: JSON search query; a ``UID`` key short-circuits the
        catalog search and resolves those docids directly.

    Fixes vs. original: bare ``except:`` clauses narrowed to the specific
    exceptions (``json.loads`` raises ``ValueError``; a missing ``batch``
    param yields ``None`` which raises ``TypeError``; ``int()`` raises
    ``TypeError``/``ValueError``), and ``type(x) not in (...)`` replaced
    with ``isinstance``.
    """
    try:
        attributes = json.loads(
            request.params.get('attributes', '["title", "id"]'))
    except ValueError:
        # Malformed JSON from the client; fall back to the defaults.
        attributes = ['title', 'id']
    if 'UID' in attributes:
        # always put in anyways
        attributes.remove('UID')

    try:
        batch = json.loads(request.params.get('batch'))
    except (TypeError, ValueError):
        # 'batch' param missing (None) or not valid JSON.
        batch = DEFAULT_BATCH

    query = normalize_query(json.loads(request.params['query']))
    criteria = parse_query(query)
    resolver = ResovlerFactory(context)
    if 'UID' in query:
        # Explicit docids requested: skip the catalog search entirely.
        docids = query['UID']
        if not isinstance(docids, (list, tuple)):
            docids = [docids]
        # Convert to ints, silently dropping non-numeric values.
        new_docids = []
        for docid in docids:
            try:
                new_docids.append(int(docid))
            except (TypeError, ValueError):
                pass
        docids = new_docids
        numdocs = len(docids)
    else:
        criteria.append(Any('allowed', effective_principals(request)))
        if 'title' not in query:
            # we default to requiring a title in these results or
            # else we get a bunch of junky results
            criteria.append(NotEq('title', ''))
        catalog = find_catalog(context)
        numdocs, docids = catalog.query(And(*criteria))

    if batch and ('size' not in batch or 'page' not in batch):
        # Incomplete batch spec: fall back to the default batching.
        batch = DEFAULT_BATCH
    if batch:
        # must be slicable for batching support
        page = int(batch['page'])
        # page being passed in is 1-based
        start = max(page - 1, 0) * int(batch['size'])
        end = start + int(batch['size'])
        # Try __getitem__-based slice, then iterator slice.
        # The iterator slice has to consume the iterator through
        # to the desired slice, but that shouldn't be the end
        # of the world because at some point the user will hopefully
        # give up scrolling and search instead.
        try:
            docids = docids[start:end]
        except TypeError:
            docids = itertools.islice(docids, start, end)

    # build result items
    items = []
    for docid in docids:
        result = resolver(docid)
        if result is None:
            # Stale docid with no resolvable content; skip it.
            continue
        data = {'UID': docid}
        for attribute in attributes:
            attr = _attribute_mapping.get(attribute, attribute)
            if attr in ('Type', 'portal_type'):
                value = 'Page'
                if IImage.providedBy(result):
                    value = 'Image'
                elif ICommunityFile.providedBy(result):
                    value = 'File'
                elif IFolder.providedBy(result):
                    value = 'Folder'
            elif attr == 'getURL':
                value = resource_url(result, request)
            elif attr == 'path':
                # a bit weird here...
                value = resource_path(result, request).split('/GET')[0]
            else:
                value = getattr(result, attr, None)
            data[attribute] = value
        items.append(data)
    return {
        'results': items,
        'total': numdocs
    }