# NOTE(review): this appears to be a duplicate of the closure defined inside
# `list()` below. As a top-level function it references many names that are
# only bound inside `list()` (`nodes_successfully_listed`, `path`, `base_url`,
# `level_setup`, `self`, `user`, `path_pointers`, `folder_listing_skiplist`,
# `root`) — confirm whether this copy is dead code that should be removed.
def handle_listing(listing, node):
    # Merge one node's directory listing into the shared accumulators.
    # `listing` is the node's listing dict (falsy when the node had nothing);
    # `node` is the node it came from. Yields Deferreds, so this is expected
    # to run under Twisted's inlineCallbacks machinery.
    if not listing:
        return
    nodes_successfully_listed.add(node.id)
    # Python 2: iteritems(); the folder key itself is unused below.
    for folder, items in listing['items'].iteritems():
        for item in items:
            # Parent of the item, relative to the listed path ('' at top level).
            relative_parent_path = '/'.join(item['path'].split('/')[:-1])
            # Absolute item path = requested path + item path, skipping empties.
            # `path.decode('utf-8')` implies `path` is a UTF-8 byte string here.
            item_path = '/'.join(x for x in [path.decode('utf-8'), item['path']] if x)
            href = '%s/%s' % (base_url, urllib.quote(item['path'].encode('utf-8')))
            rel = 'file'
            # Skip files that lie deeper than the level's configured stream depth.
            if 'stream_depth' in level_setup and level_setup['stream_depth'] < path_depth(relative_parent_path) and item['type'] == 'file':
                continue
            # Treat the item as a folder when no stream depth applies, when it is
            # within the stream depth, or when it matches the extra-folder pattern.
            # (`and` binds tighter than `or`, so the extra_folder pair is one term.)
            if 'stream_depth' not in level_setup or level_setup['stream_depth'] >= path_depth(item['path']) or self.extra_folder and self.extra_folder.match(item['name']):
                rel = 'folder'
            else:
                # Files streamed beyond the depth are pinned to the serving node.
                href += '?node=%s' % node.id
            item_data = self._create_item(rel, href, item['modified'], item['name'], item)
            # Let registered services post-process each item (may return Deferreds).
            for f in self.get_services('listing_result_item'):
                yield defer.maybeDeferred(f, self, user, item_path, item_data)
            path_pointers[relative_parent_path].append(item_data)
            if rel == 'folder':
                # Remember which node produced this folder, for the skiplist cache.
                folder_listing_skiplist[item_path].add(node.id)
    # Finally add the listing root itself as a folder entry under ''.
    item_data = self._create_item('folder', base_url, listing['modified'], listing['name'], listing)
    for f in self.get_services('listing_result_item'):
        yield defer.maybeDeferred(f, self, user, '', item_data)
    root.append(item_data)
def list(self, user, path, config):
    # Build a merged JSON listing of `path` across all of this section's nodes.
    #
    # Twisted inline-callbacks style: this generator yields Deferreds and exits
    # via defer.returnValue(); presumably decorated with @defer.inlineCallbacks
    # at the call site or above this chunk — confirm.
    #
    # Parameters (types inferred from usage — verify against callers):
    #   user   -- opaque user object, passed through to service hooks.
    #   path   -- requested path as a UTF-8 byte string (decoded below).
    #   config -- request options dict; reads 'filter_paths' and 'order_by'.
    # Returns (via returnValue): a JSON string, or an ErrorPage / NoResource
    # twisted.web resource on failure.
    level = path_depth(path)
    if level not in self.levels:
        # returnValue raises, so execution stops here for unsupported depths.
        defer.returnValue(ErrorPage(http.BAD_REQUEST, 'Unknown level', 'The path you have requested is at a depth that this section does not support'))
    level_setup = self.get_level(level)
    # Maps relative parent path -> list of child item dicts gathered from nodes.
    path_pointers = defaultdict(list)
    # Collects one root item per node listing; merged into a single dict later.
    root = []
    base_url = '/'.join(x for x in [
        urljoin(settings.BASE_URL, '/section'),
        urllib.quote(self.id),
        urllib.quote(path)] if x)
    if 'filter_paths' in config:
        # Normalize each filter path into a (parent_path, name) tuple so it can
        # be compared against items during tree assembly below.
        config['filter_paths'] = [('/'.join(y.split('/')[:-1]), y.split('/')[-1]) for y in config['filter_paths']]
    # item_path -> set of node ids that listed it as a folder (skiplist input).
    folder_listing_skiplist = defaultdict(set)
    nodes_successfully_listed = set()

    @defer.inlineCallbacks
    def handle_listing(listing, node):
        # Merge one node's listing into the accumulators above; `listing` is
        # falsy when the node returned nothing for this path.
        if not listing:
            return
        nodes_successfully_listed.add(node.id)
        for folder, items in listing['items'].iteritems():
            for item in items:
                # Parent of the item relative to `path` ('' at the top level).
                relative_parent_path = '/'.join(item['path'].split('/')[:-1])
                item_path = '/'.join(x for x in [path.decode('utf-8'), item['path']] if x)
                href = '%s/%s' % (base_url, urllib.quote(item['path'].encode('utf-8')))
                rel = 'file'
                # Drop files that lie deeper than the configured stream depth.
                if 'stream_depth' in level_setup and level_setup['stream_depth'] < path_depth(relative_parent_path) and item['type'] == 'file':
                    continue
                # Folder when: no stream depth configured, item is within the
                # stream depth, or the name matches the extra-folder pattern.
                if 'stream_depth' not in level_setup or level_setup['stream_depth'] >= path_depth(item['path']) or self.extra_folder and self.extra_folder.match(item['name']):
                    rel = 'folder'
                else:
                    # Deep-streamed files are pinned to the node that has them.
                    href += '?node=%s' % node.id
                item_data = self._create_item(rel, href, item['modified'], item['name'], item)
                # Service hooks may mutate item_data and/or return Deferreds.
                for f in self.get_services('listing_result_item'):
                    yield defer.maybeDeferred(f, self, user, item_path, item_data)
                path_pointers[relative_parent_path].append(item_data)
                if rel == 'folder':
                    # Record which node produced this folder (skiplist cache).
                    folder_listing_skiplist[item_path].add(node.id)
        # The listing root itself becomes a folder entry merged into `root`.
        item_data = self._create_item('folder', base_url, listing['modified'], listing['name'], listing)
        for f in self.get_services('listing_result_item'):
            yield defer.maybeDeferred(f, self, user, '', item_data)
        root.append(item_data)

    nodes = self.nodes
    # Checking if we can skip listing some nodes: walk up from `path` looking
    # for a cached skiplist; the first hit prunes nodes known to lack `path`.
    skipitems_path = path.split(u'/')
    while True:
        skipitems_key = u'skipitems:%s' % u'/'.join(skipitems_path)
        if skipitems_key in cache and path in cache[skipitems_key]:
            nodes = [node for node in self.nodes if node.id not in cache[skipitems_key][path]]
            break
        if not skipitems_path:
            break
        skipitems_path.pop()
    # Fan out the listing to every remaining node concurrently.
    d = []
    for node in nodes:
        d.append(defer.maybeDeferred(node.list, path).addCallback(handle_listing, node))
    yield defer.DeferredList(d)
    if not root:
        # No node produced a listing root for this path.
        defer.returnValue(NoResource('Unknown path'))
    # Whole-result service hooks run after all per-item hooks completed.
    for f in self.get_services('listing_result'):
        yield defer.maybeDeferred(f, self, user, path, path_pointers)
    # the first listing is fast for scanning and adding new elements
    #
    # the final listing is good for browsing around and solves some short-comings
    # that the other listing suffers from
    root = self.merge_items(root)
    root['result'] = []
    root['overwrite_title'] = self.levels.get(level, {}).get('overwrite_title', True)
    if level == 0:
        root['name'] = self.id
    # item_pointer maps each known folder path to its item dict; '' is the root.
    item_pointer = {'': root}
    # Sorted keys guarantee parents are processed before their children.
    for item_parent_path in sorted(path_pointers.keys()):
        items = path_pointers[item_parent_path]
        for item in items:
            if 'filter_paths' in config and (item_parent_path, item['name']) not in config['filter_paths']:
                continue
            if item['rel'] == 'file':
                # Lazily create the parent's result list on first file.
                if 'result' not in item_pointer[item_parent_path]:
                    item_pointer[item_parent_path]['result'] = []
                    item_pointer[item_parent_path]['overwrite_title'] = root['overwrite_title']
                item_pointer[item_parent_path]['result'].append(item)
            elif item['rel'] == 'folder':
                if item_parent_path:
                    item_path = '%s/%s' % (item_parent_path, item['name'])
                else:
                    item_path = item['name']
                # The same folder may arrive from several nodes: merge duplicates.
                if item_path in item_pointer:
                    item_pointer[item_path] = self.merge_items([item_pointer[item_path], item])
                else:
                    item_pointer[item_path] = item
    # Link every folder into its parent's result list. (Original comment said
    # "remove empty itamz", but no removal happens here — only attachment;
    # NOTE(review): confirm whether empty-folder pruning was intended.)
    # [1:] skips '' (the root), which has no parent.
    for item_path in sorted(item_pointer.keys())[1:]:
        item = item_pointer[item_path]
        parent_item_path = '/'.join(item_path.split('/')[:-1])
        parent_item = item_pointer[parent_item_path]
        if 'result' not in parent_item:
            parent_item['result'] = []
        parent_item['result'].append(item)
    # Filter, sort and clean every folder's children in place.
    for items in item_pointer.values():
        if 'result' not in items:
            continue
        self.filter(items['result'])
        self.sort(items['result'], config.get('order_by', 'name'))
        self.clean_items(items['result'])
    # Strip internal bookkeeping before serialization.
    del root['_original_item']
    if 'content_type' in level_setup:
        root['content_type'] = level_setup['content_type']
    # create skiplist for subfolder listings: for each folder, cache the set of
    # nodes that listed successfully but did NOT contain it, so future requests
    # for that folder can skip those nodes. Only meaningful with >1 node.
    if len(nodes_successfully_listed) > 1:
        final_folder_listing_skiplist = {}
        for item_path, nodes in folder_listing_skiplist.iteritems():
            nodes = nodes_successfully_listed - nodes
            if not nodes:
                continue
            final_folder_listing_skiplist[item_path] = nodes
        if final_folder_listing_skiplist:
            cache.set(u'skipitems:%s' % path, final_folder_listing_skiplist, timedelta(hours=2))
    defer.returnValue(json.dumps(root))