def list_threads():
    """Return all threads ordered by latest activity, newest first.

    Thread hashes are fetched from the content database in a single
    pipeline round-trip; comment counters and last-comment hashes are
    then batch-fetched only for threads that reference a last comment.
    """
    def sort_key(pair):
        """Sort by last-comment timestamp, falling back to thread start."""
        data = pair[1]
        started = data['timestamp']
        last = data.get('last_comment', {}).get('timestamp')
        return last or started

    # Read every thread hash from the content database in one pipeline
    uids = []
    with content.pipeline() as pipe:
        for uid in links.lrange(build_key(THREADS_KEY), 0, -1):
            pipe.hgetall(build_key(THREAD_KEY, uid))
            uids.append(uid)
        threads = dict(zip(uids, pipe.execute()))

    # Collect threads that point at a last comment. We assume that the
    # last comment and the comments counter exist only for threads that
    # actually have comments.
    comments_request = OrderedDict()
    for uid, data in iteritems(threads):
        comment_uid = data.get('last_comment_uid')
        if comment_uid:
            comments_request[uid] = comment_uid

    if comments_request:
        # Comment counters live in the links database
        with links.pipeline() as pipe:
            for uid in iterkeys(comments_request):
                pipe.get(build_key(THREAD_COUNTER_KEY, uid))
            for uid, counter in zip(iterkeys(comments_request), pipe.execute()):
                threads[uid]['comments_counter'] = counter

        # Last-comment hashes live in the content database
        with content.pipeline() as pipe:
            for uid, comment_uid in iteritems(comments_request):
                pipe.hgetall(build_key(COMMENT_KEY, uid, comment_uid))
            for uid, comment in zip(iterkeys(comments_request), pipe.execute()):
                threads[uid]['last_comment'] = comment

    return OrderedDict(sorted(iteritems(threads), key=sort_key, reverse=True))
def __build_export(self, stats):
    """Build the export lists.

    Flatten *stats* — a dict (possibly nested) or a list of dicts —
    into two parallel lists of dotted lowercase names and their values.

    Returns:
        tuple: ``(export_names, export_values)``
    """
    export_names = []
    export_values = []
    if isinstance(stats, dict):
        # Stats is a dict
        # Is there a key ?  If the dict declares a 'key' field and that
        # field's value names another entry, prefix every exported name
        # with that entry's value (e.g. per-item identification).
        if 'key' in iterkeys(stats) and stats['key'] in iterkeys(stats):
            pre_key = '{}.'.format(stats[stats['key']])
        else:
            pre_key = ''
        # Walk through the dict
        for key, value in iteritems(stats):
            if isinstance(value, bool):
                # Export booleans as JSON literals ('true' / 'false')
                value = json.dumps(value)
            if isinstance(value, list):
                # Only the first element of a list is exported;
                # an empty list exports as an empty string
                try:
                    value = value[0]
                except IndexError:
                    value = ''
            if isinstance(value, dict):
                # Recurse into nested dicts, prefixing nested names
                # with the parent key
                item_names, item_values = self.__build_export(value)
                item_names = [pre_key + key.lower() + str(i) for i in item_names]
                export_names += item_names
                export_values += item_values
            else:
                export_names.append(pre_key + key.lower())
                export_values.append(value)
    elif isinstance(stats, list):
        # Stats is a list (of dict)
        # Recursive loop through the list
        for item in stats:
            item_names, item_values = self.__build_export(item)
            export_names += item_names
            export_values += item_values
    # BUG FIX: the original fell off the end and implicitly returned
    # None, which broke the recursive tuple-unpacking calls above.
    return export_names, export_values
def match_files(self, files, separators=None):
    """
    Matches the files to this path-spec.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
    the file paths to be matched against :attr:`self.patterns
    <PathSpec.patterns>`.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`;
    or :data:`None`) optionally contains the path separators to
    normalize. See :func:`~pathspec.util.normalize_file` for more
    information.

    Returns the matched files (:class:`~collections.abc.Iterable` of
    :class:`str`).
    """
    # Reject a bare string early: iterating it would match each
    # character as a separate "file".
    if isinstance(files, (bytes, unicode)):
        raise TypeError("files:{!r} is not an iterable.".format(files))

    # Map normalized paths back to the caller's original paths so the
    # generator can yield the originals.
    file_map = util.normalize_files(files, separators=separators)
    for norm_path in util.match_files(self.patterns, iterkeys(file_map)):
        yield file_map[norm_path]
def keys(self):
    """Return an iterator over the keys, delegating to the parent class."""
    parent = super(TimerDict, self)
    return iterkeys(parent)