def refresh_cache(self, path, encoding=None):
    """Unconditionally re-read *path* and store ``(mtime, data)`` in the cache."""
    abs_path = self.absolute(path)
    # A falsy encoding (None or '') falls back to the instance default.
    contents = read_from(abs_path, encoding=encoding or self.encoding)
    # Read first, then stat, matching the cache-staleness convention.
    self.cache[abs_path] = (os.stat(abs_path).st_mtime, contents)
def crumbs(self, path):
    """
    Produce a breadcrumbs list for the given filename.

    The crumbs are calculated based on the wiki root and the absolute
    path to the current file. Each crumb is a ``(slug, label, href)``
    triple; the final crumb (the current page) has ``href=None``.

    Examples
    --------
    Assuming a wiki root of `/a/b/c`:

    * `a/b/c/wiki/index.md` =>
      `[('index', 'Home', None)]`
    * `a/b/c/wiki/subdir/index.md` =>
      `[('index', 'Home', '/'), ('subdir', 'Subdir', None)]`
    * `a/b/c/wiki/subdir/file.md` =>
      `[('index', 'Home', '/'), ('subdir', 'Subdir', '/subdir/'),
        ('file', <title>, None)]`
    """
    # Map the '/'-delimited URL path onto the filesystem under the wiki
    # root so the page contents (and hence its display title) can be read.
    fp = path.strip('/')
    fs_rel_fn = p.sep.join(fp.split('/'))
    fs_abs_fn = p.join(self.config.wiki_dir, fs_rel_fn)
    contents = read_from(fs_abs_fn)
    slug = p.splitext(p.basename(fs_abs_fn))[0]
    title = get_title(slug, contents)
    if p.isabs(path):
        # Re-express absolute paths relative to the document cache root.
        # NOTE(review): presumably doc_cache.relative returns a p.sep-
        # delimited relative path — confirm against DocumentCache.
        path = self.doc_cache.relative(path)
    rel_components = path.split(p.sep)
    # The last component (extension stripped) is the current page's slug.
    terminus = p.splitext(rel_components.pop())[0]
    if not rel_components:
        # Top-level page: either the home page itself, or one hop from it.
        if terminus == 'index':
            return [('index', 'Home', None)]
        return [('index', 'Home', '/'), (terminus, terminus.capitalize(), None)]
    elif terminus == 'index':
        # `subdir/index` is presented as the `subdir` crumb itself, so the
        # directory name becomes the terminal crumb.
        terminus = p.splitext(rel_components.pop())[0]
    crumbs = [('index', 'Home', '/')]
    for component in rel_components:
        # Each intermediate href extends the previous crumb's href.
        path = '%s%s/' % (crumbs[-1][2], component)
        crumbs.append((component, component.capitalize(), path))
    # The terminal crumb uses the page title extracted from the contents.
    crumbs.append((terminus, title, None))
    return crumbs
def get(self, path, cache=True, encoding=None):
    """
    Retrieve the data for a given path, optionally using the cache.

    With ``cache=True`` the cache is refreshed to the latest on-disk
    version and the cached data returned; otherwise the file is read
    directly, returning ``None`` when it does not exist.
    """
    abs_path = self.absolute(path)

    if cache:
        # NOTE(review): no isfile() guard on this branch — a missing file
        # surfaces from update_to_latest_version / the cache lookup rather
        # than returning None; confirm that is the intended contract.
        self.update_to_latest_version(abs_path)
        return self.cache[abs_path][1]  # cache stores (mtime, data)

    if not p.isfile(abs_path):
        return None
    return read_from(
        abs_path,
        encoding=self.encoding if encoding is None else encoding)
def listing_context(self, directory):
    """
    Generate the template context for a directory listing.

    This method accepts a relative path, with the base assumed to be the
    HTML root. This means listings must be generated after the wiki is
    built, allowing them to list static media too.

    Directories should always be '/'-delimited when specified, since it
    is assumed that they are URL paths, not filesystem paths.

    For information on what the produced context will look like, consult
    the `listing` doctest.
    """
    # Normalize to a bare, slash-free URL path segment.
    directory = directory.strip('/')

    # Map the URL path onto the filesystem under the HTML root.
    listing_dir = p.join(self.config.html_dir, p.sep.join(directory.split('/')))
    hidden = set([self.config['listing-filename'], 'index.html'])

    sub_directories = []
    pages = []
    files = []

    for name in os.listdir(listing_dir):
        full_path = p.join(listing_dir, name)
        href = directory + '/' + name
        if not href.startswith('/'):
            href = '/' + href
        entry = {'basename': name, 'href': href}

        if p.isdir(full_path):
            entry['href'] += '/'
            sub_directories.append(entry)
            continue

        # Skip the listing/index files themselves and hidden/private files.
        if name in hidden or name.startswith(('.', '_')):
            continue

        slug, extension = p.splitext(name)
        entry['slug'] = slug
        entry['size'] = p.getsize(full_path)
        entry['humansize'] = humansize(entry['size'])

        if extension == (p.extsep + 'html'):
            # Get the title from the file.
            contents = read_from(full_path)
            entry['title'] = get_title(entry['slug'], contents)
            # Remove .html from the end of the href.
            entry['href'] = p.splitext(entry['href'])[0]
            pages.append(entry)
        else:
            files.append(entry)

    sub_directories.sort(key=lambda entry: entry['basename'])
    pages.sort(key=lambda entry: entry['title'])
    files.sort(key=lambda entry: entry['basename'])

    return {
        'directory': directory,
        'sub_directories': sub_directories,
        'pages': pages,
        'files': files,
        'make_relative': lambda href: make_relative(directory, href),
    }
def listing_context(self, directory):
    """
    Generate the template context for a directory listing.

    This method accepts a relative path, with the base assumed to be the
    HTML root. This means listings must be generated after the wiki is
    built, allowing them to list static media too.

    Directories should always be '/'-delimited when specified, since it
    is assumed that they are URL paths, not filesystem paths.

    For information on what the produced context will look like, consult
    the `listing` doctest.
    """
    # Normalize to a bare, slash-free URL path segment.
    directory = directory.strip('/')

    # Resolve to filesystem paths.
    fs_rel_dir = p.sep.join(directory.split('/'))
    fs_abs_dir = p.join(self.config.html_dir, fs_rel_dir)
    skip_files = {self.config['listing-filename'], 'index.html'}

    sub_directories, pages, files = [], [], []
    for basename in os.listdir(fs_abs_dir):
        fs_abs_path = p.join(fs_abs_dir, basename)
        file_dict = {
            'basename': basename,
            'href': directory + '/' + basename}
        if not file_dict['href'].startswith('/'):
            file_dict['href'] = '/' + file_dict['href']

        if p.isdir(fs_abs_path):
            # Hidden and media directories are excluded from listings.
            # NOTE(review): `directory.startswith('media')` also matches
            # names like 'mediafoo' — confirm whether that is intentional.
            if directory.startswith('.') or basename.startswith('.'):
                continue
            elif (directory == 'media' or directory.startswith('media')
                  or basename == 'media'):
                continue
            file_dict['href'] += '/'
            sub_directories.append(file_dict)
        else:
            if (basename in skip_files or basename.startswith('.') or
                basename.startswith('_')):
                continue
            file_dict['slug'] = p.splitext(basename)[0]
            file_dict['size'] = p.getsize(fs_abs_path)
            file_dict['humansize'] = humansize(file_dict['size'])
            if p.splitext(basename)[1] == (p.extsep + 'html'):
                # Get the title from the file.
                contents = read_from(fs_abs_path)
                path = p.join(directory.strip('/'),
                              basename.replace('.html', '.md'))
                # Prefer page metadata (title, date, ...); fall back to
                # the extracted title. Was a bare `except:`, which also
                # swallowed SystemExit/KeyboardInterrupt — narrowed to
                # Exception while keeping the best-effort fallback.
                try:
                    file_dict.update(self.meta(path))
                except Exception:
                    file_dict['title'] = get_title(file_dict['slug'],
                                                   contents)
                # NOTE(review): when the fallback runs, 'date' is absent
                # and the pages.sort below raises KeyError — confirm that
                # meta() always succeeds for listed pages.
                # Remove .html from the end of the href.
                file_dict['href'] = p.splitext(file_dict['href'])[0]
                pages.append(file_dict)
            else:
                files.append(file_dict)

    sub_directories.sort(key=lambda directory: directory['basename'])
    # Newest pages first.
    pages.sort(key=lambda page: page['date'], reverse=True)
    files.sort(key=lambda file_: file_['basename'])

    return {
        'directory': directory,
        'sub_directories': sub_directories,
        'pages': pages,
        'files': files,
        'make_relative': lambda href: make_relative(directory, href),
    }
def listing_context(self, directory):
    """
    Generate the template context for a directory listing.

    This method accepts a relative path, with the base assumed to be the
    HTML root. This means listings must be generated after the wiki is
    built, allowing them to list static media too.

    Directories should always be '/'-delimited when specified, since it
    is assumed that they are URL paths, not filesystem paths.

    For information on what the produced context will look like, consult
    the `listing` doctest.
    """
    directory = directory.strip("/")

    # Map the URL path onto the filesystem under the HTML root.
    fs_abs_dir = p.join(self.config.html_dir, p.sep.join(directory.split("/")))
    excluded = set([self.config["listing-filename"], "index.html"])

    sub_directories = []
    pages = []
    files = []

    for basename in os.listdir(fs_abs_dir):
        fs_abs_path = p.join(fs_abs_dir, basename)
        href = directory + "/" + basename
        if not href.startswith("/"):
            href = "/" + href
        entry = {"basename": basename, "href": href}

        if p.isdir(fs_abs_path):
            entry["href"] = entry["href"] + "/"
            sub_directories.append(entry)
            continue

        # Skip the listing/index files themselves and hidden/private files.
        if basename in excluded or basename.startswith((".", "_")):
            continue

        slug, ext = p.splitext(basename)
        entry["slug"] = slug
        entry["size"] = p.getsize(fs_abs_path)
        entry["humansize"] = humansize(entry["size"])

        if ext == (p.extsep + "html"):
            # Get the title from the file.
            entry["title"] = get_title(slug, read_from(fs_abs_path))
            # Remove .html from the end of the href.
            entry["href"] = p.splitext(entry["href"])[0]
            pages.append(entry)
        else:
            files.append(entry)

    sub_directories.sort(key=lambda d: d["basename"])
    pages.sort(key=lambda pg: pg["title"])
    files.sort(key=lambda f: f["basename"])

    return {
        "directory": directory,
        "sub_directories": sub_directories,
        "pages": pages,
        "files": files,
        "make_relative": lambda href: make_relative(directory, href),
    }