def run(*args, _check=True, **kwargs) -> int:
    """Invoke the pdoc CLI programmatically and return its exit status.

    Positional ``*args`` are passed through as raw CLI arguments; each keyword
    argument ``foo_bar=value`` is translated into a ``--foo-bar value`` option
    pair (falsy values are dropped, which disables the option).

    Parameters
    ----------
    *args : str
        Raw positional CLI arguments forwarded to pdoc's argument parser.
    _check : bool
        Unused; kept only for backward compatibility with existing callers.
    **kwargs
        Long-option overrides, translated as described above.

    Returns
    -------
    int
        0 on success, otherwise the non-zero exit code reported by pdoc.
    """
    # Turn foo_bar=value into ('--foo-bar', value) pairs, flatten them, and
    # filter out falsy entries so e.g. value=None omits the option entirely.
    params = (('--' + key.replace('_', '-'), value) for key, value in kwargs.items())
    params = list(filter(None, chain.from_iterable(params)))  # type: ignore
    _args = cli.parser.parse_args([*params, *args])  # type: ignore
    try:
        returncode = cli.main(_args)
        return returncode or 0
    except SystemExit as e:
        # BUG FIX: SystemExit.code may be None on a clean exit; normalize to 0
        # so the annotated ``int`` return type holds, mirroring the success
        # path's ``returncode or 0``.
        return e.code or 0
def __init__(self, modules):
    """Build the argparse-like option namespace pdoc's CLI expects."""
    # Mirror the attribute set of pdoc's parsed CLI arguments: render HTML
    # into 'docs', overwriting any previous output, for the given modules.
    self.template_dir = None
    self.html = True
    self.http = None
    self.filter = None
    self.external_links = None
    self.overwrite = True
    self.html_dir = 'docs'
    self.modules = modules
    self.link_prefix = ''
    self.html_no_source = False


# Render the top-level package first, then promote its page to the site index.
cli.main(PdocArgs(['acsploit']))
os.rename('docs/acsploit.html', 'docs/index.html')

# Render each sub-package into its own HTML tree.
modules = ['exploits', 'input', 'output', 'options']
for module in modules:
    cli.main(PdocArgs([module]))

# Closing markup of the table of contents.
toc_end = """</ul>\n</li>\n</ul>\n</li>"""

# Assemble the "Sub-modules" table-of-contents fragment: one linked entry
# per rendered sub-package, wrapped in the surrounding list markup.
entry_template = '<li><h4><code><a title="{}" href="{}/index.html">{}</a></code></h4></li>\n'
sub_module_entries = (
    """\n<li><h3><a href="">Sub-modules</a></h3>\n<ul>"""
    + ''.join(entry_template.format(module, module, module) for module in modules)
    + """</ul>\n</li>"""
)
# NOTE(review): this chunk begins mid-definition — the code below is the tail
# of a pdoc `_generate_lunr_search` monkey-patch: it finishes a url-id helper
# (mapping module URLs to compact integer ids, stripping the package folder
# prefix when the top module is a package), writes index.json and search.html
# into the output directory, and — under the __main__ guard — installs the
# patch on `pdoc.cli` before running the CLI. Left byte-identical; the
# enclosing definitions are outside this view.
if top_module.is_package: # Reference from subfolder if its a package _, url = url.split("/", maxsplit=1) if url not in url_cache: url_cache[url] = len(url_cache) return url_cache[url] index = [] url_cache = {} recursive_add_to_index(top_module) urls = sorted(url_cache.keys(), key=url_cache.__getitem__) # If top module is a package, output the index in its subfolder, else, in the output dir main_path = path.join( cli.args.output_dir, *top_module.name.split(".") if top_module.is_package else "") with cli._open_write_file(path.join(main_path, "index.json")) as f: json.dump({"urls": urls, "index": index}, f) # Generate search.html with cli._open_write_file(path.join(main_path, "search.html")) as f: rendered_template = pdoc._render_template("/search.mako", module=top_module, **template_config) f.write(rendered_template) if __name__ == "__main__": cli._generate_lunr_search = _patched_generate_lunr_search cli.main()
# NOTE(review): the first statements below are the tail of a private
# copy-file helper whose `def` line (and exact signature) lies outside this
# view — presumably `__copy_file_to_dir(file_paths, target_dir_path)`, given
# the call site; confirm before refactoring. The rest: copy_doc_images()
# stages the PyArmNN logo into the docs tree, archive_docs() tars a doc
# directory as pyarmnn_docs-<version>.tar, and the __main__ block injects
# README.md as the package docstring, generates the docs, then archives them.
# Left byte-identical because the leading definition is incomplete here.
if not (os.path.exists(file_path) and os.path.isfile(file_path)): raise RuntimeError('Not a file: {}'.format(file_path)) file_name = os.path.basename(file_path) shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name)) def copy_doc_images(): __copy_file_to_dir(file_paths=['./images/pyarmnn.png'], target_dir_path='docs/pyarmnn/images') def archive_docs(path, version): output_filename = f'pyarmnn_docs-{version}.tar' with tarfile.open(output_filename, "w") as tar: tar.add(path) if __name__ == "__main__": with open('./README.md', 'r') as readme_file: top_level_pyarmnn_doc = ''.join(readme_file.readlines()) ann.__doc__ = top_level_pyarmnn_doc main() copy_doc_images() archive_docs('./docs', ann.__version__)
def main():
    """Generate the documentation with a patched pdoc lunr-search indexer.

    Only a single top-level module/package is rendered per run, and the
    documentation root itself must stay out of the search index — pdoc's
    stock ``_generate_lunr_search`` does not know either of those things,
    so it is monkey-patched on ``pdoc.cli`` before delegating to
    ``cli.main()``.
    """
    import json
    import os.path as path
    import re
    from functools import lru_cache

    import pdoc
    from pdoc import cli

    # We don't document stuff on the index of the documentation, but pdoc
    # doesn't know that, so we have to patch the function that generates
    # the index.
    def _patched_generate_lunr_search(modules, index_docstrings, template_config):
        # This will only be called once due to how we generate the
        # documentation, so we can ignore the rest.
        assert len(modules) == 1, "expected only 1 module to be generated, got more"
        top_module = modules[0]

        def trim_docstring(docstring):
            # Strip markup noise so the index stores plain searchable text.
            # (re.VERBOSE: whitespace/comments inside the pattern are ignored.)
            return re.sub(
                r"""
                \s+| # whitespace sequences
                \s+[-=~]{3,}\s+| # title underlines
                ^[ \t]*[`~]{3,}\w*$| # code blocks
                \s*[`#*]+\s*| # common markdown chars
                \s*([^\w\d_>])\1\s*| # sequences of punct of the same kind
                \s*</?\w*[^>]*>\s* # simple HTML tags
                """,
                " ",
                docstring,
                flags=re.VERBOSE | re.MULTILINE,
            )

        def recursive_add_to_index(dobj):
            # Record one index entry per documented object, then recurse
            # into its members.
            url = to_url_id(dobj.module)
            if url != 0:  # 0 is index.html
                # r: ref
                # u: url
                # d: doc
                # f: function
                info = {"r": dobj.refname, "u": url}
                if index_docstrings:
                    info["d"] = trim_docstring(dobj.docstring)
                if isinstance(dobj, pdoc.Function):
                    info["f"] = 1
                index.append(info)
            for member_dobj in getattr(dobj, "doc", {}).values():
                # BUG FIX: the original tested ``dobj`` here — the parent,
                # which at url == 0 is the top module and always a Module,
                # so the guard was dead code. Test the *member* instead, so
                # that at the root package only submodules are indexed.
                if url == 0 and not isinstance(member_dobj, pdoc.Module):
                    # Don't document anything that is not a submodule in root package
                    continue
                recursive_add_to_index(member_dobj)

        @lru_cache()
        def to_url_id(module):
            # Map each module URL to a compact integer id (0 == index.html).
            url = module.url()
            if top_module.is_package:
                # Reference from subfolder if its a package
                _, url = url.split("/", maxsplit=1)
            if url not in url_cache:
                url_cache[url] = len(url_cache)
            return url_cache[url]

        index = []
        url_cache = {}
        recursive_add_to_index(top_module)
        urls = sorted(url_cache.keys(), key=url_cache.__getitem__)

        # If top module is a package, output the index in its subfolder,
        # else, in the output dir.
        main_path = path.join(
            cli.args.output_dir,
            *top_module.name.split(".") if top_module.is_package else "")
        with cli._open_write_file(path.join(main_path, "index.json")) as f:
            json.dump({"index": index, "urls": urls}, f)

        # Generate search.html
        with cli._open_write_file(path.join(main_path, "search.html")) as f:
            rendered_template = pdoc._render_template(
                "/search.mako", module=top_module, **template_config)
            f.write(rendered_template)

    cli._generate_lunr_search = _patched_generate_lunr_search
    cli.main()
def main():
    """Entry point: hand control straight to the pdoc command-line interface."""
    # Imported lazily so merely importing this module never pulls in pdoc.
    from pdoc import cli as pdoc_cli
    pdoc_cli.main()
# NOTE(review): this chunk starts mid-expression — the first line closes a
# setup() metadata dict whose beginning is outside this view. Under the
# __main__ guard: run setuptools, then (best-effort) generate API docs with
# pdoc3 into dist/doc/<name>/<version>. STDOUT is captured the whole time
# because Jenkins parses it to discover the package name; the captured text
# is replayed to STDERR in the ``finally`` block. A missing pdoc3 only
# produces a warning, never a build failure. Left byte-identical apart from
# a typo fix in the STDOUT comment.
keywords="prometeia template project pipeline jenkins", ) if __name__ == '__main__': setup(METADATA) # Don't mess with STDOUT, it would be parsed by jenkins to discover package name! old_stdout = sys.stdout sys.stdout = mystdout = six.StringIO() doc_final_path = os.path.join("dist", "doc", METADATA["name"], METADATA["version"]) try: from pdoc.cli import main, parser print("INFO: Generating doc in {}".format(doc_final_path)) params = "-o {} --html --template-dir template_dir {} --force".format( os.path.join("dist", "doc", METADATA["name"]), METADATA["name"]).split() main(parser.parse_args(params)) if os.path.isdir(doc_final_path): shutil.rmtree(doc_final_path) os.rename( os.path.join("dist", "doc", METADATA["name"], METADATA["name"]), doc_final_path) print("INFO: Doc entrypoint is {}".format( os.path.join(doc_final_path, "index.html"))) except ImportError: print("WARNING: Cannot import pdoc3, skipping doc generation") finally: sys.stdout = old_stdout print(mystdout.getvalue(), file=sys.stderr)
# NOTE(review): this chunk starts mid-loop — `class_` is bound by a `for`
# outside this view, presumably iterating Blender operator/property classes
# (bl_description is a Blender idiom) — confirm against the enclosing loop.
# The visible body appends the bl_description to each class docstring and,
# for type hints shaped like (SomeProperty, {'name': ..., 'description': ...}),
# registers a formatted `__pdoc__` entry so pdoc documents the property.
# The __main__ block (including its commented-out manual-render experiment)
# then renders the 'kawa_scripts' package to HTML via the pdoc CLI. Left
# byte-identical because the enclosing loop header is not visible here.
if 'bl_description' in class_.__dict__: class_.__doc__ += '\n\nDescription: `{0}`.'.format(class_.bl_description) for key, value in _typing.get_type_hints(class_).items(): # print("type hint: {0} - {1} - {2}".format(repr(class_), repr(key), repr(value))) if not isinstance(value, tuple) or len(value) != 2: continue prop_func, options = value if prop_func is None or 'Property' not in prop_func.__name__: continue if not isinstance(options, dict) or 'name' not in options: continue __pdoc__[class_.__name__ + '.' + key] = "**{0}.*** {1}".format(options['name'], options.get('description', '')) if __name__ == '__main__': import pdoc from pdoc import cli import kawa_scripts print(dir(pdoc)) # print(cli) # context = pdoc.Context() # mod = pdoc.Module('kawa_scripts', context=context) # pdoc.link_inheritance(context) # # def recursive_htmls(mod): # yield mod.name, mod.html() # for submod in mod.submodules(): # yield from recursive_htmls(submod) cli.main(_args=cli.parser.parse_args(['--html', '-f', '-o', 'doc', 'kawa_scripts']))