def _write(self, write_func, directory, filename, *args):
    # Display our progress.
    self._files_written += 1
    log.progress(self._files_written / self._num_files, filename)

    path = os.path.join(directory, filename)
    if self._encoding == 'utf-8':
        f = codecs.open(path, 'w', 'utf-8')
        write_func(f.write, *args)
        f.close()
    else:
        result = []
        write_func(result.append, *args)
        s = u''.join(result)
        try:
            s = s.encode(self._encoding)
        except UnicodeError:
            log.error("Output could not be represented with the "
                      "given encoding (%r). Unencodable characters "
                      "will be displayed as '?'. It is recommended "
                      "that you use a different output encoding (utf-8, "
                      "if it's supported by latex on your system)."
                      % self._encoding)
            s = s.encode(self._encoding, 'replace')
        f = open(path, 'w')
        f.write(s)
        f.close()
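# Illustration (not part of epydoc): a minimal, self-contained sketch of the
# write-callback pattern used by _write() above -- the caller passes a
# callable as write_func, the pieces are buffered in a list, joined, and
# encoded with 'replace' as the fallback, mirroring the non-utf-8 branch.
# The names emit_greeting and target_encoding are hypothetical.
def emit_greeting(out):
    out(u'Hello, ')
    out(u'caf\xe9 world!\n')

target_encoding = 'ascii'
result = []
emit_greeting(result.append)
s = u''.join(result)
try:
    s = s.encode(target_encoding)
except UnicodeError:
    # Unencodable characters are replaced with '?'.
    s = s.encode(target_encoding, 'replace')
print s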
def _get_docs_from_pyname(name, introspect, parse, progress_estimator,
                          supress_warnings=False):
    progress_estimator.complete += 1
    log.progress(progress_estimator.progress(), name)

    introspect_doc = parse_doc = None
    introspect_error = parse_error = None
    if introspect:
        try:
            introspect_doc = introspect_docs(name=name)
        except ImportError, e:
            introspect_error = str(e)
def _get_docs_from_pyobject(obj, introspect, parse, progress_estimator):
    progress_estimator.complete += 1
    log.progress(progress_estimator.progress(), `obj`)

    if not introspect:
        log.error("Cannot get docs for Python objects without "
                  "introspecting them.")

    introspect_doc = parse_doc = None
    introspect_error = parse_error = None
    try:
        introspect_doc = introspect_docs(value=obj)
    except ImportError, e:
        log.error(e)
        return (None, None)
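# Illustration (not part of epydoc): the functions above only assume that
# progress_estimator exposes a writable 'complete' counter and a progress()
# method returning a float in [0, 1].  A minimal stand-in with that interface
# might look like this (the class name and 'total' attribute are hypothetical):
class _SimpleProgressEstimator:
    def __init__(self, total):
        self.total = total      # expected number of items
        self.complete = 0       # items finished so far

    def progress(self):
        if self.total == 0:
            return 1.0
        return float(self.complete) / self.total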
def _get_docs_from_module_file(filename, introspect, parse, progress_estimator,
                               parent_docs=(None, None)):
    """
    Construct and return the API documentation for the python
    module with the given filename.

    @param parent_docs: The C{ModuleDoc} of the containing package.
        If C{parent_docs} is not provided, then this method will
        check if the given filename is contained in a package; and
        if so, it will construct a stub C{ModuleDoc} for the
        containing package(s).
    """
    # Record our progress.
    modulename = os.path.splitext(os.path.split(filename)[1])[0]
    if modulename == '__init__':
        modulename = os.path.split(os.path.split(filename)[0])[1]
    if parent_docs[0]:
        modulename = DottedName(parent_docs[0].canonical_name, modulename)
    elif parent_docs[1]:
        modulename = DottedName(parent_docs[1].canonical_name, modulename)
    log.progress(progress_estimator.progress(),
                 '%s (%s)' % (modulename, filename))
    progress_estimator.complete += 1

    # Normalize the filename.
    filename = os.path.normpath(os.path.abspath(filename))

    # When possible, use the source version of the file.
    try:
        filename = py_src_filename(filename)
        src_file_available = True
    except ValueError:
        src_file_available = False

    # Get the introspected & parsed docs (as appropriate)
    introspect_doc = parse_doc = None
    introspect_error = parse_error = None
    if introspect:
        try:
            introspect_doc = introspect_docs(filename=filename,
                                             context=parent_docs[0])
        except ImportError, e:
            introspect_error = str(e)
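# Illustration (not part of epydoc): py_src_filename() is used above to prefer
# the .py source over a compiled file, and the caller expects it to raise
# ValueError when no source is available.  A rough, hypothetical sketch of
# that idea (the helper name below is made up):
import os

def _guess_src_filename(filename):
    base, ext = os.path.splitext(filename)
    if ext in ('.pyc', '.pyo'):
        # Prefer the corresponding source file.
        filename = base + '.py'
    if not os.path.isfile(filename):
        raise ValueError('%r: no source file found' % filename)
    return filename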
def _report_valdoc_progress(i, val_doc, val_docs):
    if (isinstance(val_doc, (ModuleDoc, ClassDoc)) and
        val_doc.canonical_name != UNKNOWN and
        not val_doc.canonical_name[0].startswith('??')):
        log.progress(float(i)/len(val_docs), val_doc.canonical_name)
def build_doc_index(items, introspect=True, parse=True, add_submodules=True):
    """
    Build API documentation for the given list of items, and
    return it in the form of a L{DocIndex}.

    @rtype: L{DocIndex}
    @param items: The items to document, specified using any of the
        following:
          - A string, naming a python package directory
            (e.g., C{'epydoc/markup'})
          - A string, naming a python file
            (e.g., C{'epydoc/docparser.py'})
          - A string, naming a python object
            (e.g., C{'epydoc.docparser.DocParser'})
          - Any (non-string) python object
            (e.g., C{list.append})
    @param introspect: If true, then use introspection to examine the
        specified items.  Otherwise, just use parsing.
    @param parse: If true, then use parsing to examine the specified
        items.  Otherwise, just use introspection.
    """
    # Get the basic docs for each item.
    doc_pairs = _get_docs_from_items(items, introspect, parse, add_submodules)

    # Merge the introspection & parse docs.
    if parse and introspect:
        log.start_progress('Merging parsed & introspected information')
        docs = []
        for i, (introspect_doc, parse_doc) in enumerate(doc_pairs):
            if introspect_doc is not None and parse_doc is not None:
                if introspect_doc.canonical_name not in (None, UNKNOWN):
                    name = introspect_doc.canonical_name
                else:
                    name = parse_doc.canonical_name
                log.progress(float(i)/len(doc_pairs), name)
                docs.append(merge_docs(introspect_doc, parse_doc))
            elif introspect_doc is not None:
                docs.append(introspect_doc)
            elif parse_doc is not None:
                docs.append(parse_doc)
        log.end_progress()
    elif introspect:
        docs = [doc_pair[0] for doc_pair in doc_pairs if doc_pair[0]]
    else:
        docs = [doc_pair[1] for doc_pair in doc_pairs if doc_pair[1]]

    if len(docs) == 0:
        log.error('Nothing left to document!')
        return None

    # Collect the docs into a single index.
    docindex = DocIndex(docs)

    # Replace any proxy valuedocs that we got from importing with
    # their targets.
    if parse:
        log.start_progress('Linking imported variables')
        valdocs = docindex.reachable_valdocs(sort_by_name=True,
                                             imports=False,
                                             submodules=False,
                                             packages=False,
                                             subclasses=False)
        for i, val_doc in enumerate(valdocs):
            _report_valdoc_progress(i, val_doc, valdocs)
            link_imports(val_doc, docindex)
        log.end_progress()

    # Assign canonical names.
    log.start_progress('Indexing documentation')
    for i, val_doc in enumerate(docindex.root):
        log.progress(float(i)/len(docindex.root), val_doc.canonical_name)
        assign_canonical_names(val_doc, val_doc.canonical_name, docindex)
    log.end_progress()

    # Parse the docstrings for each object.
    log.start_progress('Parsing docstrings')
    valdocs = docindex.reachable_valdocs(sort_by_name=True,
                                         imports=False,
                                         submodules=False,
                                         packages=False,
                                         subclasses=False)
    for i, val_doc in enumerate(valdocs):
        _report_valdoc_progress(i, val_doc, valdocs)
        # the value's docstring
        parse_docstring(val_doc, docindex)
        # the value's variables' docstrings
        if (isinstance(val_doc, NamespaceDoc) and
            val_doc.variables not in (None, UNKNOWN)):
            for var_doc in val_doc.variables.values():
                parse_docstring(var_doc, docindex)
    log.end_progress()

    # Take care of inheritance.
    log.start_progress('Inheriting documentation')
    for i, val_doc in enumerate(valdocs):
        if isinstance(val_doc, ClassDoc):
            percent = float(i)/len(valdocs)
            log.progress(percent, val_doc.canonical_name)
            inherit_docs(val_doc)
    log.end_progress()

    # Initialize the groups & sortedvars attributes.
    log.start_progress('Sorting & Grouping')
    for i, val_doc in enumerate(valdocs):
        if isinstance(val_doc, NamespaceDoc):
            percent = float(i)/len(valdocs)
            log.progress(percent, val_doc.canonical_name)
            val_doc.init_sorted_variables()
            val_doc.init_variable_groups()
            if isinstance(val_doc, ModuleDoc):
                val_doc.init_submodule_groups()
    log.end_progress()

    return docindex
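# Illustration (not from the epydoc source): a minimal way to call
# build_doc_index(), using the package-directory item form described in the
# docstring above.  The path 'epydoc/markup' is only an example.
if __name__ == '__main__':
    docindex = build_doc_index(['epydoc/markup'], introspect=True, parse=True)
    if docindex is not None:
        # Print the canonical names of the root values that were documented.
        for val_doc in docindex.root:
            print val_doc.canonical_name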
def write_latex(docindex, options, format):
    from epydoc.docwriter.latex import LatexWriter
    latex_writer = LatexWriter(docindex, **options.__dict__)
    log.start_progress('Writing LaTeX docs')
    latex_writer.write(options.target)
    log.end_progress()

    # If we're just generating the latex, and not any output format,
    # then we're done.
    if format == 'latex':
        return

    if format == 'dvi':
        steps = 4
    elif format == 'ps':
        steps = 5
    elif format == 'pdf':
        steps = 6

    log.start_progress('Processing LaTeX docs')
    oldpath = os.path.abspath(os.curdir)
    running = None  # keep track of what we're doing.
    try:
        try:
            os.chdir(options.target)

            # Clear any old files out of the way.
            for ext in 'tex aux log out idx ilg toc ind'.split():
                if os.path.exists('apidoc.%s' % ext):
                    os.remove('apidoc.%s' % ext)

            # The first pass generates index files.
            running = 'latex'
            log.progress(0./steps, 'LaTeX: First pass')
            run_subprocess('latex api.tex')

            # Build the index.
            running = 'makeindex'
            log.progress(1./steps, 'LaTeX: Build index')
            run_subprocess('makeindex api.idx')

            # The second pass generates our output.
            running = 'latex'
            log.progress(2./steps, 'LaTeX: Second pass')
            out, err = run_subprocess('latex api.tex')

            # The third pass is only necessary if the second pass
            # changed what page some things are on.
            running = 'latex'
            if _RERUN_LATEX_RE.match(out):
                log.progress(3./steps, 'LaTeX: Third pass')
                out, err = run_subprocess('latex api.tex')

            # A fourth pass should (almost?) never be necessary.
            running = 'latex'
            if _RERUN_LATEX_RE.match(out):
                log.progress(3./steps, 'LaTeX: Fourth pass')
                run_subprocess('latex api.tex')

            # If requested, convert to postscript.
            if format in ('ps', 'pdf'):
                running = 'dvips'
                log.progress(4./steps, 'dvips')
                run_subprocess('dvips api.dvi -o api.ps -G0 -Ppdf')

            # If requested, convert to pdf.
            if format == 'pdf':
                running = 'ps2pdf'
                log.progress(5./steps, 'ps2pdf')
                run_subprocess(
                    'ps2pdf -sPAPERSIZE=letter -dMaxSubsetPct=100 '
                    '-dSubsetFonts=true -dCompatibilityLevel=1.2 '
                    '-dEmbedAllFonts=true api.ps api.pdf')
        except RunSubprocessError, e:
            if running == 'latex':
                e.out = re.sub(r'(?sm)\A.*?!( LaTeX Error:)?', r'', e.out)
                e.out = re.sub(r'(?sm)\s*Type X to quit.*', '', e.out)
                e.out = re.sub(r'(?sm)^! Emergency stop.*', '', e.out)
            log.error("%s failed: %s" % (running, (e.out+e.err).lstrip()))
        except OSError, e:
            log.error("%s failed: %s" % (running, e))
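# Illustration (not part of this excerpt): _RERUN_LATEX_RE is referenced above
# but its definition is not shown here, and the real pattern may differ.  All
# the code needs is a pattern that detects LaTeX's "Label(s) may have changed"
# warning somewhere in the captured output; because the caller uses .match()
# rather than .search(), a hypothetical stand-in needs a leading (?s).* so the
# scan effectively covers the whole string:
import re

_RERUN_LATEX_RE = re.compile(
    r'(?si).*label\(s\) may have changed\.\s*rerun')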