def process_result(self, result):
    """Report one link-check result on the console and in the output file.

    *result* is a 5-tuple ``(uri, docname, lineno, status, info)``.
    Unchecked links and already-cached "working" results are skipped
    silently; every other status is printed and recorded via
    ``write_entry``.
    """
    uri, docname, lineno, status, info = result
    # Silent outcomes: nothing to report.
    if status == 'unchecked':
        return
    if status == 'working' and info != 'new':
        return
    if lineno:
        self.info('(line %3d) ' % lineno, nonl=1)
    if status == 'ignored':
        self.info('%s - %s' % (uri, darkgray('ignored')))
    elif status == 'local':
        self.info('%s - %s' % (uri, darkgray('local')))
        self.write_entry('local', docname, lineno, uri)
    elif status == 'working':
        self.info('%s - %s' % (uri, darkgreen('working')))
    elif status == 'broken':
        self.info('%s - %s%s' % (uri, red('broken: '), info))
        self.write_entry('broken', docname, lineno, uri + ': ' + info)
        if self.app.quiet:
            # info() output is suppressed in quiet mode, so emit a warning
            # to keep broken links visible.
            self.warn('broken link: %s' % uri,
                      '%s:%s' % (self.env.doc2path(docname), lineno))
    elif status == 'redirected':
        self.info('%s - %s to %s' % (uri, purple('redirected'), info))
        self.write_entry('redirected', docname, lineno, uri + ' to ' + info)
def process_result(self, result):
    """Print one link-check result and record it via ``write_entry``.

    *result* is a 6-tuple ``(uri, docname, lineno, status, info, code)``
    where *code* is the HTTP status of the last redirect (0 if unknown).
    """
    uri, docname, lineno, status, info, code = result
    # Silent outcomes first: never-checked links and cached "working" hits.
    if status == 'unchecked':
        return
    if status == 'working' and info != 'new':
        return
    if lineno:
        self.info('(line %4d) ' % lineno, nonl=1)
    if status == 'ignored':
        self.info(darkgray('-ignored- ') + uri)
    elif status == 'local':
        self.info(darkgray('-local- ') + uri)
        self.write_entry('local', docname, lineno, uri)
    elif status == 'working':
        self.info(darkgreen('ok ') + uri)
    elif status == 'broken':
        self.info(red('broken ') + uri + red(' - ' + info))
        self.write_entry('broken', docname, lineno, uri + ': ' + info)
        if self.app.quiet:
            # Warn so broken links remain visible when info() is muted.
            self.warn('broken link: %s' % uri,
                      '%s:%s' % (self.env.doc2path(docname), lineno))
    elif status == 'redirected':
        # Map the redirect's HTTP code to a human-readable label and color.
        redirect_kinds = {
            301: ('permanently', darkred),
            302: ('with Found', purple),
            303: ('with See Other', purple),
            307: ('temporarily', turquoise),
            0: ('with unknown code', purple),
        }
        text, color = redirect_kinds[code]
        self.write_entry('redirected ' + text, docname, lineno,
                         uri + ' to ' + info)
        self.info(color('redirect ') + uri +
                  color(' - ' + text + ' to ' + info))
def check(self, node, docname):
    """Check the hyperlink held by *node*, reporting and caching the result.

    Results are cached in ``self.good`` / ``self.broken`` /
    ``self.redirected`` so each URI is only resolved once.
    """
    uri = node['refuri']
    # Strip any fragment: only the base resource is checked.
    if '#' in uri:
        uri = uri.split('#', 1)[0]
    if uri in self.good:
        return
    # Walk up the doctree until an ancestor carries a line number.
    lineno = None
    while lineno is None:
        node = node.parent
        if node is None:
            break
        lineno = node.line
    # mailto: and ftp: links (and empty URIs) are never checked.
    if not uri or uri.startswith('mailto:') or uri.startswith('ftp:'):
        return
    if lineno:
        self.info('(line %3d) ' % lineno, nonl=1)
    for rex in self.to_ignore:
        if rex.match(uri):
            self.info(uri + ' - ' + darkgray('ignored'))
            return
    if uri.startswith('http:') or uri.startswith('https:'):
        self.info(uri, nonl=1)
        # Use a cached verdict if available, otherwise hit the network.
        if uri in self.broken:
            r, s = self.broken[uri]
        elif uri in self.redirected:
            r, s = self.redirected[uri]
        else:
            r, s = self.resolve(uri)
        if r == 0:
            self.info(' - ' + darkgreen('working'))
            self.good.add(uri)
        elif r == 2:
            self.info(' - ' + red('broken: ') + s)
            self.write_entry('broken', docname, lineno, uri + ': ' + s)
            self.broken[uri] = (r, s)
            if self.app.quiet:
                self.warn('broken link: %s' % uri,
                          '%s:%s' % (self.env.doc2path(docname), lineno))
        else:
            self.info(' - ' + purple('redirected') + ' to ' + s)
            self.write_entry('redirected', docname, lineno, uri + ' to ' + s)
            self.redirected[uri] = (r, s)
    else:
        self.info(uri + ' - ' + darkgray('local'))
        self.write_entry('local', docname, lineno, uri)
    if self.broken:
        self.app.statuscode = 1
def process_result(self, result: CheckResult) -> None:
    """Report a single hyperlink-check result.

    The machine-readable linkstat record is written first; then a console
    line (or a warning, for broken/disallowed-redirect links) is emitted
    according to ``result.status``. Raises :exc:`ValueError` for an
    unrecognized status.
    """
    filename = self.env.doc2path(result.docname, False)
    linkstat = {"filename": filename, "lineno": result.lineno,
                "status": result.status, "code": result.code,
                "uri": result.uri, "info": result.message}
    self.write_linkstat(linkstat)
    # Silent statuses produce no console output.
    if result.status == 'unchecked':
        return
    if result.status == 'working' and result.message == 'old':
        return
    if result.lineno:
        logger.info('(%16s: line %4d) ', result.docname, result.lineno,
                    nonl=True)
    status = result.status
    if status == 'ignored':
        if result.message:
            logger.info(darkgray('-ignored- ') + result.uri + ': ' +
                        result.message)
        else:
            logger.info(darkgray('-ignored- ') + result.uri)
    elif status == 'local':
        logger.info(darkgray('-local- ') + result.uri)
        self.write_entry('local', result.docname, filename,
                         result.lineno, result.uri)
    elif status == 'working':
        logger.info(darkgreen('ok ') + result.uri + result.message)
    elif status == 'broken':
        if self.app.quiet or self.app.warningiserror:
            logger.warning(__('broken link: %s (%s)'), result.uri,
                           result.message,
                           location=(result.docname, result.lineno))
        else:
            logger.info(red('broken ') + result.uri +
                        red(' - ' + result.message))
        self.write_entry('broken', result.docname, filename,
                         result.lineno, result.uri + ': ' + result.message)
        self.broken_hyperlinks += 1
    elif status == 'redirected':
        try:
            text, color = {
                301: ('permanently', purple),
                302: ('with Found', purple),
                303: ('with See Other', purple),
                307: ('temporarily', turquoise),
                308: ('permanently', purple),
            }[result.code]
        except KeyError:
            text, color = ('with unknown code', purple)
        # NOTE(review): linkstat was already handed to write_linkstat above;
        # this later mutation is only observed if write_linkstat kept a live
        # reference to the dict -- confirm against its implementation.
        linkstat['text'] = text
        if self.config.linkcheck_allowed_redirects:
            # An allow-list is configured, so any redirect reaching this
            # point is unexpected: surface it as a warning.
            logger.warning('redirect ' + result.uri + ' - ' + text +
                           ' to ' + result.message,
                           location=(result.docname, result.lineno))
        else:
            logger.info(color('redirect ') + result.uri +
                        color(' - ' + text + ' to ' + result.message))
        self.write_entry('redirected ' + text, result.docname, filename,
                         result.lineno, result.uri + ' to ' + result.message)
    else:
        raise ValueError("Unknown status %s." % result.status)
def process_result(self, result: Tuple[str, str, int, str, str, int]) -> None:
    """Log one link-check result and record it in the linkstat file.

    *result* is ``(uri, docname, lineno, status, info, code)``. Every
    status path writes a linkstat record; most also print a console line
    and append to the plain-text output via ``write_entry``.
    """
    uri, docname, lineno, status, info, code = result
    filename = self.env.doc2path(docname, None)
    linkstat = dict(filename=filename, lineno=lineno, status=status,
                    code=code, uri=uri, info=info)
    # Silent outcomes: record the linkstat entry, print nothing.
    if status == 'unchecked':
        self.write_linkstat(linkstat)
        return
    if status == 'working' and info == 'old':
        self.write_linkstat(linkstat)
        return
    if lineno:
        logger.info('(line %4d) ', lineno, nonl=True)
    if status == 'ignored':
        if info:
            logger.info(darkgray('-ignored- ') + uri + ': ' + info)
        else:
            logger.info(darkgray('-ignored- ') + uri)
        self.write_linkstat(linkstat)
    elif status == 'local':
        logger.info(darkgray('-local- ') + uri)
        self.write_entry('local', docname, filename, lineno, uri)
        self.write_linkstat(linkstat)
    elif status == 'working':
        logger.info(darkgreen('ok ') + uri + info)
        self.write_linkstat(linkstat)
    elif status == 'broken':
        if self.app.quiet or self.app.warningiserror:
            logger.warning(__('broken link: %s (%s)'), uri, info,
                           location=(filename, lineno))
        else:
            logger.info(red('broken ') + uri + red(' - ' + info))
        self.write_entry('broken', docname, filename, lineno,
                         uri + ': ' + info)
        self.write_linkstat(linkstat)
    elif status == 'redirected':
        # Describe the redirect by its last HTTP status code; unknown
        # codes fall back to a generic label.
        try:
            text, color = {
                301: ('permanently', purple),
                302: ('with Found', purple),
                303: ('with See Other', purple),
                307: ('temporarily', turquoise),
                308: ('permanently', purple),
            }[code]
        except KeyError:
            text, color = ('with unknown code', purple)
        linkstat['text'] = text
        logger.info(color('redirect ') + uri +
                    color(' - ' + text + ' to ' + info))
        self.write_entry('redirected ' + text, docname, filename, lineno,
                         uri + ' to ' + info)
        self.write_linkstat(linkstat)
def process_result(self, result):
    # type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
    """Log one check result and append it to the plain-text output file."""
    uri, docname, lineno, status, info, code = result
    # Silent outcomes: never-checked links and cached "working" results.
    if status == 'unchecked':
        return
    if status == 'working' and info == 'old':
        return
    if lineno:
        logger.info('(line %4d) ', lineno, nonl=1)
    if status == 'ignored':
        msg = darkgray('-ignored- ') + uri
        if info:
            msg += ': ' + info
        logger.info(msg)
    elif status == 'local':
        logger.info(darkgray('-local- ') + uri)
        self.write_entry('local', docname, lineno, uri)
    elif status == 'working':
        logger.info(darkgreen('ok ') + uri + info)
    elif status == 'broken':
        self.write_entry('broken', docname, lineno, uri + ': ' + info)
        if self.app.quiet or self.app.warningiserror:
            # Use a warning so broken links surface in quiet/-W builds.
            logger.warning('broken link: %s (%s)', uri, info,
                           location=(self.env.doc2path(docname), lineno))
        else:
            logger.info(red('broken ') + uri + red(' - ' + info))
    elif status == 'redirected':
        text, color = {
            301: ('permanently', darkred),
            302: ('with Found', purple),
            303: ('with See Other', purple),
            307: ('temporarily', turquoise),
            0: ('with unknown code', purple),
        }[code]
        self.write_entry('redirected ' + text, docname, lineno,
                         uri + ' to ' + info)
        logger.info(color('redirect ') + uri +
                    color(' - ' + text + ' to ' + info))
def debug(self, message, *args, **kwargs):
    """Emit a debug-level informational message.

    The message is only emitted for verbosity levels >= 2 (i.e. at
    least two ``-v`` options were given). It may contain %-style
    interpolation placeholders, filled from ``*args`` or ``**kwargs``.
    """
    if self.verbosity < 2:
        return
    # Positional args win over keyword args for interpolation.
    interpolants = args or kwargs
    if interpolants:
        message = message % interpolants
    self._log(darkgray(message), self._status)
def check(self, node, docname):
    """Check a single reference node's URI and log/record the outcome.

    Verdicts are cached in ``self.good`` / ``self.broken`` /
    ``self.redirected`` so repeated URIs are resolved only once.
    """
    uri = node["refuri"]
    # Only the base resource (without fragment) is checked.
    if "#" in uri:
        uri = uri.split("#")[0]
    if uri in self.good:
        return
    # Find the closest ancestor node that knows its source line.
    lineno = None
    ancestor = node
    while lineno is None:
        ancestor = ancestor.parent
        if ancestor is None:
            break
        lineno = ancestor.line
    if len(uri) == 0 or uri.startswith("mailto:") or uri.startswith("ftp:"):
        return
    if lineno:
        self.info("(line %3d) " % lineno, nonl=1)
    if uri.startswith(("http:", "https:")):
        self.info(uri, nonl=1)
        # Prefer a cached verdict before resolving over the network.
        if uri in self.broken:
            verdict, detail = self.broken[uri]
        elif uri in self.redirected:
            verdict, detail = self.redirected[uri]
        else:
            verdict, detail = self.resolve(uri)
        if verdict == 0:
            self.info(" - " + darkgreen("working"))
            self.good.add(uri)
        elif verdict == 2:
            self.info(" - " + red("broken: ") + detail)
            self.write_entry("broken", docname, lineno, uri + ": " + detail)
            self.broken[uri] = (verdict, detail)
            if self.app.quiet:
                self.warn("broken link: %s" % uri,
                          "%s:%s" % (self.env.doc2path(docname), lineno))
        else:
            self.info(" - " + purple("redirected") + " to " + detail)
            self.write_entry("redirected", docname, lineno,
                             uri + " to " + detail)
            self.redirected[uri] = (verdict, detail)
    else:
        self.info(uri + " - " + darkgray("local"))
        self.write_entry("local", docname, lineno, uri)
    if self.broken:
        self.app.statuscode = 1
def process_result(self, result):
    # type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
    """Report a single link-check result on the console and in the output file."""
    uri, docname, lineno, status, info, code = result
    # These outcomes produce no output at all.
    if status == 'unchecked' or (status == 'working' and info == 'old'):
        return
    if lineno:
        logger.info('(line %4d) ', lineno, nonl=1)
    if status == 'ignored':
        suffix = ': ' + info if info else ''
        logger.info(darkgray('-ignored- ') + uri + suffix)
    elif status == 'local':
        logger.info(darkgray('-local- ') + uri)
        self.write_entry('local', docname, lineno, uri)
    elif status == 'working':
        logger.info(darkgreen('ok ') + uri + info)
    elif status == 'broken':
        self.write_entry('broken', docname, lineno, uri + ': ' + info)
        if self.app.quiet or self.app.warningiserror:
            # A warning keeps broken links visible in quiet/-W builds.
            logger.warning('broken link: %s (%s)', uri, info,
                           location=(self.env.doc2path(docname), lineno))
        else:
            logger.info(red('broken ') + uri + red(' - ' + info))
    elif status == 'redirected':
        # Label and color the redirect by its last HTTP status code.
        redirect_labels = {
            301: ('permanently', darkred),
            302: ('with Found', purple),
            303: ('with See Other', purple),
            307: ('temporarily', turquoise),
            0: ('with unknown code', purple),
        }
        text, color = redirect_labels[code]
        self.write_entry('redirected ' + text, docname, lineno,
                         uri + ' to ' + info)
        logger.info(color('redirect ') + uri +
                    color(' - ' + text + ' to ' + info))
def run(self) -> None:
    """Worker loop: pull hyperlinks from ``self.wqueue``, check each one,
    and push :class:`CheckResult` entries onto ``self.rqueue``.

    The loop terminates when a sentinel request (``hyperlink is None`` /
    ``uri is None``) is dequeued. Requests whose rate-limit window has not
    elapsed are re-queued after a short sleep.
    """
    kwargs = {}
    if self.config.linkcheck_timeout:
        kwargs['timeout'] = self.config.linkcheck_timeout

    def get_request_headers() -> Dict:
        # Most-specific match wins: scheme://host, scheme://host/, the
        # full URI, then the "*" catch-all entry.
        url = urlparse(uri)
        candidates = ["%s://%s" % (url.scheme, url.netloc),
                      "%s://%s/" % (url.scheme, url.netloc),
                      uri,
                      "*"]
        for u in candidates:
            if u in self.config.linkcheck_request_headers:
                headers = dict(DEFAULT_REQUEST_HEADERS)
                headers.update(self.config.linkcheck_request_headers[u])
                return headers
        return {}

    def check_uri() -> Tuple[str, str, int]:
        # Returns (status, info, code); closes over ``uri`` from the
        # enclosing worker-loop iteration.
        # split off anchor
        if '#' in uri:
            req_url, anchor = uri.split('#', 1)
            for rex in self.anchors_ignore:
                if rex.match(anchor):
                    anchor = None
                    break
        else:
            req_url = uri
            anchor = None
        # handle non-ASCII URIs
        try:
            req_url.encode('ascii')
        except UnicodeError:
            req_url = encode_uri(req_url)
        # Get auth info, if any
        for pattern, auth_info in self.auth:
            if pattern.match(uri):
                break
        else:
            auth_info = None
        # update request headers for the URL
        kwargs['headers'] = get_request_headers()
        try:
            if anchor and self.config.linkcheck_anchors:
                # Read the whole document and see if #anchor exists
                response = requests.get(req_url, stream=True,
                                        config=self.config,
                                        auth=auth_info, **kwargs)
                response.raise_for_status()
                found = check_anchor(response, unquote(anchor))
                if not found:
                    raise Exception(__("Anchor '%s' not found") % anchor)
            else:
                try:
                    # try a HEAD request first, which should be easier on
                    # the server and the network
                    response = requests.head(req_url, allow_redirects=True,
                                             config=self.config,
                                             auth=auth_info, **kwargs)
                    response.raise_for_status()
                # Servers drop the connection on HEAD requests, causing
                # ConnectionError.
                except (ConnectionError, HTTPError, TooManyRedirects) as err:
                    if isinstance(err, HTTPError) and err.response.status_code == 429:
                        # Too Many Requests: handled by the outer except
                        # so the request can be rate-limited and retried.
                        raise
                    # retry with GET request if that fails, some servers
                    # don't like HEAD requests.
                    response = requests.get(req_url, stream=True,
                                            config=self.config,
                                            auth=auth_info, **kwargs)
                    response.raise_for_status()
        except HTTPError as err:
            if err.response.status_code == 401:
                # We'll take "Unauthorized" as working.
                return 'working', ' - unauthorized', 0
            elif err.response.status_code == 429:
                next_check = self.limit_rate(err.response)
                if next_check is not None:
                    self.wqueue.put(CheckRequest(next_check, hyperlink), False)
                    return 'rate-limited', '', 0
                return 'broken', str(err), 0
            elif err.response.status_code == 503:
                # We'll take "Service Unavailable" as ignored.
                return 'ignored', str(err), 0
            else:
                return 'broken', str(err), 0
        except Exception as err:
            return 'broken', str(err), 0
        else:
            # Successful request: clear any recorded rate limit for
            # this host.
            netloc = urlparse(req_url).netloc
            try:
                del self.rate_limits[netloc]
            except KeyError:
                pass
        if response.url.rstrip('/') == req_url.rstrip('/'):
            return 'working', '', 0
        else:
            new_url = response.url
            if anchor:
                new_url += '#' + anchor
            if allowed_redirect(req_url, new_url):
                return 'working', '', 0
            elif response.history:
                # history contains any redirects, get last
                code = response.history[-1].status_code
                return 'redirected', new_url, code
            else:
                return 'redirected', new_url, 0

    def allowed_redirect(url: str, new_url: str) -> bool:
        # True when some linkcheck_allowed_redirects pattern pair matches
        # both the original and the redirected URL.
        for from_url, to_url in self.config.linkcheck_allowed_redirects.items():
            if from_url.match(url) and to_url.match(new_url):
                return True
        return False

    def check(docname: str) -> Tuple[str, str, int]:
        # check for various conditions without bothering the network
        if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'tel:')):
            return 'unchecked', '', 0
        elif not uri.startswith(('http:', 'https:')):
            if uri_re.match(uri):
                # non supported URI schemes (ex. ftp)
                return 'unchecked', '', 0
            else:
                # Treat as a local file relative to the document's source
                # directory.
                srcdir = path.dirname(self.env.doc2path(docname))
                if path.exists(path.join(srcdir, uri)):
                    return 'working', '', 0
                else:
                    return 'broken', '', 0
        # need to actually check the URI
        for _ in range(self.config.linkcheck_retries):
            status, info, code = check_uri()
            if status != "broken":
                break
        return (status, info, code)

    while True:
        check_request = self.wqueue.get()
        try:
            next_check, hyperlink = check_request
            if hyperlink is None:
                # Sentinel: shut this worker down.
                break
            uri, docname, lineno = hyperlink
        except ValueError:
            # old styled check_request (will be deprecated in Sphinx-5.0)
            next_check, uri, docname, lineno = check_request
            if uri is None:
                break
        netloc = urlparse(uri).netloc
        try:
            # Refresh rate limit.
            # When there are many links in the queue, workers are all stuck waiting
            # for responses, but the builder keeps queuing. Links in the queue may
            # have been queued before rate limits were discovered.
            next_check = self.rate_limits[netloc].next_check
        except KeyError:
            pass
        if next_check > time.time():
            # Sleep before putting message back in the queue to avoid
            # waking up other threads.
            time.sleep(QUEUE_POLL_SECS)
            self.wqueue.put(CheckRequest(next_check, hyperlink), False)
            self.wqueue.task_done()
            continue
        status, info, code = check(docname)
        if status == 'rate-limited':
            # The request was re-queued by check_uri(); no result to emit.
            logger.info(darkgray('-rate limited- ') + uri +
                        darkgray(' | sleeping...'))
        else:
            self.rqueue.put(CheckResult(uri, docname, lineno, status,
                                        info, code))
        self.wqueue.task_done()
def prefix_preserved(self):
    r'''The colorized message prefix used when a file is preserved.
    '''
    from sphinx.util.console import darkgray
    prefix = darkgray('PRESERVED: ')
    return prefix
def debug(self, message, *args, **kwargs):
    """Write a dark-gray debug message to the status stream.

    Skipped entirely unless verbosity is at least 2. %-style
    placeholders in *message* are filled from ``args`` (preferred)
    or ``kwargs``.
    """
    if self.verbosity >= 2:
        if args:
            message = message % args
        elif kwargs:
            message = message % kwargs
        self._log(darkgray(message), self._status)
def run(self) -> None:
    """Worker loop for a customized link checker.

    Pulls hyperlinks from ``self.wqueue``, checks each one, and pushes
    6-tuples ``(uri, docname, lineno, status, info, code)`` onto
    ``self.rqueue``. Compared to the stock Sphinx worker, this variant
    additionally resolves document-relative links against pre-built HTML
    files (assumes output under ``_output/html`` -- TODO confirm against
    the build setup), applies a GitHub anchor-name workaround, and caches
    verdicts in ``self._good`` / ``self._broken`` / ``self._redirected``.
    """
    kwargs = {}
    if self.config.linkcheck_timeout:
        kwargs['timeout'] = self.config.linkcheck_timeout

    def get_request_headers() -> Dict:
        # Most-specific match wins: scheme://host, scheme://host/, the
        # full URI, then the "*" catch-all entry.
        url = urlparse(uri)
        candidates = ["%s://%s" % (url.scheme, url.netloc),
                      "%s://%s/" % (url.scheme, url.netloc),
                      uri,
                      "*"]
        for u in candidates:
            if u in self.config.linkcheck_request_headers:
                headers = dict(DEFAULT_REQUEST_HEADERS)
                headers.update(self.config.linkcheck_request_headers[u])
                return headers
        return {}

    def check_uri() -> Tuple[str, str, int]:
        # Returns (status, info, code); closes over ``uri`` from the
        # enclosing worker-loop iteration.
        # split off anchor
        if '#' in uri:
            req_url, anchor = uri.split('#', 1)
            for rex in self.anchors_ignore:
                if rex.match(anchor):
                    anchor = None
                    break
        else:
            req_url = uri
            anchor = None
        # handle non-ASCII URIs
        try:
            req_url.encode('ascii')
        except UnicodeError:
            req_url = encode_uri(req_url)
        # Get auth info, if any
        for pattern, auth_info in self.auth:
            if pattern.match(uri):
                break
        else:
            auth_info = None
        # update request headers for the URL
        kwargs['headers'] = get_request_headers()
        try:
            if anchor and self.config.linkcheck_anchors:
                # Read the whole document and see if #anchor exists
                response = requests.get(req_url, stream=True,
                                        config=self.config,
                                        auth=auth_info, **kwargs)
                response.raise_for_status()
                anchor_str = unquote(anchor)
                # Hack (?): https://github.com/container-storage-interface/spec/blob/master/spec.md#getplugininfo
                # is a valid anchor, but the actual id of the anchor is
                # user-content-getplugininfo, which causes
                # the anchor check to fail:
                # <a id="user-content-getplugininfo" class="anchor" aria-hidden="true" href="#getplugininfo">
                #
                # Might have to be fixed in AnchorCheckParser instead?
                if req_url.startswith('https://github.com/'):
                    anchor_str = "user-content-" + anchor_str
                found = check_anchor(response, anchor_str)
                if not found:
                    raise Exception(__("Anchor '%s' not found") % anchor)
            else:
                try:
                    # try a HEAD request first, which should be easier on
                    # the server and the network
                    response = requests.head(req_url, allow_redirects=True,
                                             config=self.config,
                                             auth=auth_info, **kwargs)
                    response.raise_for_status()
                except (HTTPError, TooManyRedirects) as err:
                    if isinstance(err, HTTPError) and err.response.status_code == 429:
                        # Too Many Requests: let the outer handler apply
                        # rate limiting and requeue.
                        raise
                    # retry with GET request if that fails, some servers
                    # don't like HEAD requests.
                    response = requests.get(req_url, stream=True,
                                            config=self.config,
                                            auth=auth_info, **kwargs)
                    response.raise_for_status()
        except HTTPError as err:
            if err.response.status_code == 401:
                # We'll take "Unauthorized" as working.
                return 'working', ' - unauthorized', 0
            elif err.response.status_code == 429:
                next_check = self.limit_rate(err.response)
                if next_check is not None:
                    self.wqueue.put(CheckRequest(next_check, hyperlink), False)
                    return 'rate-limited', '', 0
                return 'broken', str(err), 0
            elif err.response.status_code == 503:
                # We'll take "Service Unavailable" as ignored.
                return 'ignored', str(err), 0
            else:
                return 'broken', str(err), 0
        except Exception as err:
            return 'broken', str(err), 0
        else:
            # Successful request: drop any recorded rate limit for
            # this host.
            netloc = urlparse(req_url).netloc
            try:
                del self.rate_limits[netloc]
            except KeyError:
                pass
        if response.url.rstrip('/') == req_url.rstrip('/'):
            return 'working', '', 0
        else:
            new_url = response.url
            if anchor:
                new_url += '#' + anchor
            # history contains any redirects, get last
            if response.history:
                code = response.history[-1].status_code
                return 'redirected', new_url, code
            else:
                return 'redirected', new_url, 0

    def check(docname: str) -> Tuple[str, str, int]:
        # check for various conditions without bothering the network
        if len(uri) == 0 or uri.startswith(('mailto:', 'tel:')):
            return 'unchecked', '', 0
        elif not uri.startswith(('http:', 'https:')):
            if uri_re.match(uri):
                # non supported URI schemes (ex. ftp)
                return 'unchecked', '', 0
            else:
                srcdir = path.dirname(self.env.doc2path(docname))
                parts = file_re.match(uri)
                # Bare fragment links ("#anchor") are shaped like a match:
                # (whole, filename, anchor) with an empty filename.
                if not parts and uri.startswith('#'):
                    parts = (uri, '', uri[1:])
                if parts:
                    # A document link. Check that the document exists by reading the generated
                    # HTML. This only works if the documented was already written out, which is
                    # not the case during the initial pass. The Makefile has to first generate
                    # without link checking, then a second time with link checking enabled.
                    if parts[1]:
                        filename = path.join(path.dirname(docname), parts[1])
                    else:
                        # No filename part: the link targets the current
                        # document.
                        filename = docname + ".md"
                    htmlfile = "_output/html/" + filename.replace(
                        ".md", ".html").replace(".rst", ".html")
                    if path.exists(htmlfile):
                        # Optionally check anchor.
                        if parts[2]:
                            anchor = parts[2]
                            with open(htmlfile, encoding='utf-8') as f:
                                content = f.read()
                            if not check_anchor_body(content, anchor):
                                self._broken[uri] = ''
                                return 'broken', "Anchor '%s' not found in %s" % (
                                    anchor, htmlfile), 0
                        return 'working', '', 0
                    else:
                        self._broken[uri] = ''
                        return 'broken', 'file %s not found' % htmlfile, 0
                elif uri.startswith("/"):
                    # Absolute path is treated as relative to the current build directory.
                    if path.exists("." + uri):
                        return 'working', '', 0
                    else:
                        self._broken[uri] = ''
                        return 'broken', 'file not found in build directory', 0
                elif path.exists(path.join(srcdir, uri)):
                    return 'working', '', 0
                else:
                    self._broken[uri] = ''
                    return 'broken', 'file not found relative to ' + srcdir, 0
        elif uri in self._good:
            return 'working', 'old', 0
        elif uri in self._broken:
            return 'broken', self._broken[uri], 0
        elif uri in self._redirected:
            return 'redirected', self._redirected[uri][0], self._redirected[uri][1]
        # need to actually check the URI
        for _ in range(self.config.linkcheck_retries):
            status, info, code = check_uri()
            if status != "broken":
                break
        # Cache the verdict so later occurrences of this URI are cheap.
        if status == "working":
            self._good.add(uri)
        elif status == "broken":
            self._broken[uri] = info
        elif status == "redirected":
            self._redirected[uri] = (info, code)
        return (status, info, code)

    while True:
        check_request = self.wqueue.get()
        try:
            next_check, hyperlink = check_request
            if hyperlink is None:
                # Sentinel: shut this worker down.
                break
            uri, docname, lineno = hyperlink
        except ValueError:
            # old styled check_request (will be deprecated in Sphinx-5.0)
            next_check, uri, docname, lineno = check_request
            if uri is None:
                break
        netloc = urlparse(uri).netloc
        try:
            # Refresh rate limit.
            # When there are many links in the queue, workers are all stuck waiting
            # for responses, but the builder keeps queuing. Links in the queue may
            # have been queued before rate limits were discovered.
            next_check = self.rate_limits[netloc].next_check
        except KeyError:
            pass
        if next_check > time.time():
            # Sleep before putting message back in the queue to avoid
            # waking up other threads.
            time.sleep(QUEUE_POLL_SECS)
            self.wqueue.put(CheckRequest(next_check, hyperlink), False)
            self.wqueue.task_done()
            continue
        status, info, code = check(docname)
        if status == 'rate-limited':
            # check_uri() already requeued the request; nothing to report.
            logger.info(darkgray('-rate limited- ') + uri +
                        darkgray(' | sleeping...'))
        else:
            self.rqueue.put((uri, docname, lineno, status, info, code))
        self.wqueue.task_done()
class DocumentationManager(abctools.AbjadObject): r'''An API documentation manager. ''' ### CLASS VARIABLES ### __documentation_section__ = 'Documenters' api_directory_name = 'api' api_title = 'Abjad API' root_package_name = 'abjad' source_directory_path_parts = ('docs', 'source') tools_packages_package_path = 'abjad.tools' prefix_ignored = lightgray('IGNORED: ') prefix_preserved = darkgray('PRESERVED: ') prefix_pruned = red('PRUNED: ') prefix_rewrote = green('REWROTE: ') prefix_wrote = yellow('WROTE: ') ### PRIVATE METHODS ### def _build_attribute_section( self, cls, attrs, directive, title, ): r''' ''' from abjad.tools import documentationtools result = [] if attrs: result.append(documentationtools.ReSTHeading( level=3, text=title, )) for attr in attrs: options = { 'noindex': True, } autodoc = documentationtools.ReSTAutodocDirective( argument='{}.{}'.format(cls.__name__, attr.name), directive=directive, options=options, ) if cls is attr.defining_class: result.append(autodoc) else: container = documentationtools.ReSTDirective( argument='inherited', directive='container', ) container.append(autodoc) html_only = documentationtools.ReSTDirective( argument='html', directive='only', ) html_only.append(container) result.append(html_only) return result def _build_attributes_autosummary( self, cls, class_methods, data, inherited_attributes, methods, readonly_properties, readwrite_properties, special_methods, static_methods, ): r''' ''' from abjad.tools import documentationtools result = [] attributes = [] attributes.extend(readonly_properties) attributes.extend(readwrite_properties) attributes.extend(methods) attributes.extend(class_methods) attributes.extend(static_methods) attributes.sort(key=lambda x: x.name) attributes.extend(special_methods) if attributes: autosummary = documentationtools.ReSTAutosummaryDirective() for attribute in attributes: autosummary.append('~{}.{}.{}'.format( cls.__module__, cls.__name__, attribute.name, )) html_only = 
documentationtools.ReSTOnlyDirective(argument='html') html_only.append( documentationtools.ReSTHeading( level=3, text='Attribute summary', )) html_only.append(autosummary) result.append(html_only) return result def _build_bases_section(self, cls): from abjad.tools import documentationtools result = [] result.append(documentationtools.ReSTHeading( level=3, text='Bases', )) mro = inspect.getmro(cls)[1:] for cls in mro: parts = cls.__module__.split('.') + [cls.__name__] while 1 < len(parts) and parts[-1] == parts[-2]: parts.pop() packagesystem_path = '.'.join(parts) text = '- :py:class:`{}`'.format(packagesystem_path) paragraph = documentationtools.ReSTParagraph( text=text, wrap=False, ) result.append(paragraph) return result def _build_enumeration_section(self, cls): from abjad.tools import documentationtools result = [] if not issubclass(cls, enum.Enum): return result items = sorted(cls, key=lambda x: x.name) if items: result.append( documentationtools.ReSTHeading( level=3, text='Enumeration Items', )) for item in items: name = item.name value = item.value line = '- `{}`: {}'.format(name, value) paragraph = documentationtools.ReSTParagraph( text=line, wrap=False, ) result.append(paragraph) return result def _collect_class_attributes(self, cls): ignored_special_methods = ( '__getattribute__', '__getnewargs__', '__getstate__', '__init__', '__reduce__', '__reduce_ex__', '__setstate__', '__sizeof__', '__subclasshook__', 'fromkeys', 'pipe_cloexec', ) class_methods = [] data = [] inherited_attributes = [] methods = [] readonly_properties = [] readwrite_properties = [] special_methods = [] static_methods = [] attrs = inspect.classify_class_attrs(cls) for attr in attrs: if attr.defining_class is object: continue if attr.defining_class is not cls: inherited_attributes.append(attr) if attr.kind == 'method': if attr.name not in ignored_special_methods: if attr.name.startswith('__'): special_methods.append(attr) elif not attr.name.startswith('_'): methods.append(attr) elif 
attr.kind == 'class method': if attr.name not in ignored_special_methods: if attr.name.startswith('__'): special_methods.append(attr) elif not attr.name.startswith('_'): class_methods.append(attr) elif attr.kind == 'static method': if attr.name not in ignored_special_methods: if attr.name.startswith('__'): special_methods.append(attr) elif not attr.name.startswith('_'): static_methods.append(attr) elif attr.kind == 'property' and not attr.name.startswith('_'): if attr.object.fset is None: readonly_properties.append(attr) else: readwrite_properties.append(attr) elif attr.kind == 'data' and not attr.name.startswith('_') \ and attr.name not in getattr(cls, '__slots__', ()): data.append(attr) class_methods = tuple(sorted(class_methods)) data = tuple(sorted(data)) inherited_attributes = tuple(sorted(inherited_attributes)) methods = tuple(sorted(methods)) readonly_properties = tuple(sorted(readonly_properties)) readwrite_properties = tuple(sorted(readwrite_properties)) special_methods = tuple(sorted(special_methods)) static_methods = tuple(sorted(static_methods)) result = ( class_methods, data, inherited_attributes, methods, readonly_properties, readwrite_properties, special_methods, static_methods, ) return result def _ensure_directory(self, path): path = os.path.dirname(path) if not os.path.exists(path): os.makedirs(path) def _get_api_directory_path(self, source_directory): if self.api_directory_name: path = os.path.join( source_directory, self.api_directory_name, ) else: path = source_directory return path def _get_api_index_file_path(self, source_directory): if self.api_directory_name: directory_path = os.path.join( source_directory, self.api_directory_name, ) else: directory_path = source_directory api_index_path = os.path.join( directory_path, 'index.rst', ) return api_index_path def _get_api_index_rst(self, tools_packages): r''' ''' from abjad.tools import documentationtools document = documentationtools.ReSTDocument() heading = documentationtools.ReSTHeading( 
level=2, text=self.api_title, ) document.append(heading) toc = documentationtools.ReSTTOCDirective(options={ 'maxdepth': 3, 'includehidden': True, }, ) for tools_package in tools_packages: tools_package_parts = tools_package.__package__.split('.')[1:] tools_package_path = '/'.join(tools_package_parts) toc_item = documentationtools.ReSTTOCItem( text='{}/index'.format(tools_package_path), ) toc.append(toc_item) document.append(toc) return document def _get_class_rst(self, cls): import abjad module_name, _, class_name = cls.__module__.rpartition('.') tools_package_python_path = '.'.join(cls.__module__.split('.')[:-1]) ( class_methods, data, inherited_attributes, methods, readonly_properties, readwrite_properties, special_methods, static_methods, ) = self._collect_class_attributes(cls) document = abjad.documentationtools.ReSTDocument() module_directive = abjad.documentationtools.ReSTDirective( directive='currentmodule', argument=tools_package_python_path, ) document.append(module_directive) heading = abjad.documentationtools.ReSTHeading( level=2, text=class_name, ) document.append(heading) autoclass_directive = abjad.documentationtools.ReSTAutodocDirective( argument=cls.__name__, directive='autoclass', ) document.append(autoclass_directive) try: lineage_heading = abjad.documentationtools.ReSTHeading( level=3, text='Lineage', ) document.append(lineage_heading) lineage_graph = self._get_lineage_graph(cls) lineage_graph.attributes['background'] = 'transparent' lineage_graph.attributes['rankdir'] = 'LR' graphviz_directive = \ abjad.documentationtools.ReSTGraphvizDirective( graph=lineage_graph, ) graphviz_container = abjad.documentationtools.ReSTDirective( directive='container', argument='graphviz', ) graphviz_container.append(graphviz_directive) document.append(graphviz_container) except: traceback.print_exc() document.extend(self._build_bases_section(cls)) document.extend(self._build_enumeration_section(cls)) document.extend( self._build_attributes_autosummary( cls, 
class_methods, data, inherited_attributes, methods, readonly_properties, readwrite_properties, special_methods, static_methods, )) document.extend( self._build_attribute_section( cls, readonly_properties, 'autoattribute', 'Read-only properties', )) document.extend( self._build_attribute_section( cls, readwrite_properties, 'autoattribute', 'Read/write properties', )) document.extend( self._build_attribute_section( cls, methods, 'automethod', 'Methods', )) document.extend( self._build_attribute_section( cls, sorted( class_methods + static_methods, key=lambda x: x.name, ), 'automethod', 'Class & static methods', )) # document.extend(self._build_attribute_section( # cls, # class_methods, # 'automethod', # 'Class methods', # )) # document.extend(self._build_attribute_section( # cls, # static_methods, # 'automethod', # 'Static methods', # )) document.extend( self._build_attribute_section( cls, special_methods, 'automethod', 'Special methods', )) return document def _get_class_summary(self, cls): r''' ''' doc = cls.__doc__ if doc is None: doc = '' doc = doc.splitlines() m = re.search(r"^([A-Z].*?\.)(?:\s|$)", " ".join(doc).strip()) if m: summary = m.group(1).strip() elif doc: summary = doc[0].strip() else: summary = '' return summary def _get_function_rst(self, function): r''' ''' import abjad document = abjad.documentationtools.ReSTDocument() tools_package_python_path = '.'.join( function.__module__.split('.')[:-1]) module_directive = abjad.documentationtools.ReSTDirective( directive='currentmodule', argument=tools_package_python_path, ) document.append(module_directive) heading = abjad.documentationtools.ReSTHeading( level=2, text=function.__name__, ) document.append(heading) autodoc_directive = abjad.documentationtools.ReSTAutodocDirective( argument=function.__name__, directive='autofunction', ) document.append(autodoc_directive) return document def _get_ignored_classes(self): from abjad.tools import abjadbooktools ignored_classes = set([ 
abjadbooktools.abjad_import_block, abjadbooktools.abjad_input_block, abjadbooktools.abjad_output_block, abjadbooktools.abjad_thumbnail_block, ]) return ignored_classes def _get_tools_package_graph(self, tools_package): from abjad.tools import documentationtools inheritance_graph = documentationtools.InheritanceGraph( lineage_addresses=[tools_package.__package__]) lineage_graph = inheritance_graph.__graph__() lineage_graph.attributes['background'] = 'transparent' lineage_graph.attributes['rankdir'] = 'LR' return lineage_graph def _get_lineage_graph(self, cls): def get_node_name(original_name): parts = original_name.split('.') name = [parts[0]] for part in parts[1:]: if part != name[-1]: name.append(part) if name[0] in ('abjad', 'experimental', 'ide'): return str('.'.join(name[2:])) return str('.'.join(name)) from abjad.tools import documentationtools addresses = ('abjad', 'experimental', 'ide') module_name, _, class_name = cls.__module__.rpartition('.') node_name = get_node_name(module_name + '.' 
+ class_name) importlib.import_module(module_name) lineage = documentationtools.InheritanceGraph( addresses=addresses, lineage_addresses=((module_name, class_name), )) graph = lineage.__graph__() maximum_node_count = 30 if maximum_node_count < len(graph.leaves): lineage = documentationtools.InheritanceGraph( addresses=addresses, lineage_addresses=((module_name, class_name), ), lineage_prune_distance=2, ) graph = lineage.__graph__() if maximum_node_count < len(graph.leaves): lineage = documentationtools.InheritanceGraph( addresses=addresses, lineage_addresses=((module_name, class_name), ), lineage_prune_distance=1, ) graph = lineage.__graph__() if maximum_node_count < len(graph.leaves): lineage = documentationtools.InheritanceGraph( addresses=((module_name, class_name), ), ) graph = lineage.__graph__() graph_node = graph[node_name] graph_node.attributes['color'] = 'black' graph_node.attributes['fontcolor'] = 'white' graph_node.attributes['style'] = ('filled', 'rounded') graph_node = graph[node_name] graph_node.attributes['label'] = \ '<<B>{}</B>>'.format(graph_node.attributes['label']) return graph def _get_source_directory(self): root_package = importlib.import_module(self.root_package_name) root_package_path = root_package.__path__[0] path_parts = [root_package_path] path_parts.extend(self.source_directory_path_parts) source_directory = os.path.join(*path_parts) return source_directory def _get_tools_packages(self): r''' ''' root_module = self._get_root_module() tools_packages_module = self._get_tools_packages_module() tools_packages = [] for name in dir(tools_packages_module): if name.startswith('_'): continue module = getattr(tools_packages_module, name) if not isinstance(module, types.ModuleType): continue if not module.__package__.startswith(root_module.__package__): continue tools_packages.append(module) tools_packages.sort(key=lambda x: x.__name__) tools_packages = tuple(tools_packages) return tools_packages def _get_root_module(self): r''' ''' root_module = 
importlib.import_module(self.root_package_name) return root_module def _get_tools_packages_module(self): r''' ''' tools_packages_module = importlib.import_module( self.tools_packages_package_path) return tools_packages_module def _get_tools_package_contents(self, tools_package): r''' ''' classes = [] functions = [] for name in dir(tools_package): if name.startswith('_'): continue obj = getattr(tools_package, name) if not hasattr(obj, '__module__'): print('Warning: no nominative object in {}'.format(obj)) continue if not obj.__module__.startswith(tools_package.__package__): continue if isinstance(obj, type): classes.append(obj) elif isinstance(obj, types.FunctionType): functions.append(obj) classes.sort(key=lambda x: x.__name__) classes = tuple(classes) functions.sort(key=lambda x: x.__name__) functions = tuple(functions) return classes, functions def _get_tools_package_rst(self, tools_package): r''' ''' from abjad.tools import documentationtools classes, functions = self._get_tools_package_contents(tools_package, ) document = documentationtools.ReSTDocument() heading = documentationtools.ReSTHeading( level=2, text=tools_package.__name__.split('.')[-1], ) document.append(heading) automodule_directive = documentationtools.ReSTAutodocDirective( argument=tools_package.__name__, directive='automodule', ) document.append(automodule_directive) ignored_classes = self._get_ignored_classes() classes = [_ for _ in classes if _ not in ignored_classes] if classes: rule = documentationtools.ReSTHorizontalRule() document.append(rule) lineage_heading = documentationtools.ReSTHeading( level=3, text='Lineage', ) document.append(lineage_heading) lineage_graph = self._get_tools_package_graph(tools_package) graphviz_directive = documentationtools.ReSTGraphvizDirective( graph=lineage_graph, ) graphviz_container = documentationtools.ReSTDirective( directive='container', argument='graphviz', ) graphviz_container.append(graphviz_directive) document.append(graphviz_container) if classes: 
sections = {} for cls in classes: documentation_section = getattr( cls, '__documentation_section__', None, ) if documentation_section is None: if issubclass(cls, enum.Enum): documentation_section = 'Enumerations' elif issubclass(cls, Exception): documentation_section = 'Errors' else: documentation_section = 'Classes' if inspect.isabstract(cls): documentation_section = 'Abstract Classes' if documentation_section not in sections: sections[documentation_section] = [] sections[documentation_section].append(cls) section_names = sorted(sections) if 'Main Classes' in sections: section_names.remove('Main Classes') section_names.insert(0, 'Main Classes') if 'Errors' in sections: section_names.remove('Errors') section_names.append('Errors') for section_name in section_names: rule = documentationtools.ReSTHorizontalRule() document.append(rule) heading = documentationtools.ReSTHeading( level=3, text=section_name, ) #heading = documentationtools.ReSTDirective( # directive='rubric', # argument=section_name, # ) document.append(heading) toc = documentationtools.ReSTTOCDirective( options={ #'caption': section_name, 'hidden': True, #'name': '{}__{}'.format( # tools_package.__name__, # section_name, # ), }, ) for cls in sections[section_name]: class_name = cls.__name__ if class_name == 'Index': class_name = '_Index' toc_item = documentationtools.ReSTTOCItem( text=class_name, ) toc.append(toc_item) document.append(toc) autosummary = documentationtools.ReSTAutosummaryDirective( options={ 'nosignatures': True, }, ) for cls in sections[section_name]: item = documentationtools.ReSTAutosummaryItem( text=cls.__name__, ) autosummary.append(item) document.append(autosummary) if functions: if classes: rule = documentationtools.ReSTHorizontalRule() document.append(rule) section_name = 'Functions' heading = documentationtools.ReSTHeading( level=3, text=section_name, ) #heading = documentationtools.ReSTDirective( # directive='rubric', # argument=section_name, # ) document.append(heading) toc = 
documentationtools.ReSTTOCDirective( options={ #'caption': section_name, 'hidden': True, #'name': '{}__{}'.format( # tools_package.__name__, # section_name, # ), }, ) for function in functions: toc_item = documentationtools.ReSTTOCItem( text=function.__name__, ) toc.append(toc_item) document.append(toc) autosummary = documentationtools.ReSTAutosummaryDirective(options={ 'nosignatures': True, }, ) for function in functions: item = documentationtools.ReSTAutosummaryItem( text=function.__name__, ) autosummary.append(item) document.append(autosummary) return document def _module_path_to_file_path(self, module_path, source_directory): r''' ''' parts = module_path.split('.') parts = parts[1:] if parts[-1] == 'Index': parts[-1] = '_' + parts[-1] + '.rst' else: parts[-1] = parts[-1] + '.rst' parts.insert(0, self._get_api_directory_path(source_directory)) path = os.path.join(*parts) return path def _package_path_to_file_path(self, package_path, source_directory): r''' ''' parts = package_path.split('.') parts = parts[1:] parts.append('index.rst') parts.insert(0, self._get_api_directory_path(source_directory)) path = os.path.join(*parts) return path def _remove_api_directory(self): r''' ''' path = self._get_api_directory_path() if os.path.exists(path): shutil.rmtree(path) def _write(self, file_path, string, rewritten_files): r''' ''' should_write = True if os.path.exists(file_path): with open(file_path, 'r') as file_pointer: old_string = file_pointer.read() if old_string == string: should_write = False if should_write: if os.path.exists(file_path): print('{}{}'.format( self.prefix_rewrote, os.path.relpath(file_path), )) else: print('{}{}'.format( self.prefix_wrote, os.path.relpath(file_path), )) with open(file_path, 'w') as file_pointer: file_pointer.write(string) else: print('{}{}'.format( self.prefix_preserved, os.path.relpath(file_path), )) rewritten_files.add(file_path) ### PUBLIC METHODS ### def execute(self): r'''Executes documentation manager. 
''' print('Rebuilding documentation source.') source_directory = self._get_source_directory() with systemtools.TemporaryDirectoryChange( directory=source_directory, verbose=True, ): rewritten_files = set() tools_packages = self._get_tools_packages() api_index_rst = self._get_api_index_rst(tools_packages) api_index_file_path = self._get_api_index_file_path( source_directory) self._ensure_directory(api_index_file_path) self._write( api_index_file_path, api_index_rst.rest_format, rewritten_files, ) ignored_classes = self._get_ignored_classes() for package in tools_packages: tools_package_rst = self._get_tools_package_rst(package) tools_package_file_path = self._package_path_to_file_path( package.__package__, source_directory, ) self._ensure_directory(tools_package_file_path) self._write( tools_package_file_path, tools_package_rst.rest_format, rewritten_files, ) classes, functions = \ self._get_tools_package_contents(package) for cls in classes: file_path = self._module_path_to_file_path( cls.__module__, source_directory, ) if cls in ignored_classes: print('{}{}'.format( self.prefix_ignored, os.path.relpath(file_path), )) continue rst = self._get_class_rst(cls) self._write(file_path, rst.rest_format, rewritten_files) for function in functions: file_path = self._module_path_to_file_path( function.__module__, source_directory, ) rst = self._get_function_rst(function) self._write(file_path, rst.rest_format, rewritten_files) for root, directory_names, file_names in os.walk( self._get_api_directory_path(source_directory), topdown=False, ): for file_name in file_names[:]: file_path = os.path.join(root, file_name) if not file_path.endswith('.rst'): continue if file_path not in rewritten_files: file_names.remove(file_name) os.remove(file_path) print('{}{}'.format( self.prefix_pruned, os.path.relpath(file_path), )) if not file_names and not directory_names: shutil.rmtree(root) print('{}{}'.format( self.prefix_pruned, os.path.relpath(root), )) @staticmethod def make_readme(): 
r'''Creates README.rst file. ''' import abjad abjad_path = abjad.__path__[0] version = abjad.__version__ docs_path = os.path.join(abjad_path, 'docs', 'source') abstract_path = os.path.join(docs_path, 'abstract.txt') badges_path = os.path.join(docs_path, 'badges.txt') links_path = os.path.join(docs_path, 'links.txt') installation_path = os.path.join(docs_path, 'installation.rst') result = 'Abjad {}'.format(version) result = ['#' * len(result), result, '#' * len(result)] with open(abstract_path, 'r') as file_pointer: result.append('') result.append(file_pointer.read()) with open(links_path, 'r') as file_pointer: result.append('') result.append(file_pointer.read()) with open(badges_path, 'r') as file_pointer: result.append('') result.append(file_pointer.read()) with open(installation_path, 'r') as file_pointer: result.append('') result.append(file_pointer.read()) result = '\n'.join(result) readme_path = os.path.join(abjad_path, '..', 'README.rst') with open(readme_path, 'w') as file_pointer: file_pointer.write(result)