def _interpret_asset_path(self, node):
    """
    find an absolute path for a target asset

    Returns the absolute path to an asset. For unsupported asset types,
    this method will return ``None`` values. This method should not be
    invoked on external assets (i.e. URLs).

    Args:
        node: the node to parse

    Returns:
        the absolute path, or ``None`` if one cannot be determined
    """
    path = None
    if isinstance(node, nodes.image):
        # uri's will be relative to documentation root.
        path = str(node['uri'])
    elif isinstance(node, addnodes.download_reference):
        # reftarget will be a reference to the asset with respect to the
        # document (refdoc) holding this reference. Use reftarget and refdoc
        # to find a proper path.
        docdir = os.path.dirname(node['refdoc'])
        path = os.path.join(docdir, node['reftarget'])

    # resolve the (possibly relative) path against the environment; a
    # `None` path for unsupported node types simply fails the lookup
    abspath = find_env_abspath(self.env, self.outdir, path)

    if not abspath:
        logger.verbose('failed to find path: %s' % path)

    return abspath
def _parse_doctree_title(self, docname, doctree):
    """
    parse a doctree for a raw title value

    Examine a document's doctree value to find a title value from a title
    section element. If no title is found, a title can be automatically
    generated (if configuration permits) or a `None` value is returned.

    Args:
        docname: the name of the document being parsed
        doctree: the doctree to extract a title from

    Returns:
        the title value, or ``None`` if no title is available
    """
    doctitle = None

    # prefer the first title element found in the document (if any)
    title_element = self._find_title_element(doctree)
    if title_element:
        doctitle = title_element.astext()

    if not doctitle:
        if not self.config.confluence_disable_autogen_title:
            # fall back to a title generated from the document's name
            doctitle = "autogen-{}".format(docname)
            if self.publish:
                # fix: corrected message grammar ("an generated" -> "a
                # generated")
                ConfluenceLogger.warn(
                    "document will be published using a "
                    "generated title value: {}".format(docname))
        elif self.publish:
            ConfluenceLogger.warn(
                "document will not be published since it "
                "has no title: {}".format(docname))

    return doctitle
def write(self, build_docnames, updated_docnames, method='update'):
    """
    assemble and write the single confluence document

    Assembles every processed document into a single doctree anchored on
    the configured master document and writes the combined result. The
    master document must exist and provide a title.
    """
    docnames = self.env.all_docs
    if self.config.master_doc not in docnames:
        # fix: corrected message grammar ("required" -> "requires"),
        # matching the newer variant of this builder
        ConfluenceLogger.error('singleconfluence requires master_doc')
        return

    root_doctitle = self._process_root_document()
    if not root_doctitle:
        ConfluenceLogger.error(
            'singleconfluence requires title on master_doc')
        return

    with progress_message(__('assembling single confluence document')):
        # merged section/figure numbers are needed for reference building
        self.env.toc_secnumbers = self.assemble_toc_secnumbers()
        self.env.toc_fignumbers = self.assemble_toc_fignumbers()

        # register title targets for references before assembling doc
        # re-works them into a single document
        for docname in docnames:
            doctree = self.env.get_doctree(docname)
            self._register_doctree_title_targets(docname, doctree)

        doctree = self.assemble_doctree()
        self._prepare_doctree_writing(self.config.master_doc, doctree)
        self.assets.processDocument(doctree, self.config.master_doc)

    with progress_message(__('writing single confluence document')):
        self.write_doc_serialized(self.config.master_doc, doctree)
        self.write_doc(self.config.master_doc, doctree)
def depart_document(self, node):
    """
    finalize the document's content

    Assembles the final document text from an optional header file, the
    translated body and an optional footer file. Fragment read failures
    are logged and the fragment is simply omitted.
    """
    self.document = ''

    def read_fragment(fragment_file):
        # best-effort read; a failure only drops the fragment
        try:
            with io.open(fragment_file, encoding='utf-8') as f:
                return f.read() + self.nl
        except (IOError, OSError) as err:
            ConfluenceLogger.warn('error reading file '
                '{}: {}'.format(fragment_file, err))
            return ''

    # prepend header (if any)
    header = self.builder.config.confluence_header_file
    if header is not None:
        self.document += read_fragment(
            path.join(self.builder.env.srcdir, header))

    self.document += ''.join(self.body)

    # append footer (if any)
    footer = self.builder.config.confluence_footer_file
    if footer is not None:
        self.document += read_fragment(
            path.join(self.builder.env.srcdir, footer))
def process_file_node(self, node, docname, standalone=False):
    """
    process a file node

    Tracks the asset referenced by a (download) file node in this manager.
    Other helper methods can later be used to pull the tracked asset
    information when needed. External references (URLs) are ignored.

    Args:
        node: the file node
        docname: the document's name
        standalone (optional): ignore hash mappings (defaults to False)

    Returns:
        the key, document name and path (each ``None`` when not tracked)
    """
    target = node['reftarget']

    # external references (urls) are not tracked as assets
    if '://' not in target:
        logger.verbose('process file node: %s' % target)

        asset_path = self._interpret_asset_path(node)
        if asset_path:
            return self._handle_entry(asset_path, docname, standalone)

    return None, None, None
def process_image_node(self, node, docname, standalone=False):
    """
    process an image node

    Tracks the asset referenced by an image node in this manager. Other
    helper methods can later be used to pull the tracked asset information
    when needed. External references (URLs) and inline data URIs are
    ignored.

    Args:
        node: the image node
        docname: the document's name
        standalone (optional): ignore hash mappings (defaults to False)

    Returns:
        the key, document name and path (each ``None`` when not tracked)
    """
    uri = str(node['uri'])

    # inline data and external references (urls) are not tracked as assets
    if not uri.startswith('data:') and '://' not in uri:
        logger.verbose('process image node: %s' % uri)

        asset_path = self._interpret_asset_path(node)
        if asset_path:
            return self._handle_entry(asset_path, docname, standalone)

    return None, None, None
def remove_page(self, page_id):
    """
    remove a page from the configured Confluence space

    Honors dry-run and only-new modes (no removal performed). A removal
    that fails with a rolled-back transaction is retried once after a
    short pause; a removal of already-missing content is ignored.
    """
    if self.dryrun:
        self._dryrun('removing page', page_id)
        return
    elif self.onlynew:
        self._onlynew('page removal restricted', page_id)
        return

    try:
        try:
            self.rest_client.delete('content', page_id)
        except ConfluenceBadApiError as ex:
            # a rolled-back transaction may be transient; pause briefly
            # and retry the removal once
            if 'Transaction rolled back' not in str(ex):
                raise

            logger.warn('delete failed; retrying...')
            time.sleep(3)

            self.rest_client.delete('content', page_id)
    except ConfluenceBadApiError as ex:
        # Check if Confluence reports that this content does not exist. If
        # so, we want to suppress the API error. This is most likely a
        # result of a Confluence instance reporting a page descendant
        # identifier which no longer exists (possibly a caching issue).
        if 'No content found with id' not in str(ex):
            raise

        logger.verbose('ignore missing delete for page '
            'identifier: {}'.format(page_id))
    except ConfluencePermissionError:
        raise ConfluencePermissionError(
            """Publish user does not have permission to delete """
            """from the configured space."""
        )
def build_main(args_parser):
    """
    build mainline

    The mainline for the 'build' action.

    Args:
        args_parser: the argument parser to use for argument processing

    Returns:
        the exit code
    """
    args_parser.add_argument('-D', action='append', default=[], dest='define')
    args_parser.add_argument('--output-dir', '-o')

    args, unknown_args = args_parser.parse_known_args(sys.argv[1:])
    if unknown_args:
        logger.warn('unknown arguments: {}'.format(' '.join(unknown_args)))

    # compile configuration overrides from any `-D key=value` entries
    overrides = {}
    for entry in args.define:
        try:
            key, value = entry.split('=', 1)
        except ValueError:
            logger.error('invalid define provided in command line')
            return 1
        overrides[key] = value

    # resolve working, output and doctree directories
    work_dir = args.work_dir if args.work_dir else os.getcwd()
    if args.output_dir:
        output_dir = args.output_dir
    else:
        output_dir = os.path.join(work_dir, '_build', 'confluence')
    doctrees_dir = os.path.join(output_dir, '.doctrees')
    builder = args.action if args.action else DEFAULT_BUILDER

    verbosity = 0
    if args.verbose:
        try:
            verbosity = int(args.verbose)
        except ValueError:
            pass

    # run sphinx engine
    with docutils_namespace():
        app = Sphinx(
            work_dir,                 # document sources
            work_dir,                 # directory with configuration
            output_dir,               # output for generated documents
            doctrees_dir,             # output for doctree files
            builder,                  # builder to execute
            confoverrides=overrides,  # configuration overload
            freshenv=True,            # fresh environment
            verbosity=verbosity)      # verbosity
        app.build(force_all=True)

    return 0
def finish(self):
    # finalize the build: publish documents/assets (when configured) and
    # build intersphinx data

    # restore environment's get_doctree if it was temporarily replaced
    if self._original_get_doctree:
        self.env.get_doctree = self._original_get_doctree

    if self.publish:
        self.legacy_assets = {}
        self.legacy_pages = None
        self.parent_id = self.publisher.getBasePageId()

        # publish each generated document, reading the output previously
        # written by write_doc
        for docname in status_iterator(self.publish_docnames,
                'publishing documents... ',
                length=len(self.publish_docnames),
                verbosity=self.app.verbosity):
            if self._check_publish_skip(docname):
                self.verbose(docname + ' skipped due to configuration')
                continue
            docfile = path.join(self.outdir, self.file_transform(docname))

            try:
                with io.open(docfile, 'r', encoding='utf-8') as file:
                    output = file.read()
                    self.publish_doc(docname, output)
            except (IOError, OSError) as err:
                ConfluenceLogger.warn("error reading file %s: "
                    "%s" % (docfile, err))

        # used by status_iterator to label each asset entry (its key)
        def to_asset_name(asset):
            return asset[0]

        # publish each tracked asset (attachments) in binary mode
        assets = self.assets.build()
        for asset in status_iterator(assets, 'publishing assets... ',
                length=len(assets), verbosity=self.app.verbosity,
                stringify_func=to_asset_name):
            key, absfile, type, hash, docname = asset
            if self._check_publish_skip(docname):
                self.verbose(key + ' skipped due to configuration')
                continue

            try:
                with open(absfile, 'rb') as file:
                    output = file.read()
                    self.publish_asset(key, docname, output, type, hash)
            except (IOError, OSError) as err:
                ConfluenceLogger.warn("error reading asset %s: "
                    "%s" % (key, err))

        self.publish_purge()
        self.publish_finalize()

    # NOTE(review): assumed unconditional (outside the publish block) --
    # original indentation was ambiguous; confirm against upstream
    self.info('building intersphinx... ', nonl=True)
    build_intersphinx(self)
    self.info('done\n')
def _replace_inheritance_diagram(self, doctree):
    """
    replace inheritance diagrams with images

    Inheritance diagrams are pre-processed and replaced with respective
    images in the processed documentation set. Typically, the node support
    from `sphinx.ext.inheritance_diagram` would be added to the builder;
    however, this extension renders graphs during the translation phase
    (which is not ideal for how assets are managed in this extension).

    Instead, this implementation just traverses for inheritance diagrams,
    generates renderings and replaces the nodes with image nodes (which in
    turn will be handled by the existing image-based implementation).

    Note that the interactive image map is not handled in this
    implementation since Confluence does not support image maps (without
    external extensions).

    Args:
        doctree: the doctree to replace blocks on
    """
    # extension not available; nothing to do
    if inheritance_diagram is None:
        return

    # graphviz's render_dot call expects a translator to be passed in; mock
    # a translator tied to our self-builder
    class MockTranslator:
        def __init__(self, builder):
            self.builder = builder
    mock_translator = MockTranslator(self)

    for node in doctree.traverse(inheritance_diagram.inheritance_diagram):
        graph = node['graph']

        # derive a stable output name from the graph's hash
        graph_hash = inheritance_diagram.get_graph_hash(node)
        name = 'inheritance%s' % graph_hash

        dotcode = graph.generate_dot(name, {}, env=self.env)

        try:
            _, out_filename = render_dot(mock_translator, dotcode, {},
                self.graphviz_output_format, 'inheritance')
            # no rendering produced; drop the node entirely
            if not out_filename:
                node.parent.remove(node)
                continue

            new_node = nodes.image(candidates={'?'}, uri=out_filename)
            if 'align' in node:
                new_node['align'] = node['align']
            node.replace_self(new_node)
        except GraphvizError as exc:
            # a failed render is logged and the diagram is dropped
            ConfluenceLogger.warn('dot code {}: {}'.format(dotcode, exc))
            node.parent.remove(node)
def replace_graphviz_nodes(builder, doctree):
    """
    replace graphviz nodes with images

    graphviz nodes are pre-rendered and swapped for image nodes within the
    processed documentation set. Typically, the node support from
    `sphinx.ext.graphviz` would be added to the builder; however, this
    extension renders graphs during the translation phase (which is not
    ideal for how assets are managed in this extension). Instead, graphviz
    nodes are traversed, rendered and replaced with image nodes (which in
    turn will be handled by the existing image-based implementation).

    Args:
        builder: the builder
        doctree: the doctree to replace blocks on
    """
    # honor any user request to disable implemented extension changes
    if 'ext-graphviz' in builder.config.confluence_adv_restricted:
        return

    if graphviz is None:
        return

    # render_dot expects a translator; provide a minimal stand-in bound
    # to this builder
    class _TranslatorShim:
        def __init__(self, builder):
            self.builder = builder

    shim = _TranslatorShim(builder)

    for node in doctree.traverse(graphviz):
        try:
            _, out_filename = render_dot(shim, node['code'],
                node['options'], builder.graphviz_output_format, 'graphviz')
            if not out_filename:
                node.parent.remove(node)
                continue
        except GraphvizError as exc:
            ConfluenceLogger.warn('dot code {}: {}'.format(node['code'], exc))
            node.parent.remove(node)
            continue

        img = nodes.image(candidates={'?'}, uri=out_filename)
        if 'align' in node:
            img['align'] = node['align']
        node.replace_self(img)
def write_doc(self, docname, doctree):
    # write a single document's translated output to disk

    # documents omitted (e.g. by configuration) produce no output
    if docname in self.omitted_docnames:
        return

    # inject previous/next navigation at the requested location(s)
    if self.prev_next_loc in ('top', 'both'):
        navnode = self._build_navigation_node(docname)
        if navnode:
            navnode.top = True
            doctree.insert(0, navnode)

    if self.prev_next_loc in ('bottom', 'both'):
        navnode = self._build_navigation_node(docname)
        if navnode:
            navnode.bottom = True
            doctree.append(navnode)

    # expose this document's section/figure numbers to the translator
    self.secnumbers = self.env.toc_secnumbers.get(docname, {})
    self.fignumbers = self.env.toc_fignumbers.get(docname, {})

    # remove title from page contents (if any)
    if self.config.confluence_remove_title:
        title_element = self._find_title_element(doctree)
        if title_element:
            # If the removed title is referenced to from within the same
            # document (i.e. a local table of contents entry), flag any
            # references pointing to it as a "top" (anchor) reference. This
            # can be used later in a translator to hint at what type of link
            # to build.
            if 'refid' in title_element:
                for node in doctree.traverse(nodes.reference):
                    if 'ids' in node and node['ids']:
                        for id in node['ids']:
                            if id == title_element['refid']:
                                node['top-reference'] = True
                                break
            title_element.parent.remove(title_element)

    # This method is taken from TextBuilder.write_doc()
    # with minor changes to support :confval:`rst_file_transform`.
    destination = StringOutput(encoding='utf-8')

    self.writer.write(doctree, destination)
    outfilename = path.join(self.outdir, self.file_transform(docname))

    # a failed write is logged as a warning and does not abort the build
    if self.writer.output:
        ensuredir(path.dirname(outfilename))
        try:
            with io.open(outfilename, 'w', encoding='utf-8') as file:
                file.write(self.writer.output)
        except (IOError, OSError) as err:
            ConfluenceLogger.warn("error writing file "
                "%s: %s" % (outfilename, err))
def register_toctree_depth(docname, depth):
    """
    register the toctree-depth for the provided document name

    Documents using toctree's will only use the first toctree's 'maxdepth'
    option [1]. Recording the depth here preserves a document's desired
    maximum depth before toctree resolution strips the hint away.

    [1]: http://www.sphinx-doc.org/en/stable/markup/toctree.html#id3
    """
    logger.verbose('track %s toc-depth: %s' % (docname, depth))
    ConfluenceState.doc2ttd[docname] = depth
def register_target(refid, target):
    """
    register a reference to a specific (anchor) target

    A reStructuredText reference may point at an anchor in the same
    document, at another document or at an anchor inside another document.
    In Confluence, the target name is typically dependent on the document's
    title name (auto-generated targets provided by Confluence; ex.
    title#header). Builders invoke this method to record which target value
    a provided reference identifier should resolve to, so a writer can
    later prepare a proper link (see also `target`).
    """
    logger.verbose('mapping %s to target: %s' % (refid, target))
    ConfluenceState.refid2target[refid] = target
def _replace_math_blocks(self, doctree):
    """
    replace math blocks with images

    Math blocks are pre-processed and replaced with respective images in
    the list of documents to process. This is to help prepare additional
    images into the asset management for this extension. Math support will
    work on systems which have latex/dvipng installed.

    Args:
        doctree: the doctree to replace blocks on
    """
    # extension not available; nothing to do
    if imgmath is None:
        return

    # imgmath's render_math call expects a translator to be passed
    # in; mock a translator tied to our self-builder
    class MockTranslator:
        def __init__(self, builder):
            self.builder = builder
    mock_translator = MockTranslator(self)

    # handle both inline math and display math blocks
    for node in itertools.chain(doctree.traverse(nodes.math),
            doctree.traverse(nodes.math_block)):
        try:
            # display math may need wrapping into a displaymath
            # environment; inline math is wrapped in '$' markers
            if not isinstance(node, nodes.math):
                if node['nowrap']:
                    latex = node.astext()
                else:
                    latex = wrap_displaymath(node.astext(), None, False)
            else:
                latex = '$' + node.astext() + '$'

            mf, depth = imgmath.render_math(mock_translator, latex)
            # rendering produced no file; leave the node untouched
            if not mf:
                continue

            new_node = nodes.image(
                candidates={'?'},
                uri=path.join(self.outdir, mf),
                **node.attributes)
            new_node['from_math'] = True
            # display math is centered; inline math keeps its placement
            if not isinstance(node, nodes.math):
                new_node['align'] = 'center'
            # depth (baseline offset) is used for inline vertical alignment
            if depth is not None:
                new_node['math_depth'] = depth
            node.replace_self(new_node)
        except imgmath.MathExtError as exc:
            # a failed render is logged; the original node is kept
            ConfluenceLogger.warn('inline latex {}: {}'.format(
                node.astext(), exc))
def register_upload_id(docname, id_):
    """
    register a page (upload) identifier for a docname

    When a publisher creates/updates a page on a Confluence instance, the
    resulting page will have an identifier for it. This state utility
    class can help track the Confluence page's identifier by invoking this
    registration method. This method is primarily used to help track/order
    published documents into a hierarchical fashion (see
    `registerParentDocname`). It is important to note that the order of
    published documents will determine if a page's upload identifier is
    tracked in this state (see also `uploadId`).
    """
    logger.verbose("tracking docname %s's upload id: %s" % (docname, id_))
    ConfluenceState.doc2uploadId[docname] = id_
def replace_sphinxcontrib_mermaid_nodes(builder, doctree):
    """
    replace mermaid nodes with images

    mermaid nodes are pre-rendered and swapped for image nodes within the
    processed documentation set.

    Args:
        builder: the builder
        doctree: the doctree to replace blocks on
    """
    # honor any user request to disable third-party extension changes
    if 'ext-sphinxcontrib.mermaid' in builder.config.confluence_adv_restricted:
        return

    if not sphinxcontrib_mermaid:
        return

    # mermaid_render expects a translator; provide a minimal stand-in
    # bound to this builder
    class _TranslatorShim:
        def __init__(self, builder):
            self.builder = builder

    shim = _TranslatorShim(builder)

    # 'raw' output targets browser-side rendering; fall back to png files
    output_format = builder.config.mermaid_output_format
    if output_format == 'raw':
        output_format = 'png'

    for node in doctree.traverse(mermaid):
        try:
            fname, _ = mermaid_render(shim, node['code'], node['options'],
                output_format, 'mermaid')
            if not fname:
                node.parent.remove(node)
                continue
        except MermaidError as exc:
            ConfluenceLogger.warn('mermaid code %r: ' % node['code'] + str(exc))
            node.parent.remove(node)
            continue

        img = nodes.image(candidates={'?'}, uri=fname)
        if 'align' in node:
            img['align'] = node['align']
        node.replace_self(img)
def replace_sphinx_diagrams_nodes(builder, doctree):
    """
    replace sphinx-diagrams nodes with images

    sphinx-diagrams nodes are pre-rendered and swapped for image nodes
    (wrapped in paragraphs) within the processed documentation set.

    Args:
        builder: the builder
        doctree: the doctree to replace blocks on
    """
    # honor any user request to disable third-party extension changes
    if 'ext-sphinx_diagrams' in builder.config.confluence_adv_restricted:
        return

    if not sphinx_diagrams:
        return

    # the render call expects a translator; provide a minimal stand-in
    # bound to this builder
    class _TranslatorShim:
        def __init__(self, builder):
            self.builder = builder

    shim = _TranslatorShim(builder)

    for node in doctree.traverse(sphinx_diagrams_diagrams):
        try:
            fname, _ = sphinx_diagrams_render(shim, node['code'],
                node['options'], 'diagrams')
            if not fname:
                node.parent.remove(node)
                continue
        except DiagramsError as exc:
            ConfluenceLogger.warn('diagrams code %r: ' % node['code'] + str(exc))
            node.parent.remove(node)
            continue

        img = nodes.image(candidates={'?'}, uri=fname)
        if 'align' in node:
            img['align'] = node['align']

        container = nodes.paragraph()
        container.append(img)
        node.replace_self(container)
def register_parent_docname(docname, parent_docname):
    """
    register a parent docname for a provided docname

    When using Sphinx's toctree [1], documents defined in the tree can be
    considered child pages (see the configuration option
    'confluence_page_hierarchy'). This method helps track a parent document
    for a provided child document. With the ability to track a parent
    document and track publish upload identifiers (see `registerUploadId`),
    the publish operation can help ensure pages are structured in a
    hierarchical fashion (see also `parentDocname`).

    [1]: http://www.sphinx-doc.org/en/stable/markup/toctree.html#directive-toctree
    """
    logger.verbose('setting parent of %s to: %s' % (docname, parent_docname))
    ConfluenceState.doc2parentDoc[docname] = parent_docname
def main():
    """
    command line entry point

    Parses the known command line options, configures colorized output and
    dispatches to the mainline matching the requested action.

    Returns:
        the exit code of the selected mainline
    """
    parser = argparse.ArgumentParser(
        prog='sphinx-build-confluence',
        add_help=False,
        description='Sphinx extension to output Atlassian Confluence content.',
    )

    parser.add_argument('action', nargs='?')
    parser.add_argument('--color', default='auto',
        action='store_const', const='yes')
    parser.add_argument('--help', '-h', action='store_true')
    parser.add_argument('--no-color', '-N', dest='color',
        action='store_const', const='no')
    parser.add_argument('--verbose', '-V', action='count', default=0)
    parser.add_argument('--version', action='version',
        version='%(prog)s ' + version)
    parser.add_argument('--work-dir')

    args, _ = parser.parse_known_args()
    if args.help:
        print(usage())
        sys.exit(0)

    # colorized output is dropped on explicit request, or automatically
    # when not attached to a color-capable (non-msys) terminal
    auto_nocolor = (args.color == 'auto'
        and 'MSYSTEM' not in os.environ
        and not color_terminal())
    if args.color == 'no' or auto_nocolor:
        nocolor()

    # pre-load logging support if sphinx is not loaded (to prevent blank lines)
    logger.initialize(preload=True)

    # invoke a desired command mainline
    dispatch = {
        'report': report_main,
        'wipe': wipe_main,
    }
    mainline = dispatch.get(args.action, build_main)
    return mainline(parser)
def write(self, build_docnames, updated_docnames, method='update'):
    # assemble all documents into a single doctree anchored on the root
    # document and write the combined result
    docnames = self.env.all_docs
    if self.config.root_doc not in docnames:
        logger.error('singleconfluence requires root_doc')
        return

    # the root document must provide a title to anchor the single page
    root_doctitle = self._process_root_document()
    if not root_doctitle:
        logger.error('singleconfluence requires title on root_doc')
        return

    with progress_message(C('assembling single confluence document')):
        # assemble toc section/figure numbers
        #
        # Both the environment's `toc_secnumbers` and `toc_fignumbers`
        # are populated; however, they do not contain a complete list of
        # each document's section/figure numbers. The assembling process
        # will create dictionary keys of '<docname>/<id>' which the writer
        # implementations can use to build desired references when invoked
        # with a `singleconfluence` builder. Unlike Sphinx's `singlehtml`
        # builder, this builder will update the existing number dictionaries
        # to hold the original mappings (for other post-transforms,
        # extensions, etc.) and the newer mappings for reference building.
        assembled_toc_secnumbers = self.assemble_toc_secnumbers()
        assembled_toc_fignumbers = self.assemble_toc_fignumbers()
        self.env.toc_secnumbers.setdefault(self.config.root_doc, {}).update(
            assembled_toc_secnumbers[self.config.root_doc])
        self.env.toc_fignumbers.setdefault(self.config.root_doc, {}).update(
            assembled_toc_fignumbers[self.config.root_doc])

        # register title targets for references before assembling doc
        # re-works them into a single document
        for docname in docnames:
            doctree = self.env.get_doctree(docname)
            self._register_doctree_title_targets(docname, doctree)

        doctree = self.assemble_doctree()
        self._prepare_doctree_writing(self.config.root_doc, doctree)
        self.assets.process_document(doctree, self.config.root_doc)

    with progress_message(C('writing single confluence document')):
        self.write_doc_serialized(self.config.root_doc, doctree)
        self.write_doc(self.config.root_doc, doctree)
def _onlynew(self, msg, id=None, misc=''):
    """
    log an only-new mode message

    Accepts a message to be printed out when running in "only-new" mode. A
    message may be accompanied by an identifier which should be translated
    to a name (if possible).

    Args:
        msg: the message
        id (optional): identifier (name mapping) associated with the message
        misc (optional): additional information to append
    """
    s = '[only-new] '
    s += msg
    if id and id in self._name_cache:
        s += ' ' + self._name_cache[id]
    if id:
        s += ' ({})'.format(id)
    # fix: append the `misc` detail, which was previously accepted but
    # silently ignored (matches the sibling `_dryrun` helper)
    if misc:
        s += ' ' + misc
    ConfluenceLogger.info(s + min(80, 80 - len(s)) * ' ')  # 80c-min clearing
def unknown_visit(self, node):
    """
    handle an unsupported node type

    Nodes explicitly listed by the user are silently skipped; otherwise a
    user-supplied handler (if any) is given a chance before an unsupported
    node is reported as an error.
    """
    node_name = node.__class__.__name__

    # skip any node type the user has explicitly asked to ignore
    if node_name in self.builder.config.confluence_adv_ignore_nodes:
        ConfluenceLogger.verbose('ignore node {} (conf)'.format(node_name))
        raise nodes.SkipNode

    # allow users to override unknown nodes
    #
    # A node handler allows an advanced user to provide implementation to
    # process a node not supported by this extension. This is to assist in
    # providing a quick alternative to supporting another third party
    # extension in this translator (without having to take the time in
    # building a third extension).
    handler = self.builder.config.confluence_adv_node_handler
    if isinstance(handler, dict) and node_name in handler:
        handler[node_name](self, node)
        raise nodes.SkipNode

    raise NotImplementedError('unknown node: ' + node_name)
def deprecated(validator):
    """
    inform users of deprecated configurations

    Checks the provided configuration for options which have been flagged
    as deprecated and emits a warning message for each one detected.

    Args:
        validator: the configuration validator
    """
    config = validator.config

    # inform users of a deprecated configuration being used
    for key, msg in DEPRECATED_CONFIGS.items():
        if config[key] is not None:
            logger.warn('%s deprecated; %s' % (key, msg))

    # promote singleconfluence over confluence_max_doc_depth=0
    depth = config.confluence_max_doc_depth
    if depth == 0:
        logger.warn('confluence_max_doc_depth with a value of zero '
            "is deprecated; use the 'singleconfluence' builder instead")
    elif depth:
        logger.warn('confluence_max_doc_depth is deprecated and will '
            "be removed; consider using the 'singleconfluence' builder instead")
def register_title(docname, title, config):
    """
    register the title for the provided document name

    In Confluence, a page is identified by the name/title of a page (at
    least, from the user's perspective). When processing a series of
    document names, the title value used for a document is based off the
    first heading detected. This register method allows a builder to track
    a document's title name, so it may provide a document's contents and
    target title when passed to the publish operation. If a prefix (or
    postfix) value is provided, it will be added to the beginning (or at
    the end) of the provided title value.
    """
    try_max = CONFLUENCE_MAX_TITLE_LEN
    base_tail = ''
    postfix = None
    prefix = None

    # apply any configured prefix/postfix, unless suppressed for the root
    # (index) document
    if config and (not config.confluence_ignore_titlefix_on_index or
            docname != config.root_doc):
        postfix = config.confluence_publish_postfix
        prefix = config.confluence_publish_prefix

    if prefix:
        title = prefix + title

    if postfix:
        base_tail += postfix

    # trim the title to fit within Confluence's maximum title length,
    # reserving room for the postfix
    if len(title) + len(base_tail) > try_max:
        warning = 'document title has been trimmed due to length: %s' % title
        if len(base_tail) > 0:
            warning += '; With postfix: %s' % base_tail
        logger.warn(warning)
        title = title[0:try_max - len(base_tail)]

    base_title = title
    title += base_tail

    # check if title is already used; if so, append a new value
    #
    # Titles are compared case-insensitively; conflicting titles receive a
    # numbered ' (N)' suffix, re-trimming the base title when needed.
    offset = 2
    while title.lower() in ConfluenceState.title2doc:
        if offset == 2:
            logger.warn('title conflict detected with '
                "'{}' and '{}'".format(
                    ConfluenceState.title2doc[title.lower()], docname))

        tail = ' ({}){}'.format(offset, base_tail)
        if len(base_title) + len(tail) > try_max:
            base_title = base_title[0:(try_max - len(tail))]

        title = base_title + tail
        offset += 1

    ConfluenceState.doc2title[docname] = title
    ConfluenceState.title2doc[title.lower()] = docname
    logger.verbose('mapping %s to title: %s' % (docname, title))
    return title
def warnings(validator):
    """
    inform users of any warnings related to a configuration state

    Checks the provided configuration for options which may be a concern
    to a user. Each observed concern results in a warning message.

    Args:
        validator: the configuration validator
    """
    config = validator.config

    # check if any user defined mime types are unknown
    mime_types = config.confluence_additional_mime_types
    if mime_types is not None:
        for mime_type in mime_types:
            if not mimetypes.guess_extension(mime_type):
                ConfluenceLogger.warn('confluence_additional_mime_types '
                    'defines an unknown mime type: ' + mime_type)

    # warn when ssl validation is disabled
    if config.confluence_disable_ssl_validation:
        ConfluenceLogger.warn('confluence_disable_ssl_validation is set; '
            'consider using confluence_ca_cert instead')

    # confluence_file_suffix "cannot" end with a dot
    suffix = config.confluence_file_suffix
    if suffix and suffix.endswith('.'):
        ConfluenceLogger.warn('confluence_file_suffix ends with a period; '
            'a default value will be applied instead')
def _dryrun(self, msg, id_=None, misc=''):
    """
    log a dry run mode message

    Accepts a message to be printed out when running in "dry run" mode. A
    message may be accompanied by an identifier which should be translated
    to a name (if possible).

    Args:
        msg: the message
        id_ (optional): identifier (name mapping) associated with the message
        misc (optional): additional information to append
    """
    parts = ['[dryrun] ', msg]
    if id_ and id_ in self._name_cache:
        parts.append(' ' + self._name_cache[id_])
    if id_:
        parts.append(' ({})'.format(id_))
    if misc:
        parts.append(' ' + misc)

    s = ''.join(parts)
    logger.info(s + min(80, 80 - len(s)) * ' ')  # 80c-min clearing
def publish_asset(self, key, docname, output, type, hash):
    """
    publish an asset (attachment) to its owning Confluence page

    Locates the page identifier for the asset's owning document (asking
    the instance when not already tracked) and stores the attachment,
    honoring the `confluence_asset_override` mode. Successfully stored
    attachments are removed from the legacy-asset purge candidates.

    Args:
        key: the asset key (attachment name)
        docname: the document that owns the asset
        output: the raw asset content
        type: the asset's mime type
        hash: the asset's content hash
    """
    conf = self.config
    publisher = self.publisher

    title = ConfluenceState.title(docname)
    page_id = ConfluenceState.uploadId(docname)

    if not page_id:
        # A page identifier may not be tracked in cases where only a subset
        # of documents are published and the target page an asset will be
        # published to was not part of the request. In this case, ask the
        # Confluence instance what the target page's identifier is.
        page_id, _ = publisher.getPage(title)
        if page_id:
            ConfluenceState.registerUploadId(docname, page_id)
        else:
            ConfluenceLogger.warn('cannot publish asset since publishing '
                'point cannot be found ({}): {}'.format(key, docname))
            return

    # fix: initialize to avoid an unbound reference below when asset
    # publishing is disabled (confluence_asset_override set to a false,
    # non-None value skips both branches)
    attachment_id = None

    if conf.confluence_asset_override is None:
        # "automatic" management -- check if already published; if not, push
        attachment_id = publisher.storeAttachment(
            page_id, key, output, type, hash)
    elif conf.confluence_asset_override:
        # forced publishing of the asset
        attachment_id = publisher.storeAttachment(
            page_id, key, output, type, hash, force=True)

    # a stored attachment is no longer a purge candidate
    if attachment_id and conf.confluence_purge:
        if page_id in self.legacy_assets:
            legacy_asset_info = self.legacy_assets[page_id]
            if attachment_id in legacy_asset_info:
                legacy_asset_info.pop(attachment_id, None)
def _handle_common_request(self, rsp):
    """
    handle common response processing for a REST request

    Tracks any server-provided Retry-After delay for pacing subsequent
    requests and raises the appropriate exception for common error status
    codes.

    Args:
        rsp: the response to process

    Raises:
        ConfluenceAuthenticationFailedUrlError: on 401
        ConfluencePermissionError: on 403
        ConfluenceProxyPermissionError: on 407
        ConfluenceRateLimited: on 429
    """
    # if confluence or a proxy reports a retry-after delay (to pace us),
    # track it to delay the next request made
    # (https://datatracker.ietf.org/doc/html/rfc2616.html#section-14.37)
    raw_delay = rsp.headers.get(RSP_HEADER_RETRY_AFTER)
    if raw_delay:
        delay = None
        try:
            # attempt to parse a seconds value from the header
            delay = int(raw_delay)
        except ValueError:
            # if seconds are not provided, attempt to parse an http-date
            parsed_dtz = parsedate_tz(raw_delay)
            if parsed_dtz:
                target_datetime = mktime_tz(parsed_dtz)
                delay = target_datetime - time.time()

        # fix: `delay` remains None when the header value is neither an
        # integer nor a parsable date; guard before comparing to avoid a
        # TypeError
        if delay is not None and delay > 0:
            self.next_delay = delay

            # if this delay is over a minute, provide a notice to a client
            # that requests are being delayed -- but we'll only notify a
            # user once
            if delay >= 60 and not self._reported_large_delay:
                logger.warn('(warning) site has reported a long '
                    'rate-limit delay ({} seconds)'.format(
                        math.ceil(delay)))
                self._reported_large_delay = True

    if rsp.status_code == 401:
        raise ConfluenceAuthenticationFailedUrlError
    if rsp.status_code == 403:
        raise ConfluencePermissionError('rest-call')
    if rsp.status_code == 407:
        raise ConfluenceProxyPermissionError
    if rsp.status_code == 429:
        raise ConfluenceRateLimited
def main():
    """
    command line entry point

    Parses the known command line options, configures colorized output and
    dispatches to the mainline matching the requested action.

    Returns:
        the exit code of the selected mainline
    """
    parser = argparse.ArgumentParser(
        prog='sphinx-build-confluence',
        add_help=False,
        description='Sphinx extension to output Atlassian Confluence content.')
    parser.add_argument('action', nargs='?')
    parser.add_argument('--help', '-h', action='store_true')
    # fix: '--no-color' is a flag -- previously it required a value (no
    # action/const configured) and, with no default, the 'auto' detection
    # branch below could never trigger
    parser.add_argument('--no-color', '-N', dest='color',
        action='store_const', const='no', default='auto')
    parser.add_argument('--verbose', '-V', action='count', default=0)
    parser.add_argument('--version', action='version',
        version='%(prog)s ' + version)
    parser.add_argument('--work-dir')
    args, _ = parser.parse_known_args()

    if args.help:
        print(usage())
        sys.exit(0)

    # pre-load logging support if sphinx is not loaded (to prevent blank lines)
    logger.initialize(preload=True)

    if args.color == 'no' or (args.color == 'auto' and not color_terminal()):
        nocolor()
    # disable color (on windows) by default when using virtualenv since it
    # appears to be causing issues
    elif getattr(sys, 'base_prefix', sys.prefix) != sys.prefix:
        if sys.platform == 'win32':
            nocolor()

    # invoke a desired command mainline
    if args.action == 'report':
        rv = report_main(parser)
    elif args.action == 'wipe':
        rv = wipe_main(parser)
    else:
        rv = build_main(parser)

    return rv