Example #1
0
def fixup_keywords(app, exception):
    """Post-process the generated ``.hhk`` keywords file of the htmlhelp build.

    Connected to a ``build-finished``-style event: *exception* is the error
    the build raised, or ``None`` on success.  Does nothing for any builder
    other than ``htmlhelp`` or when the build failed.
    """
    # only works for .chm output
    if getattr(app.builder, 'name', '') != 'htmlhelp' or exception:
        return

    getLogger(__name__).info('fixing HTML escapes in keywords file...')
    outdir = app.builder.outdir
    outname = app.builder.config.htmlhelp_basename
    with app.builder.open_file(outdir, outname + '.hhk', 'r') as f:
        index = f.read()
    with app.builder.open_file(outdir, outname + '.hhk', 'w') as f:
        # BUG FIX: the original literal was corrupted (an HTML character
        # reference was unescaped during copying, yielding the invalid
        # ``replace(''', ''')``).  The intent is to turn the numeric entity
        # for an apostrophe back into a literal quote so the HTML Help
        # compiler accepts the keywords file.
        # TODO(review): confirm the exact entity (&#x27; vs &#39;) against
        # the upstream project this was taken from.
        f.write(index.replace('&#x27;', '\''))
Example #2
0
File: coqdomain.py Project: coq/coq
 def _warn_if_undocumented(self):
     """Report directives that have no body and no ``:undocumented:`` flag.

     Behaviour is driven by the ``report_undocumented_coq_objects`` config
     value: any truthy value logs an info message; the value ``"warning"``
     additionally escalates to a directive warning.
     """
     document = self.state.document
     config = document.settings.env.config
     report = config.report_undocumented_coq_objects
     if report and not self.content and "undocumented" not in self.options:
         # This is annoyingly convoluted, but we don't want to raise warnings
         # or interrupt the generation of the current node.  For more details
         # see https://github.com/sphinx-doc/sphinx/issues/4976.
         msg = 'No contents in directive {}'.format(self.name)
         # reporter.info returns a system_message node; logging its text
         # keeps the message out of the document tree itself.
         node = document.reporter.info(msg, line=self.lineno)
         getLogger(__name__).info(node.astext())
         if report == "warning":
             raise self.warning(msg)
Example #3
0
    def test_docassert_html_method(self):
        """docassert must warn that method parameter 'c' does not exist,
        and must NOT report 'b, self' as undocumented parameters."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")

        class MyStream:
            # Minimal write-only stream that records every write so the
            # captured log output can be inspected after the run.
            def __init__(self):
                self.rows = []

            def write(self, text):
                fLOG(
                    "[warning*] {0} - '{1}'".format(len(self), text.strip("\n\r ")))
                self.rows.append(text)

            def getvalue(self):
                return "\n".join(self.rows)

            def __len__(self):
                return len(self.rows)

        # Route both the mock-app and docassert loggers into the capture
        # stream (note: these are wrapper loggers exposing `.logger`).
        logger1 = getLogger("MockSphinxApp")
        logger2 = getLogger("docassert")
        log_capture_string = MyStream()  # StringIO()
        ch = logging.StreamHandler(log_capture_string)
        ch.setLevel(logging.DEBUG)
        logger1.logger.addHandler(ch)
        logger2.logger.addHandler(ch)
        logger2.warning("try")

        # Render an automethod directive for the sample module under test.
        this = os.path.abspath(os.path.dirname(__file__))
        data = os.path.join(this, "datadoc")
        with sys_path_append(data):
            obj, name = import_object("exsig.clex.onemethod", "method")
            newstring = ".. automethod:: exsig.clex.onemethod"
            html = rst2html(newstring)
            self.assertTrue(html is not None)
        fLOG(len(log_capture_string))

        # Expected warning must appear at least once...
        lines = log_capture_string.getvalue().split("\n")
        if len(lines) == 0:
            raise Exception("no warning")
        nb = 0
        for line in lines:
            if "'onemethod' has no parameter 'c'" in line:
                nb += 1
        if nb == 0:
            raise Exception("not the right warning")
        # ...and this false-positive warning must never appear.
        for line in lines:
            if "'onemethod' has undocumented parameters 'b, self'" in line:
                raise Exception(line)
Example #4
0
def test_info_location(app, status, warning):
    """Exercise every supported ``location`` form of the logging adapter and
    check the source/line prefix written to the status stream."""
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    # Plain locations: docname string, (docname, line) tuple, or None.
    for message, location, expected in [
        ('message1', 'index', 'index.txt: message1'),
        ('message2', ('index', 10), 'index.txt:10: message2'),
        ('message3', None, '\nmessage3'),
    ]:
        logger.info(message, location=location)
        assert expected in status.getvalue()

    # Node locations: the (source, line) attributes drive the prefix.
    node = nodes.Node()
    for message, source, line, expected in [
        ('message4', 'index.txt', 10, 'index.txt:10: message4'),
        ('message5', 'index.txt', None, 'index.txt:: message5'),
        ('message6', None, 10, '<unknown>:10: message6'),
        ('message7', None, None, '\nmessage7'),
    ]:
        node.source, node.line = source, line
        logger.info(message, location=node)
        assert expected in status.getvalue()
Example #5
0
def depart_fitb_node(self, node):
    # Close out a fill-in-the-blank (fitb) question: pad missing blanks,
    # render the closing template, and persist the rendered HTML fragment.
    # If there were fewer blanks than feedback items, add blanks at the end of the question.
    blankCount = 0
    for _ in node.traverse(BlankNode):
        blankCount += 1
    while blankCount < len(node.feedbackArray):
        visit_blank_node(self, None)
        blankCount += 1

    # Warn if there are fewer feedback items than blanks.
    # NOTE: after the padding loop above, blankCount == max(original blank
    # count, len(feedbackArray)); so this condition fires exactly when the
    # original markup contained MORE blanks than feedback entries.
    if len(node.feedbackArray) < blankCount:
        # Taken from the example in the `logging API <http://www.sphinx-doc.org/en/stable/extdev/logging.html#logging-api>`_.
        logger = logging.getLogger(__name__)
        logger.warning('Not enough feedback for the number of blanks supplied.', location=node)

    # Generate the HTML.
    node.fitb_options['json'] = json.dumps(node.feedbackArray)
    res = node.template_end % node.fitb_options
    self.body.append(res)

    # add HTML to the Database and clean up
    # (everything appended after the delimiter marker belongs to this question)
    addHTMLToDB(node.fitb_options['divid'],
                node.fitb_options['basecourse'],
                "".join(self.body[self.body.index(node.delimiter) + 1:]))

    self.body.remove(node.delimiter)
Example #6
0
def copy_assets(app, exception):
    """ Copy asset files to the output """
    # Sphinx >= 1.6 exposes the logging API on sphinx.util.logging; older
    # versions only provide app.info (removed entirely in Sphinx 2.0).
    if 'getLogger' in dir(logging):
        log = logging.getLogger(__name__).info  # pylint: disable=no-member
    else:
        log = app.info
    builders = get_compatible_builders(app)
    # a failed build ships no assets
    if exception:
        return
    if app.builder.name not in builders:
        if not app.config['sphinx_tabs_nowarn']:
            app.warn(
                'Not copying tabs assets! Not compatible with %s builder' %
                app.builder.name)
        return

    log('Copying tabs assets')

    installdir = os.path.join(app.builder.outdir, '_static', 'sphinx_tabs')

    # FILES holds package-relative asset paths (defined elsewhere in this
    # module); mirror each one under _static/sphinx_tabs.
    for path in FILES:
        source = resource_filename('sphinx_tabs', path)
        dest = os.path.join(installdir, path)

        destdir = os.path.dirname(dest)
        if not os.path.exists(destdir):
            os.makedirs(destdir)

        copyfile(source, dest)
Example #7
0
File: conf.py Project: gwpy/gwpy
def build_cli_examples(_):
    """Render the CLI examples listed in ``cli/examples.ini`` to RST pages
    and write a glob toctree index covering all of them.

    :param _: unused (Sphinx event-callback signature).
    """
    logger = logging.getLogger('cli-examples')

    clidir = os.path.join(SPHINX_DIR, 'cli')
    exini = os.path.join(clidir, 'examples.ini')
    exdir = os.path.join(clidir, 'examples')
    if not os.path.isdir(exdir):
        os.makedirs(exdir)

    config = ConfigParser()
    config.read(exini)

    rsts = []
    for sect in config.sections():
        # _build_cli_example returns the RST path and, when the example's
        # output still needs generating, the shell command to run.
        rst, cmd = _build_cli_example(config, sect, exdir, logger)
        if cmd:
            logger.info('[cli] running example {0!r}'.format(sect))
            logger.debug('[cli] $ {0}'.format(cmd))
            # NOTE(review): shell=True on a command read from examples.ini —
            # acceptable for trusted repo config, never for untrusted input.
            subprocess.check_call(cmd, shell=True)
            logger.debug('[cli] wrote {0}'.format(cmd.split()[-1]))
        rsts.append(rst)

    with open(os.path.join(exdir, 'examples.rst'), 'w') as f:
        f.write('.. toctree::\n   :glob:\n\n')
        for rst in rsts:
            # emit paths relative to the Sphinx source directory
            f.write('   {0}\n'.format(rst[len(SPHINX_DIR):]))
Example #8
0
File: conf.py Project: gwpy/gwpy
def build_examples(_):
    """Convert the repository's example scripts into RST pages.

    For each subdirectory of ``../examples`` (relative to the Sphinx source
    dir): copy its ``index.rst`` into the output tree and render every
    ``*.py`` script to RST via the ``ex2rst.py`` helper.

    :param _: unused (Sphinx event-callback signature).
    """
    logger = logging.getLogger('examples')
    logger.info('[examples] converting examples to RST...')

    srcdir = os.path.join(SPHINX_DIR, os.pardir, 'examples')
    outdir = os.path.join(SPHINX_DIR, 'examples')
    ex2rst = os.path.join(SPHINX_DIR, 'ex2rst.py')

    if not os.path.isdir(outdir):
        os.makedirs(outdir)
        logger.debug('[examples] created {0}'.format(outdir))

    for exdir in next(os.walk(srcdir))[1]:
        subdir = os.path.join(outdir, exdir)
        if not os.path.isdir(subdir):
            os.makedirs(subdir)
        # copy index
        index = os.path.join(subdir, 'index.rst')
        shutil.copyfile(os.path.join(srcdir, exdir, 'index.rst'), index)
        logger.debug('[examples] copied {0}'.format(index))
        # render python script as RST
        for expy in glob.glob(os.path.join(srcdir, exdir, '*.py')):
            target = os.path.join(
                subdir, os.path.basename(expy).replace('.py', '.rst'))
            # BUG FIX: the original used subprocess.Popen without waiting,
            # so the "wrote" log line could precede the actual write, the
            # build could finish before conversion completed, and failures
            # went unnoticed.  check_call waits and raises on non-zero exit.
            subprocess.check_call([sys.executable, ex2rst, expy, target])
            logger.debug('[examples] wrote {0}'.format(target))
        logger.info('[examples] converted all in examples/{0}'.format(exdir))
Example #9
0
def test_suppress_warnings(app, status, warning):
    """Check that suppress_warnings filters by type ('test') and by
    type.subtype ('test.logging'), and that suppressed warnings still
    count toward app._warncount."""
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    app._warncount = 0  # force reset

    # (config value, message1 shown?, message2 shown?, message3 shown?,
    #  cumulative warncount after the three emits)
    cases = [
        ([], True, True, True, 3),
        (['test'], False, False, True, 4),
        (['test.logging'], False, True, True, 6),
    ]
    for suppress, show1, show2, show3, expected_count in cases:
        app.config.suppress_warnings = suppress
        warning.truncate(0)
        logger.warning('message1', type='test', subtype='logging')
        logger.warning('message2', type='test', subtype='crash')
        logger.warning('message3', type='actual', subtype='logging')
        assert ('message1' in warning.getvalue()) is show1
        assert ('message2' in warning.getvalue()) is show2
        assert ('message3' in warning.getvalue()) is show3
        assert app._warncount == expected_count
Example #10
0
def test_warning_location(app, status, warning):
    """Warnings carry the same location-prefix forms as info messages, with
    a WARNING: tag; location-less warnings are colorized darkred."""
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    # Plain locations: docname, (docname, line), and None (colorized).
    for message, location, expected in [
        ('message1', 'index', 'index.txt: WARNING: message1'),
        ('message2', ('index', 10), 'index.txt:10: WARNING: message2'),
        ('message3', None, colorize('darkred', 'WARNING: message3')),
    ]:
        logger.warning(message, location=location)
        assert expected in warning.getvalue()

    # Node locations: (source, line) attribute pairs drive the prefix.
    node = nodes.Node()
    for message, source, line, expected in [
        ('message4', 'index.txt', 10, 'index.txt:10: WARNING: message4'),
        ('message5', 'index.txt', None, 'index.txt:: WARNING: message5'),
        ('message6', None, 10, '<unknown>:10: WARNING: message6'),
        ('message7', None, None, colorize('darkred', 'WARNING: message7')),
    ]:
        node.source, node.line = source, line
        logger.warning(message, location=node)
        assert expected in warning.getvalue()
Example #11
0
def test_colored_logs(app, status, warning):
    """Drive one message through each log level and check which records are
    colorized by default, then check explicit color= overrides."""
    app.verbosity = 2
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    # default colors: one message per level
    for emit, text in [
        (logger.debug, 'message1'),
        (logger.verbose, 'message2'),
        (logger.info, 'message3'),
        (logger.warning, 'message4'),
        (logger.critical, 'message5'),
        (logger.error, 'message6'),
    ]:
        emit(text)

    assert colorize('darkgray', 'message1') in status.getvalue()
    assert 'message2\n' in status.getvalue()  # not colored
    assert 'message3\n' in status.getvalue()  # not colored
    assert colorize('darkred', 'WARNING: message4') in warning.getvalue()
    assert 'WARNING: message5\n' in warning.getvalue()  # not colored
    assert 'WARNING: message6\n' in warning.getvalue()  # not colored

    # an explicit color specification overrides the default
    logger.debug('message7', color='white')
    logger.info('message8', color='red')
    assert colorize('white', 'message7') in status.getvalue()
    assert colorize('red', 'message8') in status.getvalue()
Example #12
0
def check_parameters(documenter, doc):
    """
    Check that all documented parameters match a formal parameter for the
    function. Documented params which don't match the actual function may be
    typos.

    :param documenter: autojsdoc documenter (provides modname and, on old
                       Sphinx, the fallback ``app.warn``)
    :param doc: parsed jsdoc comment object for the function
    """
    guessed = set(doc['guessed_params'] or [])
    if not guessed:
        return

    documented = {
        # param name can be of the form [foo.bar.baz=default]\ndescription
        jsdoc.ParamDoc(text).name.split('.')[0]
        for text in doc.get_as_list('param')
    }
    odd = documented - guessed
    if not odd:
        return

    # use sphinx logging API if available, otherwise fall back to warning
    # via the app object (deprecated in 1.6, removed in 2.0)
    # sphinx.util.logging exists in 1.5 but is basically useless
    # FIX: renamed `logger` -> `warn`; the variable holds a warning
    # *function*, not a logger object, and the old name was misleading.
    if hasattr(sphinx_logging, 'getLogger'):
        warn = sphinx_logging.getLogger('autojsdoc').warning
    else:
        warn = documenter.directive.env.app.warn

    # FIX: sort the odd names — `odd` is a set, so joining it directly
    # produced a nondeterministic message ordering across runs.
    warn("Found documented params %s not in formal parameter list "
         "of function %s in module %s (%s)" % (
             ', '.join(sorted(odd)),
             doc.name,
             documenter.modname,
             doc['sourcemodule']['sourcefile'],
         ))
Example #13
0
    def test_docassert_html(self):
        """Rendering autofunction for the sample module must not produce a
        spurious "has no parameter 'TypeError'" docassert warning, and the
        parameter markup must survive into the HTML output."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")

        # Route both the mock-app and docassert loggers into one capture
        # stream (these are wrapper loggers exposing `.logger`).
        logger1 = getLogger("MockSphinxApp")
        logger2 = getLogger("docassert")

        log_capture_string = StringIO()
        ch = logging.StreamHandler(log_capture_string)
        ch.setLevel(logging.DEBUG)
        logger1.logger.addHandler(ch)
        logger2.logger.addHandler(ch)

        this = os.path.abspath(os.path.dirname(__file__))
        data = os.path.join(this, "datadoc")
        with sys_path_append(data):
            # First render the raw docstring directly...
            obj, name = import_object("exdocassert.onefunction", "function")
            docstring = obj.__doc__
            with warnings.catch_warnings(record=True) as ws:
                html = rst2html(docstring)
                if "if a and b have different" not in html:
                    raise Exception(html)

            # ...then render it through the autofunction directive.
            newstring = ".. autofunction:: exdocassert.onefunction"
            with warnings.catch_warnings(record=True) as ws:
                html = rst2html(newstring)
                for i, w in enumerate(ws):
                    fLOG(i, ":", w)
                if "if a and b have different" not in html:
                    # re-render with logging enabled to aid diagnosis
                    html = rst2html(newstring, fLOG=fLOG)
                    fLOG("number of warnings", len(ws))
                    for i, w in enumerate(ws):
                        fLOG(i, ":", str(w).replace("\\n", "\n"))
                    raise Exception(html)

            from docutils.parsers.rst.directives import _directives
            self.assertTrue("autofunction" in _directives)

        # The false-positive warning must not appear in the captured log.
        lines = log_capture_string.getvalue().split("\n")
        if len(lines) > 0:
            for line in lines:
                if "'onefunction' has no parameter 'TypeError'" in line:
                    raise Exception(
                        "This warning should not happen.\n{0}".format("\n".join(lines)))
        self.assertTrue("<strong>a</strong>" in html)
Example #14
0
def warning(context, message, *args, **kwargs):
    # type: (Dict, unicode, Any, Any) -> unicode
    """Jinja template helper: log *message* as a warning — prefixed with the
    page being rendered when known — and expand to nothing in the output."""
    if 'pagename' in context:
        page = context.get('pagename') + context.get('file_suffix', '')
        message = 'in rendering %s: %s' % (page, message)
    logging.getLogger('sphinx.themes').warning(message, *args, **kwargs)
    # templates expect an empty expansion, never the log text
    return ''
    def initialize():
        """
        initialize the confluence logger

        Before using the Confluence logger utility class, it needs to be
        initialized. This method should be invoked once (ideally before any
        attempts made to log).
        """
        # Stores the shared logger on the class.  NOTE(review):
        # ConfluenceLogger is presumably the enclosing class — it is not
        # defined in this fragment; verify against the full module.
        ConfluenceLogger.logger = logging.getLogger("confluence")
Example #16
0
def test_nonl_info_log(app, status, warning):
    """An info message logged with nonl=True suppresses its trailing
    newline, so the next message continues on the same line."""
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    for text, nonl in (('message1', True), ('message2', False), ('message3', False)):
        if nonl:
            logger.info(text, nonl=True)
        else:
            logger.info(text)

    assert 'message1message2\nmessage3' in status.getvalue()
Example #17
0
File: conf.py Project: gwpy/gwpy
def write_citing_rst(_):
    """Build ``citing.rst`` by appending the Zenodo citation block for
    record 597016 to the ``citing.rst.in`` template beside this file."""
    logger = logging.getLogger('zenodo')
    here = os.path.dirname(__file__)
    with open(os.path.join(here, 'citing.rst.in'), 'r') as template:
        body = template.read()
    body += '\n' + zenodo.format_citations(597016)
    target = os.path.join(here, 'citing.rst')
    with open(target, 'w') as out:
        out.write(body)
    logger.info('[zenodo] wrote {0}'.format(target))
Example #18
0
def test_verbosity_filter(app, status, warning):
    """At verbosity v, INFO (0), VERBOSE (1) and DEBUG (2) messages appear
    in the status stream only when their level number is <= v."""
    # (verbosity, (info shown?, verbose shown?, debug shown?))
    # Note: the status stream accumulates across iterations, so visibility
    # is checked in increasing-verbosity order, exactly as the original did.
    expectations = [
        (0, (True, False, False)),   # verbosity = 0: INFO
        (1, (True, True, False)),    # verbosity = 1: VERBOSE
        (2, (True, True, True)),     # verbosity = 2: DEBUG
    ]
    for verbosity, (show_info, show_verbose, show_debug) in expectations:
        app.verbosity = verbosity
        logging.setup(app, status, warning)
        logger = logging.getLogger(__name__)

        logger.info('message1')
        logger.verbose('message2')
        logger.debug('message3')

        assert ('message1' in status.getvalue()) is show_info
        assert ('message2' in status.getvalue()) is show_verbose
        assert ('message3' in status.getvalue()) is show_debug
        # never logged at all — must never appear
        assert 'message4' not in status.getvalue()
Example #19
0
 def setup(app):
     """Fallback extension entry point: warn that sphinx_gallery is not
     installed, so the gallery pages will not be built."""
     msg = ('The sphinx_gallery extension is not installed, so the '
            'gallery will not be built.  You will probably see '
            'additional warnings about undefined references due '
            'to this.')
     try:
         # app.warn existed through Sphinx 1.x and was removed in 2.0
         app.warn(msg)
     except AttributeError:
         # Sphinx 1.6+
         from sphinx.util import logging
         logger = logging.getLogger(__name__)
         logger.warning(msg)
Example #20
0
def test_warningiserror(app, status, warning):
    """warningiserror=False logs warnings quietly; True escalates them to
    SphinxWarning exceptions."""
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    # with the flag off, warnings pass through without raising
    app.warningiserror = False
    logger.warning('message')

    # with the flag on, the same call raises SphinxWarning
    app.warningiserror = True
    with pytest.raises(SphinxWarning):
        logger.warning('message')
Example #21
0
def test_logging_in_ParallelTasks(app, status, warning):
    """Messages logged inside a ParallelTasks child process must be relayed
    back to the parent's status and warning streams, location included."""
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    def child_process():
        # runs in a forked worker; output must round-trip to the parent
        logger.info('message1')
        logger.warning('message2', location='index')

    tasks = ParallelTasks(1)
    tasks.add_task(child_process)
    tasks.join()
    assert 'message1' in status.getvalue()
    assert 'index.txt: WARNING: message2' in warning.getvalue()
Example #22
0
def test_output_with_unencodable_char(app, status, warning):
    """Characters the status stream cannot encode are replaced with '?'
    instead of crashing the logger."""
    class StreamWriter(codecs.StreamWriter):
        # Simulates a cp1252 console: encoding raises UnicodeEncodeError
        # for any character outside the codepage (e.g. U+206D below).
        def write(self, object):
            self.stream.write(object.encode('cp1252').decode('cp1252'))

    logging.setup(app, StreamWriter(status), warning)
    logger = logging.getLogger(__name__)

    # info with UnicodeEncodeError
    status.truncate(0)
    status.seek(0)
    logger.info(u"unicode \u206d...")
    assert status.getvalue() == "unicode ?...\n"
Example #23
0
def setup(app):
    """Install the plugin.

    :param app: Sphinx application context.
    """
    from sphinx.util import logging
    logging.getLogger(__name__).info('Initializing GitHub plugin')
    # register the gh* roles; issues and pulls share one handler
    for role_name, handler in (
        ('ghissue', ghissue_role),
        ('ghpull', ghissue_role),
        ('ghuser', ghuser_role),
        ('ghcommit', ghcommit_role),
    ):
        app.add_role(role_name, handler)
    app.add_config_value('github_project_url', None, 'env')
    return {'parallel_read_safe': True}
Example #24
0
        def run(self):
            '''
            Called when parsing the document.

            Splits the directive arguments on commas and files the current
            document under each non-empty item: both in the env-wide index
            (``env.filing``) and in the per-document blog metadata.
            '''
            env = self.state.document.settings.env

            for item in " ".join(self.arguments).split(","):
                item = item.strip()
                if item == "none":
                    continue

                if not item:
                    # FIX: `.warn()` is the deprecated alias of `.warning()`
                    # on both stdlib and Sphinx loggers; use the supported
                    # name.  NOTE(review): `name` is not defined in this
                    # block — presumably a closure variable of the enclosing
                    # scope; verify against the full module.
                    logging.getLogger(__name__).warning(
                        "%s: empty string in '%s' directive" %
                        (env.docname, name,))
                    continue

                if item not in env.filing[name]:
                    env.filing[name][item] = []
                env.filing[name][item].append(env.docname)
                env.blog_metadata[env.docname].filing[name].append(
                    (utils.name_from_title(item), item))

            return []
Example #25
0
    def process_link(self, env, refnode, has_explicit_title, title, target):
        """Resolve a configuration-directive cross-reference.

        Explicit titles pass through unchanged; otherwise the display name
        (and, when available, the internal target) are derived from the
        directive signature contained in *title*.
        """
        logger = logging.getLogger(__name__)

        if has_explicit_title:
            return title, target

        directive = get_config_directive(title)
        #logger.debug('process_link({}, {})'.format(title, target))
        #logger.debug('process_link: ' + pformat(directive))

        # prefer the parsed internal target, falling back to the raw one
        resolved = directive.get('internaltarget', target)
        return directive['displayname'], resolved
Example #26
0
def test_pending_warnings(app, status, warning):
    """Warnings emitted inside logging.pending_warnings() are buffered and
    flushed, in order, only when the context exits."""
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    logger.warning('message1')
    with logging.pending_warnings():
        # not logged yet (buffered) in here
        logger.warning('message2')
        logger.warning('message3')
        assert 'WARNING: message1' in warning.getvalue()
        assert 'WARNING: message2' not in warning.getvalue()
        assert 'WARNING: message3' not in warning.getvalue()

    # actually logged as ordered
    assert 'WARNING: message2\nWARNING: message3' in strip_escseq(warning.getvalue())
Example #27
0
def test_warningiserror(app, status, warning):
    """warningiserror=True escalates warnings to SphinxWarning — including
    messages that themselves contain %-format placeholders."""
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    # if False, warning is not error
    app.warningiserror = False
    logger.warning('message')

    # if True, warning raises SphinxWarning exception
    app.warningiserror = True
    with pytest.raises(SphinxWarning):
        logger.warning('message: %s', 'arg')

    # message contains format string (refs: #4070)
    with pytest.raises(SphinxWarning):
        logger.warning('%s')
Example #28
0
def test_prefixed_warnings(app, status, warning):
    """prefixed_warnings() contexts nest: the innermost prefix replaces the
    outer one while active, the outer prefix is restored on inner exit,
    and no prefix remains after the outermost context exits."""
    logging.setup(app, status, warning)
    logger = logging.getLogger(__name__)

    logger.warning('message1')
    with prefixed_warnings('PREFIX:'):
        logger.warning('message2')
        with prefixed_warnings('Another PREFIX:'):
            logger.warning('message3')
        logger.warning('message4')
    logger.warning('message5')

    assert 'WARNING: message1' in warning.getvalue()
    assert 'WARNING: PREFIX: message2' in warning.getvalue()
    assert 'WARNING: Another PREFIX: message3' in warning.getvalue()
    assert 'WARNING: PREFIX: message4' in warning.getvalue()
    assert 'WARNING: message5' in warning.getvalue()
Example #29
0
def setup(app):
    """Extension entry point: register the `confval` and `event` object
    types, trim module-docstring boilerplate, and shim the removed
    app.info/warn/debug helpers for Read the Docs."""
    from sphinx.ext.autodoc import cut_lines
    from sphinx.util.docfields import GroupedField
    # drop the first 4 lines of every module docstring before rendering
    app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
    app.add_object_type('confval', 'confval',
                        objname='configuration value',
                        indextemplate='pair: %s; configuration value')
    fdesc = GroupedField('parameter', label='Parameters',
                         names=['param'], can_collapse=True)
    app.add_object_type('event', 'event', 'pair: %s; event', parse_event,
                        doc_field_types=[fdesc])

    # workaround for RTD
    # forward the deprecated/removed app logging helpers to the logging API
    from sphinx.util import logging
    logger = logging.getLogger(__name__)
    app.info = lambda *args, **kwargs: logger.info(*args, **kwargs)
    app.warn = lambda *args, **kwargs: logger.warning(*args, **kwargs)
    app.debug = lambda *args, **kwargs: logger.debug(*args, **kwargs)
    def test_youtube_size_warning(self):
        """A youtube directive pointing at an unparseable URL must log an
        '[youtube] unable to extract video id' warning."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")

        from docutils import nodes as skip_

        # the URL below is deliberately malformed (no extractable video id)
        content = """
                    test a directive
                    ================

                    before

                    .. youtube:: https://www.youhtube.comgSchPGmtikI
                        :width: 300

                    this code shoud appear___

                    after
                    """.replace("                    ", "")
        if sys.version_info[0] >= 3:
            content = content.replace('u"', '"')

        tives = [("youtube", YoutubeDirective, youtube_node,
                  visit_youtube_node, depart_youtube_node)]

        # capture the extension's log output so the warning can be asserted
        logger = getLogger("youtube")
        log_capture_string = StringIO()
        ch = logging.StreamHandler(log_capture_string)
        ch.setLevel(logging.DEBUG)
        logger.logger.addHandler(ch)

        cst = rst2html(content, writer="custom",
                       keep_warnings=True, directives=tives)
        lines = log_capture_string.getvalue()
        t1 = "[youtube] unable to extract video id from"
        if t1 not in lines:
            raise Exception(lines)
Example #31
0
def get_config_directive(text):
    """
    This function generates from the signature
    the different required formats of a configuration directive.
    The signature (text) must be given (depending on the type) as:

    <dir|sd|fd|console>/<resourcetype_lower_case>/<DirectiveInCamelCase> = <value>

    Examples for the different types:

    Daemon:
    dir

    Resource Type:
    dir/job

    Resource Name:
    dir/job = backup-client1

    (Reference to a) Resource Directive:
    dir/job/TlsAlwaysIncrementalMaxFullAcl

    Resource Directive With Value:
    dir/job/TlsAlwaysIncrementalMaxFullAcl = False

    :param text: directive signature as described above
    :returns: dict of derived name formats (shortid, displayname, and —
              for 3+ components — index/target entries)
    """
    # NOTE: unused `logger = logging.getLogger(__name__)` and a dead
    # commented-out debug line were removed; nothing in this function logs.

    # Per component count (1-5): format templates for the various outputs.
    templates = {
        1: {"shortid": u"{Daemon}", "display": u"{Daemon}"},
        2: {
            "shortid": u"{Resource}",
            # Resource-Type
            "display": u"{Resource} ({Dmn})",
            # Resource-Name
            "displayWithValue": u"{value} ({Dmn}->{Resource})",
        },
        3: {
            "shortid": u"{Directive}",
            "display": u"{Directive} ({Dmn}->{Resource})",
            "displayWithValue": u"{Directive} ({Dmn}->{Resource}) = {value}",
            "indextemplate": u"Configuration Directive; {Directive} ({Dmn}->{Resource})",
            "internaltargettemplate": u"{dmn}/{resource}/{CamelCaseDirective}",
            # Latex: directiveDirJobCancel%20Lower%20Level%20Duplicates
            # The follow targettemplate will create identical anchors as Latex,
            # but as the base URL is likly it be different, it does not help (and still looks ugly).
            # targettemplate = u'directive{dmn}{resource}{directive}'
            "targettemplate": u"config-{Dmn}_{Resource}_{CamelCaseDirective}",
        },
        4: {
            "shortid": u"{Sub1}",
            "display": u"{Sub1} ({Dmn}->{Resource}->{Directive})",
            "displayWithValue": u"{Sub1} ({Dmn}->{Resource}->{Directive}) = {value}",
            "indextemplate": u"Configuration Directive; {Sub1} ({Dmn}->{Resource}->{Directive})",
            "internaltargettemplate": u"{dmn}/{resource}/{CamelCaseDirective}/{CamelCaseSub1}",
            "targettemplate": u"config-{Dmn}_{Resource}_{CamelCaseDirective}_{CamelCaseSub1}",
        },
        5: {
            "shortid": u"{Sub2}",
            "display": u"{Sub2} ({Dmn}->{Resource}->{Directive}->{Sub1})",
            "displayWithValue": u"{Sub2} ({Dmn}->{Resource}->{Directive}->{Sub1}) = {value}",
            "indextemplate": u"Configuration Directive; {Sub2} ({Dmn}->{Resource}->{Directive}->{Sub1})",
            "internaltargettemplate": u"{dmn}/{resource}/{CamelCaseDirective}/{CamelCaseSub1}/{CamelCaseSub2}",
            "targettemplate": u"config-{Dmn}_{Resource}_{CamelCaseDirective}_{CamelCaseSub1}_{CamelCaseSub2}",
        },
    }

    result = {"signature": text}

    # split off an optional "= value" suffix
    try:
        key, value = text.split("=", 1)
        result["value"] = value.strip()
    except ValueError:
        # fall back
        key = text

    # at most 5 path components: daemon/resource/directive/sub1/sub2
    inputComponent = key.strip().split("/", 4)
    components = len(inputComponent)

    if components >= 1:
        daemon = inputComponent[0].lower()
        if daemon in ("director", "dir"):
            result["Daemon"] = "Director"
            result["dmn"] = "dir"
            result["Dmn"] = "Dir"
        elif daemon in ("storage daemon", "storage", "sd"):
            result["Daemon"] = "Storage Daemon"
            result["dmn"] = "sd"
            result["Dmn"] = "Sd"
        elif daemon in ("file daemon", "file", "fd"):
            result["Daemon"] = "File Daemon"
            result["dmn"] = "fd"
            result["Dmn"] = "Fd"
        elif daemon in ("bconsole", "console"):
            result["Daemon"] = "Console"
            result["dmn"] = "console"
            result["Dmn"] = "Console"
        else:
            # TODO: raise
            result["Daemon"] = "UNKNOWN"
            result["dmn"] = "UNKNOWN"
            result["Dmn"] = "UNKNOWN"

    if components >= 2:
        result["resource"] = inputComponent[1].replace(" ", "").lower()
        result["Resource"] = inputComponent[1].replace(" ", "").capitalize()

    if components >= 3:
        # input_directive should be without spaces.
        # However, we make sure, by removing all spaces.
        result["CamelCaseDirective"] = uppercaseFirstLetter(
            inputComponent[2].replace(" ", "")
        )
        result["Directive"] = convertCamelCase2Spaces(result["CamelCaseDirective"])

        if components >= 4:
            # e.g. fileset include/exclude directive
            # dir/fileset/include/File
            result["CamelCaseSub1"] = uppercaseFirstLetter(
                inputComponent[3].replace(" ", "")
            )
            result["Sub1"] = convertCamelCase2Spaces(result["CamelCaseSub1"])

        if components >= 5:
            # e.g. fileset include options
            # dir/fileset/include/options/basejob
            result["CamelCaseSub2"] = uppercaseFirstLetter(
                inputComponent[4].replace(" ", "")
            )
            result["Sub2"] = convertCamelCase2Spaces(result["CamelCaseSub2"])

        # index/target entries exist only for 3+ components
        result["indexentry"] = templates[components]["indextemplate"].format(**result)
        result["target"] = templates[components]["targettemplate"].format(**result)
        result["internaltarget"] = templates[components][
            "internaltargettemplate"
        ].format(**result)

    result["shortid"] = templates[components]["shortid"].format(**result)
    if "value" in result:
        result["displayname"] = templates[components]["displayWithValue"].format(
            **result
        )
    else:
        result["displayname"] = templates[components]["display"].format(**result)

    return result
Example #32
0
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""

from os import path
from sys import version_info as python_version

from sphinx import version_info as sphinx_version
from sphinx.locale import _
from sphinx.util.logging import getLogger


__version__ = '1.0.0'
__version_full__ = __version__

logger = getLogger(__name__)


def get_html_theme_path():
    """Return the absolute path of the directory containing this theme.

    Note: despite the package name, a single ``str`` is returned (the
    grandparent directory of this module), not a list.
    """
    here = path.dirname(__file__)
    return path.abspath(path.dirname(here))


def config_initiated(app, config):
    """Emit a deprecation warning when ``canonical_url`` is still set.

    Connected to the ``config-inited`` event; ``app`` is unused.
    """
    options = config.html_theme_options or {}
    if options.get('canonical_url'):
        msg = _('The canonical_url option is deprecated, use the html_baseurl option from Sphinx instead.')
        logger.warning(msg)
Example #33
0
def get_logger(name):
    """Return the stdlib logger registered under *name*."""
    return logging.getLogger(name)
Example #34
0
import os
import sys
import json
from json import JSONDecodeError
from sphinx.errors import ExtensionError
import jinja2
from docutils.parsers import rst
from pathlib import Path
from bs4 import BeautifulSoup as bs
from sphinx.util import logging
from pydata_sphinx_theme import index_toctree
from .directives.code import DoxygenSnippet

SPHINX_LOGGER = logging.getLogger(__name__)


def setup_edit_url(app, pagename, templatename, context, doctree):
    """Add a function that jinja can access for returning the edit URL of a page."""
    def has_github_page():
        doxygen_mapping_file = app.config.html_context.get(
            'doxygen_mapping_file')
        name = pagename.rsplit('-')[0]
        if name in doxygen_mapping_file:
            return True
        return False

    def get_edit_url():
        """Return a URL for an "edit this page" link."""
        doc_context = dict()
        doc_context.update(**context)
    def test_full_documentation_module_template(self):
        """
        This test might fail in sphinx-gallery due to a very long filename.
        Please look into the following commit:
        https://github.com/sdpython/sphinx-gallery/commit/3ae9f13250cf25c75e1b17b2fade98b7a9940b0d.

        End-to-end test: downloads the *python3_module_template* sample
        project, patches its ``conf.py`` so it can import pyquickhelper,
        builds its documentation three times (alternating direct and
        indirect calls) and checks the generated HTML artefacts and logs.
        """
        fLOG(__file__,
             self._testMethodName,
             OutputPrint=__name__ == "__main__")

        if is_travis_or_appveyor() in ('travis', 'appveyor'):
            # travis fails due to the following:
            #       sitep = [_ for _ in site.getsitepackages() if "packages" in _]
            # AttributeError: 'module' object has no attribute
            # 'getsitepackages'
            # It fails for python 2.7 (encoding issue).
            return

        temp = get_temp_folder(__file__,
                               "temp_full_documentation_module_template",
                               clean=__name__ != "__main__")

        clog = CustomLog(temp)
        this_pyq = os.path.normpath(
            os.path.abspath(
                os.path.join(os.path.dirname(pyquickhelper.__file__), "..")))

        # In-memory write target used to capture everything the extension
        # loggers emit, so the assertions below can inspect build output.
        class MyStream:
            def __init__(self):
                self.rows = []

            def write(self, text):
                clog("[warning*] {0} - '{1}'".format(len(self),
                                                     text.strip("\n\r ")))
                self.rows.append(text)

            def getvalue(self):
                return "\n".join(self.rows)

            def __len__(self):
                return len(self.rows)

        # start from a clean build folder
        rem = os.path.join(temp, "python3_module_template-master", "_doc",
                           "sphinxdoc", "build")
        if os.path.exists(rem):
            remove_folder(rem)
        url = "https://github.com/sdpython/python3_module_template/archive/master.zip"
        fLOG("[ut] download", url)
        download(url, temp, fLOG=fLOG, flatten=False)
        self.assertTrue(not os.path.exists(os.path.join(temp, "src")))
        root = os.path.join(temp, "python3_module_template-master")

        with sys_path_append(os.path.join(root, "src")):
            # Checks that the unit test might fails.
            # Pre-create the gallery file with the very long name that
            # sphinx-gallery would otherwise create (see docstring).
            coucou = os.path.join(
                temp, "python3_module_template-master", "_doc", "sphinxdoc",
                "source", "gallery",
                "python3_module_template.subproject2.exclude_from_code_coverage.NotImplementedClass.__init__.examples"
            )
            if not os.path.exists(coucou):
                fLOG("[ut] creating file '{0}'".format(coucou))
                clog("[ut] creating file '{0}'".format(coucou))
                dirname = os.path.dirname(coucou)
                os.makedirs(dirname)
                try:
                    # replicating what sphinx_gallery does
                    open(coucou, "w").close()
                except Exception as e:
                    warnings.warn("Unable to create '{0}' due to '{1}'".format(
                        coucou, e))
            else:
                fLOG("[ut] file exists '{0}'".format(coucou))
                clog("[ut] file exists '{0}'".format(coucou))

            # documentation
            fLOG("generate documentation", root)
            var = "python3_module_template"

            # we modify conf.py to let it find pyquickhelper
            pyq = os.path.abspath(os.path.dirname(pyquickhelper.__file__))
            confpy = os.path.join(root, "_doc", "sphinxdoc", "source",
                                  "conf.py")
            if not os.path.exists(confpy):
                raise FileNotFoundError("Unable to find '{0}' and\n{1}".format(
                    confpy, os.listdir(temp)))
            with open(confpy, "r", encoding="utf8") as f:
                lines = f.read().split("\n")
            # insert the sys.path.append just before the first "sys." line
            # (or at the end of the file when none exists)
            fi = len(lines) - 1
            for i, line in enumerate(lines):
                if line.startswith("sys."):
                    fi = i
                    break
            addition = "sys.path.append(r'{0}')".format(pyq)
            lines[fi] = "{0}\n{1}".format(addition, lines[fi])
            with open(confpy, "w", encoding="utf8") as f:
                f.write("\n".join(lines))

            # test
            # build three times, alternating direct_call True/False
            for i in range(0, 3):
                fLOG("\n")
                fLOG("\n")
                fLOG("\n")
                fLOG("#################################################", i)
                fLOG("#################################################", i)
                fLOG("#################################################", i)

                # we add access to pyquickhelper
                p = os.path.abspath(os.path.dirname(pyquickhelper.__file__))
                p = os.path.join(p, 'src')
                fLOG("PYTHONPATH=", p)
                os.environ["PYTHONPATH"] = p
                if p not in sys.path:
                    pos = len(sys.path)
                    sys.path.append(p)
                else:
                    pos = -1

                if "conf" in sys.modules:
                    del sys.modules["conf"]

                fLOG(
                    "[test_full_documentation] **********************************"
                )
                fLOG("[test_full_documentation] begin",
                     list(roles._roles.keys()))
                fLOG(
                    "[test_full_documentation] **********************************"
                )

                direct_call = i % 2 == 0
                layout = ["pdf", "html"]

                # route the three extension loggers into MyStream so their
                # output can be asserted on after the build
                logger1 = getLogger("docassert")
                logger2 = getLogger("tocdelay")
                logger3 = getLogger("downloadlink")
                log_capture_string = MyStream()  # StringIO()
                ch = logging.StreamHandler(log_capture_string)
                ch.setLevel(logging.DEBUG)
                logger1.logger.addHandler(ch)
                logger2.logger.addHandler(ch)
                logger3.logger.addHandler(ch)

                with warnings.catch_warnings(record=True) as ww:
                    warnings.simplefilter("always")
                    generate_help_sphinx(var,
                                         module_name=var,
                                         root=root,
                                         layout=layout,
                                         extra_ext=["tohelp"],
                                         from_repo=False,
                                         direct_call=direct_call,
                                         parallel=1,
                                         fLOG=clog,
                                         extra_paths=[this_pyq])
                    # fail on any recorded warning that is a sphinx ERROR
                    for w in ww:
                        if isinstance(w, dict):
                            rows = ["----"] + [
                                "{0}={1}".format(k, v)
                                for k, v in sorted(w.items())
                            ]
                            sw = "\n".join(rows)
                        elif isinstance(w, warnings.WarningMessage):
                            rows = [
                                "-----",
                                str(type(w)), w.filename,
                                str(w.lineno),
                                str(w.message)
                            ]
                            sw = "\n".join(rows)
                        else:
                            sw = str(w)
                        if "WARNING:" in sw and "ERROR/" in sw:
                            raise Exception(
                                "A warning is not expected:\n{0}".format(sw))

                fLOG(
                    "[test_full_documentation] **********************************"
                )
                fLOG("[test_full_documentation] END")
                fLOG(
                    "[test_full_documentation] **********************************"
                )

                # inspect the captured extension logs
                lines = log_capture_string.getvalue().split("\n")
                for line in lines:
                    if not line.strip():
                        continue
                    if "[docassert]" in line:
                        raise Exception(line)
                    if "[tocdelay]" in line:
                        fLOG("   ", line)
                    if '[tocdelay] ERROR' in line:
                        raise Exception(line)
                    if '[downloadlink]' in line:
                        fLOG(line)

                # we clean
                if "pyquickhelper" in sys.modules:
                    del sys.modules["pyquickhelper"]
                os.environ["PYTHONPATH"] = ""
                if pos >= 0:
                    del sys.path[pos]

                # blog index
                blog = os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                    "blog", "blogindex.html")
                with open(blog, "r", encoding="utf-8") as f:
                    content = f.read()
                self.assertIn("2015", content)
                self.assertIn('"2016/2016-06-11_blogpost_with_label.html"',
                              content)
                spl = content.split("2016-06")
                if len(spl) <= 2:
                    raise Exception("Two expected:\n" + content)

                # checkings
                files = [
                    os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                 "index.html"),
                    os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                 "all_indexes.html"),
                    os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                 "all_notebooks.html"),
                ]
                for f in files:
                    if not os.path.exists(f):
                        raise FileNotFoundError(
                            "Not found '{0}'\n---\n{1}".format(
                                f, "\n".join(lines)))

                self.assertTrue(not os.path.exists(os.path.join(temp, "_doc")))

                rss = os.path.join(root, "_doc", "sphinxdoc", "source", "blog",
                                   "rss.xml")
                with open(rss, "r", encoding="utf8") as f:
                    content_rss = f.read()

                self.assertTrue("__BLOG_ROOT__" not in content_rss)
                # this should be replaced when uploading the stream onto the website
                # the website is unknown when producing the documentation
                # it should be resolved when uploading (the documentation could be
                # uploaded at different places)

                # checks some links were processed
                fhtml = os.path.join(temp, "python3_module_template-master",
                                     "_doc", "sphinxdoc", "build", "html",
                                     "index.html")
                with open(fhtml, "r", encoding="utf8") as f:
                    content = f.read()
                if '<td><p><a class="reference internal" href="index_ext-tohelp.html#ext-tohelp"><span class="std std-ref">ext-tohelp</span></a></p></td>' not in content:
                    raise Exception(content)

                # checks some links were processed
                fhtml = os.path.join(temp, "python3_module_template-master",
                                     "_doc", "sphinxdoc", "build", "html",
                                     "all_notebooks.html")
                with open(fhtml, "r", encoding="utf8") as f:
                    content = f.read()
                if '<img alt="_images/custom_notebooks.thumb.png" src="_images/custom_notebooks.thumb.png" />' not in content:
                    raise Exception(content)

                # checks slideshow was added
                fhtml = os.path.join(temp, "python3_module_template-master",
                                     "build", "notebooks", "bslides",
                                     "custom_notebooks.ipynb")
                with open(fhtml, "r", encoding="utf8") as f:
                    content = f.read()
                self.assertTrue('"slide"' in content)

                # reveal.js + images
                rev = [
                    os.path.join(root, "_doc", "sphinxdoc", "source",
                                 "phdoc_static", "reveal.js"),
                    os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                 "_downloads", "reveal.js"),
                    os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                 "notebooks", "reveal.js"),
                    os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                 "notebooks", "Python_logo_and_wordmark.png"),
                    os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                 "notebooks", "custom_notebooks.slides.html"),
                    os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                 "notebooks", "custom_notebooks.html"),
                    os.path.join(root, "_doc", "sphinxdoc", "build", "html",
                                 "_downloads", "rss.xml")
                ]
                for r in rev:
                    if not os.path.exists(r):
                        # build a detailed error message listing what was
                        # actually produced, plus the custom build log
                        logs = os.path.join(temp, "log_custom_000.txt")
                        with open(logs, "r", encoding='utf-8') as f:
                            content = f.read()
                        found = os.listdir(os.path.dirname(r))
                        found2 = os.listdir(
                            os.path.dirname(
                                r.replace("build",
                                          "source").replace("/html", "")))
                        raise FileNotFoundError(
                            "Unable to find '{0}' in\n{1}\n----\n{2}\n---LOGS--\n{3}"
                            .format(r, "\n".join(sorted(found)),
                                    "\n".join(sorted(found2)), content))

                history = os.path.join(root, "_doc", "sphinxdoc", "build",
                                       "html", "HISTORY.html")
                if not os.path.exists(history):
                    raise FileNotFoundError(history)
                with open(history, "r", encoding="utf-8") as f:
                    content = f.read()

                tofind = 'python3_module_template'
                if tofind not in content:
                    raise Exception("Unable to find '{0}' in\n{1}".format(
                        tofind, content))

            # final check
            # the custom log must show the downloadlink extension ran
            logs = os.path.join(temp, "log_custom_000.txt")
            with open(logs, "r", encoding='utf-8') as f:
                content = f.read()
            if "[downloadlink] node" not in content:
                raise Exception(content)
            if "[downloadlink] HTML" not in content:
                raise Exception(content)
            if "[downloadlink] copy" not in content:
                raise Exception(content)
Example #36
0
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx40Warning, RemovedInSphinx50Warning
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType, Index, IndexEntry
from sphinx.environment import BuildEnvironment
from sphinx.locale import _, __
from sphinx.roles import XRefRole
from sphinx.util import logging
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.inspect import signature_from_str
from sphinx.util.nodes import make_id, make_refnode
from sphinx.util.typing import TextlikeNode

logger = logging.getLogger(__name__)

# REs for Python signatures
# Splits e.g. "module.Class.meth(a, b) -> int" into four groups:
# (dotted class prefix, name, argument list, return annotation);
# each of the optional groups may be None.
py_sig_re = re.compile(
    r'''^ ([\w.]*\.)?            # class name(s)
          (\w+)  \s*             # thing name
          (?: \(\s*(.*)\s*\)     # optional: arguments
           (?:\s* -> \s* (.*))?  #           return annotation
          )? $                   # and nothing more
          ''', re.VERBOSE)

pairindextypes = {
    'module': _('module'),
    'keyword': _('keyword'),
    'operator': _('operator'),
    'object': _('object'),
class ExecuteNotebookWriter():
    """
    Executes jupyter notebook written in python or julia,
    dispatching the work through a dask client owned by the builder.
    """
    # class-level logger shared by all instances
    logger = logging.getLogger(__name__)
    # flipped to 1 once the dask task-stream recording has been started
    startFlag = 0

    def __init__(self, builderSelf):
        # No construction-time state is kept; the builder is passed
        # explicitly to every method instead, so builderSelf is unused.
        pass

    def execute_notebook(self, builderSelf, nb, filename, params, futures):
        """
        Prepare one notebook and submit it for execution.

        Splits *filename* into subdirectory + bare name, normalizes the
        kernel language, optionally attaches LaTeX metadata for PDF
        builds, then delegates to ``execution_cases`` which submits the
        notebook to the dask client.

        :param builderSelf: sphinx builder carrying the ``config`` mapping
        :param nb: notebook node (reads ``nb.metadata.kernelspec.language``)
        :param filename: notebook path relative to the source tree
        :param params: shared execution parameters (uses ``params['destination']``)
        :param futures: list collecting the dask futures created downstream
        """
        execute_nb_config = builderSelf.config["jupyter_execute_nb"]
        coverage = builderSelf.config["jupyter_make_coverage"]
        # read (and thereby validate the presence of) the timeout setting;
        # the value itself is currently unused in this method.
        timeout = execute_nb_config["timeout"]
        subdirectory = ''
        full_path = filename
        # check if there are subdirectories
        # a leading '/' (index == 0) is deliberately not treated as one
        index = filename.rfind('/')
        if index > 0:
            subdirectory = filename[0:index]
            filename = filename[index + 1:]

        # normalize e.g. "Python 3" / "julia 1.x" to a plain language tag
        language = nb.metadata.kernelspec.language
        if (language.lower().find('python') != -1):
            language = 'python'
        elif (language.lower().find('julia') != -1):
            language = 'julia'

        ## adding latex metadata
        if builderSelf.config["jupyter_target_pdf"]:
            nb = self.add_latex_metadata(builderSelf, nb, subdirectory,
                                         filename)

        # - Parse Directories and execute them - #
        # coverage builds run strict (allow_errors=False); regular builds
        # tolerate cell errors so the conversion pipeline can proceed
        if coverage:
            self.execution_cases(builderSelf, params['destination'], False,
                                 subdirectory, language, futures, nb, filename,
                                 full_path)
        else:
            self.execution_cases(builderSelf, params['destination'], True,
                                 subdirectory, language, futures, nb, filename,
                                 full_path)

    def add_latex_metadata(self, builder, nb, subdirectory, filename=""):
        """Attach LaTeX/PDF metadata (title, logo, bib, author) to *nb*.

        *subdirectory* is the notebook's location relative to the source
        root; it drives the relative ``../`` prefix for logo/bib paths.
        Returns the (mutated) notebook.
        """

        ## initialize latex metadata
        if 'latex_metadata' not in nb['metadata']:
            nb['metadata']['latex_metadata'] = {}

        ## check for relative paths
        # one "../" to leave the subdirectory itself, plus one per nested level
        path = ''
        if subdirectory != '':
            path = "../"
            slashes = subdirectory.count('/')
            for i in range(slashes):
                path += "../"

        ## add check for logo here as well
        # NOTE(review): attribute-style access (nb.metadata.title) suggests an
        # nbformat NotebookNode; behavior when 'title' is absent depends on
        # that type -- confirm before relying on it with a plain dict.
        if nb.metadata.title:
            nb.metadata.latex_metadata.title = nb.metadata.title
        if "jupyter_pdf_logo" in builder.config and builder.config[
                'jupyter_pdf_logo']:
            nb.metadata.latex_metadata.logo = path + builder.config[
                'jupyter_pdf_logo']

        if builder.config["jupyter_bib_file"]:
            nb.metadata.latex_metadata.bib = path + builder.config[
                "jupyter_bib_file"]

        if builder.config["jupyter_pdf_author"]:
            nb.metadata.latex_metadata.author = builder.config[
                "jupyter_pdf_author"]

        # the book title is only set on the configured index notebook
        if builder.config["jupyter_pdf_book_index"] is not None and (
                filename
                and builder.config["jupyter_pdf_book_index"] in filename):
            nb.metadata.latex_metadata.jupyter_pdf_book_title = builder.config[
                "jupyter_pdf_book_title"]

        # nb_string = json.dumps(nb_obj, indent=2, sort_keys=True)
        return nb

    def execution_cases(self, builderSelf, directory, allow_errors,
                        subdirectory, language, futures, nb, filename,
                        full_path):
        ## function to handle the cases of execution for coverage reports or html conversion pipeline
        # Chooses the output directory and kernel, then submits the notebook
        # to the dask client; the resulting future is appended to *futures*
        # and indexed in builderSelf.futuresInfo for later error reporting.
        if subdirectory != '':
            builderSelf.executed_notebook_dir = directory + "/" + subdirectory
        else:
            builderSelf.executed_notebook_dir = directory

        ## ensure that executed notebook directory
        ensuredir(builderSelf.executed_notebook_dir)
        ## specifying kernels
        if language == 'python':
            if (sys.version_info > (3, 0)):
                # Python 3 code in this block
                ep = ExecutePreprocessor(timeout=-1,
                                         allow_errors=allow_errors,
                                         kernel_name='python3')
            else:
                # Python 2 code in this block
                ep = ExecutePreprocessor(timeout=-1,
                                         allow_errors=allow_errors,
                                         kernel_name='python2')
        elif language == 'julia':
            ep = ExecutePreprocessor(timeout=-1, allow_errors=allow_errors)
        # NOTE(review): for any other language `ep` stays undefined and the
        # submit below raises NameError -- confirm callers only ever pass
        # 'python' or 'julia' (execute_notebook normalizes to those).

        ### calling this function before starting work to ensure it starts recording
        if (self.startFlag == 0):
            self.startFlag = 1
            builderSelf.client.get_task_stream()

        future = builderSelf.client.submit(
            ep.preprocess, nb, {
                "metadata": {
                    "path": builderSelf.executed_notebook_dir,
                    "filename": filename,
                    "filename_with_path": full_path
                }
            })

        ### dictionary to store info for errors in future
        future_dict = {
            "filename": full_path,
            "filename_with_path": full_path,
            "language_info": nb['metadata']['kernelspec']
        }
        builderSelf.futuresInfo[future.key] = future_dict

        futures.append(future)

    def task_execution_time(self, builderSelf):
        """Return the wall-clock duration of the most recent dask task.

        Reads the last entry of the client's task stream; the layout of
        ``startstops`` changed in dask 2.10.0 (tuple before, dict after),
        so both shapes are handled.
        """
        latest_task = builderSelf.client.get_task_stream()[-1]
        startstop = latest_task['startstops'][0]
        if version.parse(dask.__version__) < version.parse("2.10.0"):
            return startstop[2] - startstop[1]
        return startstop['stop'] - startstop['start']

    def check_execution_completion(self, builderSelf, future, nb,
                                   error_results, count, total_count,
                                   futures_name, params):
        """Collect the outcome of one finished dask future.

        Records timing, writes the executed notebook to disk on success
        (triggering optional HTML/PDF conversion), appends a result entry
        to *error_results*, and returns the notebook's filename.
        """
        error_result = []
        builderSelf.dask_log['futures'].append(str(future))
        status = 'pass'

        # computing time for each task
        computing_time = self.task_execution_time(builderSelf)

        # store the exceptions in an error result array
        if future.status == 'error':
            status = 'fail'
            try:
                builderSelf.execution_status_code = 1
            # NOTE(review): bare except -- presumably guards builders lacking
            # the attribute, but it also swallows unrelated errors; narrow it.
            except:
                self.logger.warning(
                    "No execution status code defined in builder")

            # recover the bookkeeping stored by execution_cases()
            for key, val in builderSelf.futuresInfo.items():
                if key == future.key:
                    filename_with_path = val['filename_with_path']
                    filename = val['filename']
                    language_info = val['language_info']
            error_result.append(future.exception())

        else:
            # nb is a (executed notebook, submit kwargs) pair on success
            passed_metadata = nb[1]['metadata']
            filename = passed_metadata['filename']
            filename_with_path = passed_metadata['filename_with_path']
            executed_nb = nb[0]
            language_info = executed_nb['metadata']['kernelspec']
            executed_nb['metadata']['filename_with_path'] = filename_with_path
            executed_nb['metadata']['download_nb'] = builderSelf.config[
                'jupyter_download_nb']
            if (builderSelf.config['jupyter_download_nb']):
                executed_nb['metadata'][
                    'download_nb_path'] = builderSelf.config[
                        'jupyter_download_nb_urlpath']
            # NOTE(review): startswith() returns a bool, so `!= -1` is always
            # True and this block runs for every futures_name (it looks like
            # a leftover from `find('delayed') != -1`). The dependency
            # bookkeeping below may in fact rely on running for regular
            # futures too -- confirm intent before changing.
            if (futures_name.startswith('delayed') != -1):
                # adding in executed notebooks list
                params['executed_notebooks'].append(filename)
                key_to_delete = False
                # submit any delayed notebook whose dependencies have now
                # all been executed
                for nb, arr in params['dependency_lists'].items():
                    executed = 0
                    for elem in arr:
                        if elem in params['executed_notebooks']:
                            executed += 1
                    if (executed == len(arr)):
                        key_to_delete = nb
                        notebook = params['delayed_notebooks'].get(nb)
                        builderSelf._execute_notebook_class.execute_notebook(
                            builderSelf, notebook, nb, params,
                            params['delayed_futures'])
                if (key_to_delete):
                    del params['dependency_lists'][str(key_to_delete)]
                    key_to_delete = False
            notebook_name = "{}.ipynb".format(filename)
            executed_notebook_path = os.path.join(passed_metadata['path'],
                                                  notebook_name)

            #Parse Executed notebook to remove hide-output blocks
            for cell in executed_nb['cells']:
                if cell['cell_type'] == "code":
                    if cell['metadata']['hide-output']:
                        cell['outputs'] = []
            #Write Executed Notebook as File
            with open(executed_notebook_path, "wt", encoding="UTF-8") as f:
                nbformat.write(executed_nb, f)

            ## generate html if needed
            if (builderSelf.config['jupyter_generate_html']
                    and params['target'] == 'website'):
                builderSelf._convert_class.convert(executed_nb, filename,
                                                   language_info,
                                                   params['destination'],
                                                   passed_metadata['path'])

            ## generate pdfs if set to true
            if (builderSelf.config['jupyter_target_pdf']):
                builderSelf._pdf_class.convert_to_latex(
                    builderSelf, filename_with_path,
                    executed_nb['metadata']['latex_metadata'])
                builderSelf._pdf_class.move_pdf(builderSelf)

        # progress line, e.g. "(3/10)  intro -- pass -- 1.24s"
        print('({}/{})  {} -- {} -- {:.2f}s'.format(count, total_count,
                                                    filename, status,
                                                    computing_time))

        # storing error info if any execution throws an error
        results = dict()
        results['runtime'] = computing_time
        results['filename'] = filename_with_path
        results['errors'] = error_result
        results['language'] = language_info
        error_results.append(results)
        return filename

    def save_executed_notebook(self, builderSelf, params):
        """Wait for all submitted futures (regular, then delayed) to finish.

        Each completion is handed to check_execution_completion(); returns
        the accumulated list of per-notebook result dictionaries.
        """
        error_results = []

        builderSelf.dask_log[
            'scheduler_info'] = builderSelf.client.scheduler_info()
        builderSelf.dask_log['futures'] = []

        ## create an instance of the class id config set
        if (builderSelf.config['jupyter_generate_html']
                and params['target'] == 'website'):
            builderSelf._convert_class = convertToHtmlWriter(builderSelf)

        # this for loop gathers results in the background
        total_count = len(params['futures'])
        count = 0
        update_count_delayed = 1
        for future, nb in as_completed(params['futures'],
                                       with_results=True,
                                       raise_errors=False):
            count += 1
            builderSelf._execute_notebook_class.check_execution_completion(
                builderSelf, future, nb, error_results, count, total_count,
                'futures', params)

        # delayed futures can be submitted while regular ones complete, so
        # the total is extended only once the first delayed result arrives
        for future, nb in as_completed(params['delayed_futures'],
                                       with_results=True,
                                       raise_errors=False):
            count += 1
            if update_count_delayed == 1:
                update_count_delayed = 0
                total_count += len(params['delayed_futures'])
            builderSelf._execute_notebook_class.check_execution_completion(
                builderSelf, future, nb, error_results, count, total_count,
                'delayed_futures', params)

        return error_results

    def produce_code_execution_report(self,
                                      builderSelf,
                                      error_results,
                                      params,
                                      fln="code-execution-results.json"):
        """
        Updates the JSON file that contains the results of the execution of each notebook.

        Existing entries for the same (filename, language) pair are
        overwritten by the fresh results in *error_results*.
        """
        ensuredir(builderSelf.reportdir)
        # NOTE(review): plain concatenation -- assumes reportdir ends with a
        # path separator; verify, or switch to os.path.join.
        json_filename = builderSelf.reportdir + fln

        # Re-index any existing report by (filename, language) so fresh
        # results can overwrite stale entries for the same notebook.
        if os.path.isfile(json_filename):
            with open(json_filename, encoding="UTF-8") as json_file:
                json_data = json.load(json_file)
                temp_dictionary = dict()
                for item in json_data['results']:
                    name = item['filename']
                    language = item['language']
                    if name not in temp_dictionary:
                        temp_dictionary[name] = dict()
                    temp_dictionary[name][language] = item
                json_data['results'] = []

        else:
            temp_dictionary = dict()
            json_data = dict()
            json_data['results'] = []

        # Generate the data for the JSON file.
        for notebook_errors in error_results:
            # runtime kept in tenths of a second for the mm:ss split below
            runtime = int(notebook_errors['runtime'] * 10)
            name = notebook_errors['filename']
            language = notebook_errors['language']['name']
            seconds = (runtime % 600) / 10
            minutes = int(runtime / 600)

            extension = ''
            if (language.lower().find('python') != -1):
                extension = 'py'
            elif (language.lower().find('julia') != -1):
                extension = 'jl'

            nicer_runtime = str(minutes) + ":" + (
                "0" + str(seconds) if seconds < 10 else str(seconds))
            new_dictionary = {
                'filename': name,
                'runtime': nicer_runtime,
                'num_errors': len(notebook_errors['errors']),
                'extension': extension,
                'language': language
            }

            if name not in temp_dictionary:
                temp_dictionary[name] = dict()
            temp_dictionary[name][language] = new_dictionary

        # Flatten the (filename, language) index back into a sorted list.
        temp_list = []
        for key in temp_dictionary:
            for second_key in temp_dictionary[key]:
                temp_list.append(temp_dictionary[key][second_key])

        for item in sorted(temp_list, key=lambda k: k['filename']):
            json_data['results'].append(item)
        json_data['run_time'] = time.strftime("%d-%m-%Y %H:%M:%S")

        # Python 2 needs an explicit unicode round-trip before writing.
        try:
            if (sys.version_info > (3, 0)):
                with open(json_filename, "w") as json_file:
                    json.dump(json_data, json_file)
            else:
                with open(json_filename, "w") as json_file:
                    x = json.dumps(json_data, ensure_ascii=False)
                    if isinstance(x, str):
                        x = unicode(x, 'UTF-8')
                    json_file.write(x)
        except IOError:
            self.logger.warning(
                "Unable to save lecture status JSON file. Does the {} directory exist?"
                .format(builderSelf.reportdir))

    def produce_dask_processing_report(self,
                                       builderSelf,
                                       params,
                                       fln="dask-reports.json"):
        """
        Produces a JSON report of dask execution.

        @param      builderSelf     builder; supplies ``reportdir`` (output
                                    directory) and ``dask_log`` (the data that
                                    is serialized)
        @param      params          currently unused; kept for interface
                                    compatibility with the other report methods
        @param      fln             name of the JSON file written into
                                    ``reportdir``
        """
        ensuredir(builderSelf.reportdir)
        # NOTE(review): plain concatenation assumes reportdir ends with a path
        # separator -- this matches how the other report filenames in this
        # class are built; confirm against the builder's configuration.
        json_filename = builderSelf.reportdir + fln

        try:
            # The file already requires Python 3 (it uses open(..., encoding=...)
            # elsewhere), so the former Python-2 fallback -- which referenced
            # the undefined name ``unicode`` -- was dead code and is removed.
            with open(json_filename, "w") as json_file:
                json.dump(builderSelf.dask_log, json_file)
        except IOError:
            self.logger.warning(
                "Unable to save dask reports JSON file. Does the {} directory exist?"
                .format(builderSelf.reportdir))

    def create_coverage_report(self, builderSelf, error_results, params):
        """
        Creates a coverage report of the errors in notebook

        @param      builderSelf     builder; supplies ``config``, ``errordir``
                                    and is used for error logging
        @param      error_results   list of dicts with keys ``errors``,
                                    ``filename`` and ``language`` (a dict with
                                    ``extension``, ``display_name`` and
                                    ``language``)
        @param      params          currently unused; kept for interface
                                    compatibility

        Writes per-language ``*_overview.txt`` / ``*_errors.txt`` text reports
        (when enabled in the configuration) and an HTML error report rendered
        from the configured template.
        """
        # BUGFIX: the parameter ``error_results`` was previously shadowed here
        # by ``error_results = []``, which made the whole report a no-op, and
        # ``error_files`` was used without ever being defined (NameError once
        # the shadowing was removed).
        errors = []
        error_files = []
        errors_by_language = dict()
        produce_text_reports = builderSelf.config["jupyter_execute_nb"][
            "text_reports"]

        # Parse Error Set: group the reported errors by language, then by file.
        for full_error_set in error_results:
            error_result = full_error_set['errors']
            filename = full_error_set['filename']
            current_language = full_error_set['language']
            language_name = current_language['extension']
            if error_result:
                if language_name not in errors_by_language:
                    errors_by_language[language_name] = dict()
                    errors_by_language[language_name][
                        'display_name'] = current_language['display_name']
                    errors_by_language[language_name][
                        'language'] = current_language['language']
                    errors_by_language[language_name]['files'] = dict()

                errors += error_result
                error_files.append(filename)

                errors_by_language[language_name]['files'][
                    filename] = error_result

        # Create the error report from the HTML template, if it exists.
        templateFolder = builderSelf.config['jupyter_template_path']
        error_report_template_file = templateFolder + "/" + builderSelf.config[
            "jupyter_template_coverage_file_path"]

        error_report_template = []
        if not os.path.isfile(error_report_template_file):
            print("Unable to generate error report - template not found.")
        else:
            with open(error_report_template_file,
                      encoding="UTF-8") as inputFile:
                error_report_template = inputFile.readlines()

        lang_summary = ""
        notebook_looper = ""
        for lang_ext in errors_by_language:
            language_display_name = errors_by_language[lang_ext][
                'display_name']
            language = errors_by_language[lang_ext]['language']
            errors_by_file = errors_by_language[lang_ext]['files']
            error_dir = builderSelf.errordir.format(lang_ext)

            if produce_text_reports:
                # set specific language output file
                lang_error_dir = "{}/{}_errors".format(error_dir, lang_ext)
                # purge language results directory and recreate
                shutil.rmtree(path=lang_error_dir, ignore_errors=True)
                os.makedirs(lang_error_dir)

            if errors_by_file:
                # per-traceback occurrence counts and error numbers
                error_count_dict = dict()
                error_files_dict = dict()

                # create the HTML for the notebook filenames
                notebook_list_HTML = ""

                # write to results file
                # overview output file
                if produce_text_reports:
                    results_file = open(
                        "{}/{}_overview.txt".format(error_dir, lang_ext), 'w')
                    results_file.write(
                        language_display_name +
                        " execution errors occurred in the notebooks below:\n")

                self.logger.error(
                    language_display_name +
                    " execution errors occurred in the notebooks below")

                error_number = 1
                for filename in errors_by_file:
                    self.logger.error(filename)

                    number_of_errors = str(len(errors_by_file[filename]))
                    if produce_text_reports:
                        results_file.write("\t{} - {} errors.\n".format(
                            filename, number_of_errors))

                    notebook_list_HTML += "<li><a href=\"#{}\">{}</a></li>".format(
                        lang_ext + "_" + filename, filename)
                    notebook_looper += "<h3 id=\"{}\">{} - {} {} errors</h3>\n".format(
                        lang_ext + "_" + filename, filename, number_of_errors,
                        language_display_name)

                    if produce_text_reports:
                        error_file = open(
                            "{}/{}_errors.txt".format(lang_error_dir,
                                                      filename), "w")

                    for error in errors_by_file[filename]:
                        # Some errors don't provide a traceback. Make sure that some useful information is provided
                        # to the report - if nothing else, the type of error that was caught.
                        traceback = getattr(error, "traceback", None)
                        if traceback is None:
                            error.traceback = str(error)

                        # Use the final traceback line as the grouping key for
                        # the error-count summary.
                        last_line = error.traceback.splitlines()[-1]
                        if last_line not in error_files_dict:
                            error_files_dict[last_line] = []
                        error_files_dict[last_line].append(error_number)

                        if last_line not in error_count_dict:
                            error_count_dict[last_line] = 0
                        error_count_dict[last_line] += 1

                        notebook_looper += "<pre><code class=\""\
                                        + language\
                                        + "\">{}</code></pre>\n".format(error.traceback)

                        error_number += 1

                        if produce_text_reports:
                            error_file.write(error.traceback + "\n")

                    if produce_text_reports:
                        error_file.close()

                if notebook_list_HTML != "":
                    lang_summary += "<p>" + language_display_name \
                                    + " execution errors occurred in the following notebooks:</p><ul>"\
                                    + notebook_list_HTML + " </ul>"

                # write error count and errors to overview.txt
                if produce_text_reports:
                    results_file.write(
                        "\n----------------------\nError count details: [count] error\n\n"
                    )
                    for key, value in error_count_dict.items():
                        results_file.write("[{}] {}\n".format(value, key))
                        results_file.write('\n')

                    results_file.write(
                        "\nFor specifics, including the cell block, refer to [notebook name]_errors.txt\n"
                    )
                    results_file.close()

            else:
                # no errors. Update overview to show that
                if produce_text_reports:
                    results_file = open(
                        "{}/{}_overview.txt".format(error_dir, lang_ext), 'w')
                    results_file.write("No errors occurred!\n")
                    results_file.close()

            # create the dictionary of variables to inject into the HTML template
            variables = dict()
            variables['ERROR_SUMMARY'] = lang_summary
            variables['NOTEBOOK_LOOP'] = notebook_looper
            variables['DATETIME'] = time.strftime("%c")

            # Save the error report.
            filename = "errors-" + time.strftime("%d%m%Y") + ".html"
            full_error_report_filename = os.path.normpath(error_dir + "/" +
                                                          filename)
            with open(full_error_report_filename, "w",
                      encoding="UTF-8") as error_output_file:
                for line in error_report_template:
                    for keyName in variables:
                        target_string = "{" + keyName + "}"
                        line = line.replace(target_string, variables[keyName])

                    error_output_file.write(line)
def check_typed_make_field(
        self,
        types,  # type: Dict[unicode, List[nodes.Node]]
        domain,  # type: unicode
        items,  # type: Tuple
        env=None,  # type: BuildEnvironment
        # type inspect.Parameters (to check that all parameters are documented)
    parameters=None,
        function_name=None,  # str
        docname=None,  # str
        kind=None  # str
):
    """
    Overwrites function
    `make_field <https://github.com/sphinx-doc/sphinx/blob/master/sphinx/util/docfields.py#L197>`_.
    Processes one argument of a function and emits ``docassert`` warnings when
    the documented arguments do not match the function's signature.

    @param      self            from original function
    @param      types           from original function
    @param      domain          from original function
    @param      items           from original function (list of
                                ``(fieldarg, content)`` pairs, or a single
                                entry for the return documentation)
    @param      env             from original function
    @param      parameters      list of known arguments for the function or method
    @param      function_name   function name these arguments belong to
    @param      docname         document which contains the object
    @param      kind            tells which kind of object *function_name* is (function, method or class)

    Example of warnings it raises:

    ::

        [docassert] 'onefunction' has no parameter 'a' (in '...project_name\\subproject\\myexampleb.py').
        [docassert] 'onefunction' has undocumented parameters 'a, b' (...project_name\\subproject\\myexampleb.py').

    """
    if parameters is None:
        # No signature available: nothing can be checked against, so every
        # documented argument will be reported as unknown below.
        check_params = {}
    else:
        parameters = list(parameters)
        if kind == "method":
            # Drop the implicit first argument (self/cls) for methods.
            parameters = parameters[1:]

        def kg(p):
            "Return the parameter name whether *p* is a string or a Parameter."
            return p if isinstance(p, str) else p.name

        # Map each declared parameter to the number of times it is documented.
        check_params = {kg(p): 0 for p in parameters}
    logger = logging.getLogger("docassert")

    def check_item(fieldarg, content, logger):
        "Warn when a documented argument is unknown or documented twice."
        if fieldarg not in check_params:
            if function_name is not None:
                logger.warning(
                    "[docassert] '{0}' has no parameter '{1}' (in '{2}').".
                    format(function_name, fieldarg, docname))
        else:
            check_params[fieldarg] += 1
            if check_params[fieldarg] > 1:
                logger.warning(
                    "[docassert] '{1}' of '{0}' is duplicated (in '{2}').".
                    format(function_name, fieldarg, docname))

    if isinstance(items, list):
        for fieldarg, content in items:
            check_item(fieldarg, content, logger)
        mini = None if len(check_params) == 0 else min(check_params.values())
        if mini == 0:
            # At least one declared parameter was never documented.
            check_params = list(check_params.items())
            nodoc = list(sorted(k for k, v in check_params if v == 0))
            if len(nodoc) > 0:
                if len(nodoc) == 1 and nodoc[0] == 'self':
                    # Behavior should be improved.
                    pass
                else:
                    logger.warning(
                        "[docassert] '{0}' has undocumented parameters '{1}' (in '{2}')."
                        .format(function_name, ", ".join(nodoc), docname))
    else:
        # Documentation related to the return.
        pass
    contain all of the previously failed references.

"""

from collections import defaultdict
import json
import logging
from pathlib import Path

from docutils.utils import get_source_line
from docutils import nodes
from sphinx.util import logging as sphinx_logging

import matplotlib

logger = sphinx_logging.getLogger(__name__)


class MissingReferenceFilter(logging.Filter):
    """
    A logging filter designed to record missing reference warning messages
    for use by this extension
    """
    def __init__(self, app):
        # Store the Sphinx application object so recorded warnings can be
        # associated with the running build.
        self.app = app
        super().__init__()

    def _record_reference(self, record):
        # Guard: only proceed for records whose ``type`` attribute is 'ref'
        # and whose ``location`` attribute is a docutils node; anything else
        # is ignored.
        # NOTE(review): the method body appears truncated at this point in the
        # file -- the code that actually records the reference is not visible
        # here; confirm against the full source.
        if not (getattr(record, 'type', '') == 'ref'
                and isinstance(getattr(record, 'location', None), nodes.Node)):
            return
    def override_transform(self, other_self, node):
        """
        Transform a single field list *node*.
        Overwrite function `transform <https://github.com/sphinx-doc/sphinx/blob/master/sphinx/util/docfields.py#L271>`_.
        It only adds extra verification and returns results from the replaced function.

        @param      other_self      the builder
        @param      node            node the replaced function changes or replace

        The function parses the original function and checks that the list of arguments declared
        by the function is the same the list of documented arguments.
        """
        typemap = other_self.typemap
        entries = []
        groupindices = {}  # type: Dict[unicode, int]
        types = {}  # type: Dict[unicode, Dict]

        # step 1: traverse all fields and collect field types and content
        for field in node:
            fieldname, fieldbody = field
            try:
                # split into field type and argument
                fieldtype, fieldarg = fieldname.astext().split(None, 1)
            except ValueError:
                # maybe an argument-less field type?
                fieldtype, fieldarg = fieldname.astext(), ''
            # Only ':param ...:' fields are verified by this extension.
            if fieldtype != "param":
                continue
            typedesc, is_typefield = typemap.get(fieldtype, (None, None))

            # sort out unknown fields
            if typedesc is None or typedesc.has_arg != bool(fieldarg):
                # either the field name is unknown, or the argument doesn't
                # match the spec; capitalize field name and be done with it
                new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
                if fieldarg:
                    new_fieldname += ' ' + fieldarg
                fieldname[0] = nodes.Text(new_fieldname)
                entries.append(field)
                continue

            typename = typedesc.name

            # collect the content, trying not to keep unnecessary paragraphs
            if _is_single_paragraph(fieldbody):
                content = fieldbody.children[0].children
            else:
                content = fieldbody.children

            # if the field specifies a type, put it in the types collection
            if is_typefield:
                # filter out only inline nodes; others will result in invalid
                # markup being written out
                content = [
                    n for n in content
                    if isinstance(n, (nodes.Inline, nodes.Text))
                ]
                if content:
                    types.setdefault(typename, {})[fieldarg] = content
                continue

            # also support syntax like ``:param type name:``
            if typedesc.is_typed:
                try:
                    argtype, argname = fieldarg.split(None, 1)
                except ValueError:
                    pass
                else:
                    types.setdefault(typename,
                                     {})[argname] = [nodes.Text(argtype)]
                    fieldarg = argname

            translatable_content = nodes.inline(fieldbody.rawsource,
                                                translatable=True)
            translatable_content.document = fieldbody.parent.document
            translatable_content.source = fieldbody.parent.source
            translatable_content.line = fieldbody.parent.line
            translatable_content += content

            # Import object, get the list of parameters.  The source string
            # produced by autodoc has the form '<file>:docstring of <object>'.
            docs = fieldbody.parent.source.split(":docstring of")[-1].strip()

            myfunc = None
            funckind = None
            function_name = None
            excs = []
            try:
                myfunc, function_name, funckind = import_any_object(docs)
            except ImportError as e:
                excs.append(e)

            if myfunc is None:
                if len(excs) > 0:
                    reasons = "\n".join("   {0}".format(e) for e in excs)
                else:
                    reasons = "unknown"
                logger = logging.getLogger("docassert")
                logger.warning(
                    "[docassert] unable to import object '{0}', reasons:\n{1}".
                    format(docs, reasons))
                myfunc = None

            if myfunc is None:
                signature = None
                parameters = None
            else:
                try:
                    signature = inspect.signature(myfunc)
                    parameters = signature.parameters
                except TypeError:
                    # inspect.signature raises TypeError for objects it does
                    # not support (e.g. some builtins).
                    logger = logging.getLogger("docassert")
                    logger.warning(
                        "[docassert] unable to get signature of '{0}'.".format(
                            docs))
                    signature = None
                    parameters = None

            # grouped entries need to be collected in one entry, while others
            # get one entry per field
            if typedesc.is_grouped:
                if typename in groupindices:
                    group = entries[groupindices[typename]]
                else:
                    groupindices[typename] = len(entries)
                    group = [typedesc, []]
                    entries.append(group)
                entry = typedesc.make_entry(fieldarg, [translatable_content])
                group[1].append(entry)
            else:
                entry = typedesc.make_entry(fieldarg, [translatable_content])
                entries.append([typedesc, entry])

        # step 2: all entries are collected, check the parameters list.
        try:
            env = other_self.directive.state.document.settings.env
        except AttributeError as e:
            logger = logging.getLogger("docassert")
            logger.warning("[docassert] {0}".format(e))
            env = None

        # NOTE(review): ``fieldbody`` is the loop variable from step 1; if the
        # node contained no fields at all this line would raise NameError --
        # confirm whether empty field lists can reach this point.
        docname = fieldbody.parent.source.split(':docstring')[0]

        for entry in entries:
            if isinstance(entry, nodes.field):
                # raise NotImplementedError()
                logger = logging.getLogger("docassert")
                logger.warning(
                    "[docassert] unable to check [nodes.field] {0}".format(
                        entry))
            else:
                fieldtype, content = entry
                fieldtypes = types.get(fieldtype.name, {})
                check_typed_make_field(other_self,
                                       fieldtypes,
                                       other_self.directive.domain,
                                       content,
                                       env=env,
                                       parameters=parameters,
                                       function_name=function_name,
                                       docname=docname,
                                       kind=funckind)

        # Delegate to the original (replaced) transform for the actual output.
        return self.replaced(other_self, node)