Example #1
def test_status_iterator(app, status, warning):
    logging.setup(app, status, warning)

    # test for old_status_iterator
    status.truncate(0)
    yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... '))
    output = strip_escseq(status.getvalue())
    assert 'testing ... hello sphinx world \n' in output
    assert yields == ['hello', 'sphinx', 'world']

    # test for status_iterator (verbosity=0)
    status.truncate(0)
    yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ',
                                  length=3, verbosity=0))
    output = strip_escseq(status.getvalue())
    assert 'testing ... [ 33%] hello                \r' in output
    assert 'testing ... [ 66%] sphinx               \r' in output
    assert 'testing ... [100%] world                \r\n' in output
    assert yields == ['hello', 'sphinx', 'world']

    # test for status_iterator (verbosity=1)
    status.truncate(0)
    yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ',
                                  length=3, verbosity=1))
    output = strip_escseq(status.getvalue())
    assert 'testing ... [ 33%] hello\n' in output
    assert 'testing ... [ 66%] sphinx\n' in output
    assert 'testing ... [100%] world\n\n' in output
    assert yields == ['hello', 'sphinx', 'world']
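
Note: the tests above exercise status_iterator from sphinx.util (relocated to sphinx.util.display in newer Sphinx releases). A minimal usage sketch; the import fallback below is an assumption about the installed Sphinx version:

# Minimal sketch of plain status_iterator usage.
try:
    from sphinx.util.display import status_iterator  # newer Sphinx
except ImportError:
    from sphinx.util import status_iterator  # older Sphinx

items = ['hello', 'sphinx', 'world']
# Positional parameters: iterable, summary, color, length, verbosity.
for item in status_iterator(items, 'testing ... ', 'darkgreen',
                            len(items), verbosity=0):
    pass  # per-item work goes here; progress is written to the status stream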
Example #2
    def _write_parallel(self, docnames, nproc):
        # type: (Sequence[unicode], int) -> None
        def write_process(docs):
            # type: (List[Tuple[unicode, nodes.Node]]) -> None
            self.app.phase = BuildPhase.WRITING
            for docname, doctree in docs:
                self.write_doc(docname, doctree)

        # warm up caches/compile templates using the first document
        firstname, docnames = docnames[0], docnames[1:]
        self.app.phase = BuildPhase.RESOLVING
        doctree = self.env.get_and_resolve_doctree(firstname, self)
        self.app.phase = BuildPhase.WRITING
        self.write_doc_serialized(firstname, doctree)
        self.write_doc(firstname, doctree)

        tasks = ParallelTasks(nproc)
        chunks = make_chunks(docnames, nproc)

        self.app.phase = BuildPhase.RESOLVING
        for chunk in status_iterator(chunks, __('writing output... '), "darkgreen",
                                     len(chunks), self.app.verbosity):
            arg = []
            for i, docname in enumerate(chunk):
                doctree = self.env.get_and_resolve_doctree(docname, self)
                self.write_doc_serialized(docname, doctree)
                arg.append((docname, doctree))
            tasks.add_task(write_process, arg)

        # make sure all threads have finished
        logger.info(bold(__('waiting for workers...')))
        tasks.join()
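
This parallel-write pattern pairs status_iterator with ParallelTasks and make_chunks from sphinx.util.parallel. A stripped-down sketch of the same pattern outside a builder, assuming the platform supports forking worker processes:

from sphinx.util.parallel import ParallelTasks, make_chunks

def process(chunk):
    for item in chunk:
        pass  # expensive per-item work runs in a worker process

items = ['a', 'b', 'c', 'd']
nproc = 2
tasks = ParallelTasks(nproc)
for chunk in make_chunks(items, nproc):
    tasks.add_task(process, chunk)
tasks.join()  # block until every worker has finished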
Example #3
def install_collapse_static_files(app, env):
    STATICS_DIR_PATH = os.path.join(app.builder.outdir, IMAGE_DIR_NAME)
    dest_path = os.path.join(STATICS_DIR_PATH, 'sphinx-needs')

    source_folder = os.path.join(os.path.dirname(__file__), "libs/html")
    files_to_copy = [os.path.join(source_folder, "sphinx_needs_collapse.js")]

    if parse_version(sphinx_version) < parse_version("1.6"):
        global status_iterator
        status_iterator = app.status_iterator

    for source_file_path in status_iterator(
            files_to_copy,
            'Copying static files for sphinx-needs collapse support...', brown,
            len(files_to_copy)):

        if not os.path.isabs(source_file_path):
            raise IOError(
                "Path must be absolute. Got: {}".format(source_file_path))

        if not os.path.exists(source_file_path):
            raise IOError("File not found: {}".format(source_file_path))

        dest_file_path = os.path.join(
            dest_path, os.path.relpath(source_file_path, source_folder))

        if not os.path.exists(os.path.dirname(dest_file_path)):
            ensuredir(os.path.dirname(dest_file_path))

        copyfile(source_file_path, dest_file_path)

        safe_remove_file("sphinx-needs/libs/html/sphinx_needs_collapse.js",
                         app)
        safe_add_file("sphinx-needs/libs/html/sphinx_needs_collapse.js", app)
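
The parse_version guard above swaps in the deprecated app.status_iterator on Sphinx older than 1.6. The same compatibility idea isolated into a sketch; the helper name is hypothetical:

# Hypothetical helper showing the compatibility pattern used above.
# sphinx.util.status_iterator appeared in Sphinx 1.6; the old
# app.status_iterator method was deprecated then and later removed.
def get_status_iterator(app):
    try:
        from sphinx.util import status_iterator  # Sphinx >= 1.6
        return status_iterator
    except ImportError:
        return app.status_iterator  # Sphinx < 1.6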
Example #4
File: __init__.py Project: rds0751/odfo
    def _read_parallel(self, docnames, app, nproc):
        # type: (List[unicode], Sphinx, int) -> None
        # clear all outdated docs at once
        for docname in docnames:
            app.emit('env-purge-doc', self, docname)
            self.clear_doc(docname)

        def read_process(docs):
            # type: (List[unicode]) -> unicode
            self.app = app
            with sphinx_smartquotes_action(self):
                for docname in docs:
                    self.read_doc(docname, app)
            # allow pickling self to send it back
            return BuildEnvironment.dumps(self)

        def merge(docs, otherenv):
            # type: (List[unicode], unicode) -> None
            env = BuildEnvironment.loads(otherenv)
            self.merge_info_from(docs, env, app)

        tasks = ParallelTasks(nproc)
        chunks = make_chunks(docnames, nproc)

        for chunk in status_iterator(chunks, 'reading sources... ', "purple",
                                     len(chunks), self.app.verbosity):
            tasks.add_task(read_process, chunk, merge)

        # make sure all threads have finished
        logger.info(bold('waiting for workers...'))
        tasks.join()
Example #6
    def _read_parallel(self, docnames, nproc):
        # type: (List[unicode], int) -> None
        # clear all outdated docs at once
        for docname in docnames:
            self.app.emit('env-purge-doc', self.env, docname)
            self.env.clear_doc(docname)

        def read_process(docs):
            # type: (List[unicode]) -> bytes
            self.env.app = self.app
            for docname in docs:
                self.read_doc(docname)
            # allow pickling self to send it back
            return pickle.dumps(self.env, pickle.HIGHEST_PROTOCOL)

        def merge(docs, otherenv):
            # type: (List[unicode], bytes) -> None
            env = pickle.loads(otherenv)
            self.env.merge_info_from(docs, env, self.app)

        tasks = ParallelTasks(nproc)
        chunks = make_chunks(docnames, nproc)

        for chunk in status_iterator(chunks, 'reading sources... ', "purple",
                                     len(chunks), self.app.verbosity):
            tasks.add_task(read_process, chunk, merge)

        # make sure all threads have finished
        logger.info(bold('waiting for workers...'))
        tasks.join()
Example #7
    def copy_resources(self):
        """Copy supporting resources to the output folder."""

        resource_iterator = status_iterator(
            self.resources.items(),
            "copying resources... ",
            "brown",
            len(self.resources),
            self.app.verbosity,
            stringify_func=lambda r: r[0],
        )

        for dest, (op, value) in resource_iterator:
            logger.debug("[tutorial]: %s: (%s, %s)", dest, op, value)

            destination = pathlib.Path(self.outdir, dest)
            if not destination.parent.exists():
                destination.parent.mkdir(parents=True)

            if op == "copy":
                copyfile(str(value), str(destination))
                continue

            if op == "create":
                with destination.open("w") as f:
                    f.write(value)

                continue

            raise TypeError(f"Unknown resource operation: '{op}'")
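
When iterating over dict items as above, stringify_func controls how each (key, value) pair is displayed in the progress line. A tiny self-contained sketch of that pattern (the sample resource data is made up):

from sphinx.util import status_iterator  # sphinx.util.display in newer Sphinx

resources = {'index.html': ('copy', '/tmp/index.html')}
for dest, (op, value) in status_iterator(
        resources.items(),
        'copying resources... ',
        'brown',
        len(resources),
        verbosity=0,
        stringify_func=lambda r: r[0],  # show only the destination key
):
    pass  # dispatch on op here, as the example above does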
Example #8
def install_backend_static_files(app, env):
    STATICS_DIR_PATH = os.path.join(app.builder.outdir, STATICS_DIR_NAME)
    dest_path = os.path.join(
        STATICS_DIR_PATH, 'sphinxcontrib-images',
        app.sphinxcontrib_images_backend.__class__.__name__)
    files_to_copy = app.sphinxcontrib_images_backend.STATIC_FILES

    for source_file_path in status_iterator(
            files_to_copy, 'Copying static files for sphinxcontrib-images...',
            brown, len(files_to_copy)):

        dest_file_path = os.path.join(dest_path, source_file_path)

        if not os.path.exists(os.path.dirname(dest_file_path)):
            ensuredir(os.path.dirname(dest_file_path))

        source_file_path = os.path.join(
            os.path.dirname(sys.modules[app.sphinxcontrib_images_backend.
                                        __class__.__module__].__file__),
            source_file_path)

        copyfile(source_file_path, dest_file_path)

        if dest_file_path.endswith('.js'):
            app.add_javascript(
                os.path.relpath(dest_file_path, STATICS_DIR_PATH))
        elif dest_file_path.endswith('.css'):
            app.add_stylesheet(
                os.path.relpath(dest_file_path, STATICS_DIR_PATH))
Example #9
 def copy_image_files(self) -> None:
     if self.images:
         stringify_func = ImageAdapter(self.app.env).get_original_image_uri
         for src in status_iterator(self.images,
                                    __('copying images... '),
                                    "brown",
                                    len(self.images),
                                    self.app.verbosity,
                                    stringify_func=stringify_func):
             dest = self.images[src]
             try:
                 copy_asset_file(path.join(self.srcdir, src),
                                 path.join(self.outdir, dest))
             except Exception as err:
                 logger.warning(__('cannot copy image file %r: %s'),
                                path.join(self.srcdir, src), err)
     if self.config.latex_logo:
         if not path.isfile(path.join(self.confdir,
                                      self.config.latex_logo)):
             raise SphinxError(
                 __('logo file %r does not exist') % self.config.latex_logo)
         else:
             copy_asset_file(
                 path.join(self.confdir, self.config.latex_logo),
                 self.outdir)
Example #11
def install_styles_static_files(app, env):
    STATICS_DIR_PATH = os.path.join(app.builder.outdir, IMAGE_DIR_NAME)
    dest_path = os.path.join(STATICS_DIR_PATH, 'sphinx-needs')

    files_to_copy = ["common.css"]

    if app.config.needs_css == 'modern.css':
        source_folder = os.path.join(os.path.dirname(__file__), "css/modern/")
        for root, dirs, files in os.walk(source_folder):
            for single_file in files:
                files_to_copy.append(os.path.join(root, single_file))
    elif app.config.needs_css == 'dark.css':
        source_folder = os.path.join(os.path.dirname(__file__), "css/dark/")
        for root, dirs, files in os.walk(source_folder):
            for single_file in files:
                files_to_copy.append(os.path.join(root, single_file))
    elif app.config.needs_css == 'blank.css':
        source_folder = os.path.join(os.path.dirname(__file__), "css/blank/")
        for root, dirs, files in os.walk(source_folder):
            for single_file in files:
                files_to_copy.append(os.path.join(root, single_file))
    else:
        files_to_copy += [app.config.needs_css]

    # Be sure no "old" css layout is already set
    safe_remove_file("sphinx-needs/common.css", app)
    safe_remove_file("sphinx-needs/blank.css", app)
    safe_remove_file("sphinx-needs/modern.css", app)
    safe_remove_file("sphinx-needs/dark.css", app)

    if parse_version(sphinx_version) < parse_version("1.6"):
        global status_iterator
        status_iterator = app.status_iterator

    for source_file_path in status_iterator(
            files_to_copy,
            'Copying static files for sphinx-needs custom style support...',
            brown, len(files_to_copy)):

        if not os.path.isabs(source_file_path):
            source_file_path = os.path.join(os.path.dirname(__file__), "css",
                                            source_file_path)

        if not os.path.exists(source_file_path):
            # Warn before falling back so the message names the missing file,
            # not the blank.css fallback it is replaced with.
            logger.warning(
                "{0} not found. Copying sphinx-internal blank.css".format(
                    source_file_path))
            source_file_path = os.path.join(os.path.dirname(__file__), "css",
                                            "blank", "blank.css")

        dest_file_path = os.path.join(dest_path,
                                      os.path.basename(source_file_path))

        if not os.path.exists(os.path.dirname(dest_file_path)):
            ensuredir(os.path.dirname(dest_file_path))

        copyfile(source_file_path, dest_file_path)

        safe_add_file(os.path.relpath(dest_file_path, STATICS_DIR_PATH), app)
Example #12
 def _read_serial(self, docnames: List[str]) -> None:
     for docname in status_iterator(docnames,
                                    __('reading sources... '), "purple",
                                    len(docnames), self.app.verbosity):
         # remove all inventory entries for that file
         self.events.emit('env-purge-doc', self.env, docname)
         self.env.clear_doc(docname)
         self.read_doc(docname)
Example #13
 def _read_serial(self, docnames):
     # type: (List[unicode]) -> None
     for docname in status_iterator(docnames, 'reading sources... ', "purple",
                                    len(docnames), self.app.verbosity):
         # remove all inventory entries for that file
         self.app.emit('env-purge-doc', self.env, docname)
         self.env.clear_doc(docname)
         self.read_doc(docname)
Example #14
 def _write_serial(self, docnames):
     # type: (Sequence[unicode]) -> None
     with logging.pending_warnings():
         for docname in status_iterator(docnames, 'writing output... ', "darkgreen",
                                        len(docnames), self.app.verbosity):
             doctree = self.env.get_and_resolve_doctree(docname, self)
             self.write_doc_serialized(docname, doctree)
             self.write_doc(docname, doctree)
Example #16
    def write(self, build_docnames, updated_docnames, method='update'):
        for docname in status_iterator(sorted(build_docnames),
                                       'running lint... ',
                                       length=len(build_docnames)):
            self.check_doc(docname, self.env.get_doctree(docname))

        for warning in self.warnings:
            self.warn(warning)
Example #17
File: __init__.py Project: nwf/sphinx
 def _read_serial(self, docnames, app):
     # type: (List[unicode], Sphinx) -> None
     for docname in status_iterator(docnames, 'reading sources... ', "purple",
                                    len(docnames), self.app.verbosity):
         # remove all inventory entries for that file
         app.emit('env-purge-doc', self, docname)
         self.clear_doc(docname)
         self.read_doc(docname, app)
Example #18
    def finish(self):
        self.env.get_doctree = self._original_get_doctree

        if self.publish:
            self.legacy_assets = {}
            self.legacy_pages = None
            self.parent_id = self.publisher.getBasePageId()

            for docname in status_iterator(
                    self.publish_docnames, 'publishing documents... ',
                    length=len(self.publish_docnames),
                    verbosity=self.app.verbosity):
                if self.publish_subset and docname not in self.publish_subset:
                    continue
                docfile = path.join(self.outdir, self.file_transform(docname))

                try:
                    with io.open(docfile, 'r', encoding='utf-8') as file:
                        output = file.read()
                        self.publish_doc(docname, output)

                except (IOError, OSError) as err:
                    ConfluenceLogger.warn("error reading file %s: "
                        "%s" % (docfile, err))

            def to_asset_name(asset):
                return asset[0]

            assets = self.assets.build()
            for asset in status_iterator(assets, 'publishing assets... ',
                    length=len(assets), verbosity=self.app.verbosity,
                    stringify_func=to_asset_name):
                key, absfile, type, hash, docname = asset
                if self.publish_subset and docname not in self.publish_subset:
                    continue

                try:
                    with open(absfile, 'rb') as file:
                        output = file.read()
                        self.publish_asset(key, docname, output, type, hash)
                except (IOError, OSError) as err:
                    ConfluenceLogger.warn("error reading asset %s: "
                        "%s" % (key, err))

            self.publish_purge()
            self.publish_finalize()
Example #20
 def status_iterator(self, elements, message):
     return status_iterator(
         elements,
         message,
         "purple",
         length=len(elements),
         verbosity=self.sphinx_env.app.verbosity,
     )
Example #21
def find_copy_make_toc(type_, docs_dir, search_root, header):
    """Look for documentation files, copy them to the build directory, and
    generate toc file linking to pod/site documentation.
    Args:
        type_ (str): either "pod" or "site".
        docs_dir (str): Directory this script is located in.
        search_root (str): Directory to search for PODs or sites.
        header (str): header of the toc file.
    """
    def _docname(item):
        """Helper for status_iterator()."""
        return str(os.path.basename(item))

    # destination directory to copy docs to
    sphinx_subdir = 'sphinx_{}s'.format(type_)
    sphinx_dir = os.path.join(docs_dir, sphinx_subdir)
    if not os.path.isdir(sphinx_dir):
        os.makedirs(sphinx_dir)

    # find PODs or sites as directories under search_root
    entries = [x for x in os.listdir(search_root) \
        if os.path.isdir(os.path.join(search_root, x)) and x[0].isalnum()
    ]
    # Case-insensitive alpha sort
    entries = sorted(entries, key=(lambda s: s.lower()))  # handles unicode
    if 'example' in entries:
        # put example POD documentation first
        entries.remove('example')
        entries.insert(0, 'example')

    # find documentation files
    # = all non-PDF files (.rst and graphics) in /doc subdirectory
    docs = []
    for entry in entries:
        doc_dir = os.path.join(search_root, entry, 'doc')
        if os.path.isdir(doc_dir):
            docs.extend([
                os.path.join(doc_dir, x) for x in os.listdir(doc_dir) \
                if os.path.isfile(os.path.join(doc_dir, x)) and not x.endswith('.pdf')
            ])

    # copy the docs we found
    iter_ = status_iterator(docs,
                            'Copying {} files... '.format(type_),
                            color='purple',
                            stringify_func=_docname)
    for source in iter_:
        shutil.copy2(source, sphinx_dir)

    # create toc file, either "pod_toc.rst" or "site_toc.rst"
    toc_path = os.path.join(docs_dir, 'sphinx', '{}_toc.rst'.format(type_))
    if os.path.exists(toc_path):
        os.remove(toc_path)
    with open(toc_path, 'w') as file_:
        file_.write(header)
        for entry in entries:
            # correct number of spaces for .rst indentation
            file_.write('   ../{}/{}\n'.format(sphinx_subdir, entry))
Example #22
 def status_iterator(self, iterable, summary, colorfunc=darkgreen, length=0,
                     stringify_func=_display_chunk):
     # type: (Iterable, unicode, Callable, int, Callable[[Any], unicode]) -> Iterable
     warnings.warn('app.status_iterator() is now deprecated. '
                   'Use sphinx.util.status_iterator() instead.',
                   RemovedInSphinx17Warning)
     for item in status_iterator(iterable, summary, length=length, verbosity=self.verbosity,
                                 color="darkgreen", stringify_func=stringify_func):
         yield item
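
This is the transitional wrapper Sphinx shipped while deprecating app.status_iterator; callers migrate by importing the helper directly. A short sketch under that assumption:

# Migration sketch: call the module-level helper instead of the
# deprecated Application method.
from sphinx.util import status_iterator

labels = ['alpha', 'beta']
for label in status_iterator(labels, 'processing... ', length=len(labels)):
    pass  # work on each label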
Example #24
    def _parse(self):
        # 1. pull in global configuration from conf.py
        self.sections = self.env.config.changelog_sections
        self.inner_tag_sort = self.env.config.changelog_inner_tag_sort + [""]

        # 2. examine top level directives inside the .. changelog::
        # directive.  version, release date
        self._parsed_content = parsed = _parse_content(self.content)
        self.version = version = parsed.get('version', '')
        self.release_date = parsed.get('released', None)
        self.is_released = bool(self.release_date)
        self.env.temp_data['ChangeLogDirective'] = self

        content = self.content

        # 3. read extra per-file included notes
        if 'include_notes_from' in parsed:
            if content.items and content.items[0]:
                source = content.items[0][0]
                path = os.path.join(os.path.dirname(source),
                                    parsed['include_notes_from'])
            else:
                path = parsed['include_notes_from']
            if not os.path.exists(path):
                raise Exception("included notes path %s does not exist" % path)

            content = copy.deepcopy(content)

            files = [
                fname for fname in os.listdir(path) if fname.endswith(".rst")
            ]
            for fname in status_iterator(
                    files,
                    "reading changelog note files (version %s)..." % version,
                    "purple",
                    length=len(files),
                    verbosity=self.env.app.verbosity):
                fpath = os.path.join(path, fname)
                with open(fpath) as handle:
                    content.append("", path, 0)
                    for num, line in enumerate(handle):

                        if "\t" in line:
                            warnings.warn("file %s has a tab in it! please "
                                          "convert to spaces." % fname)
                            line = line.replace("\t", "    ")
                        line = line.rstrip()
                        content.append(line, path, num)

        # 4. parse the content of the .. changelog:: directive. This
        # is where we parse individual .. change:: directives and construct
        # a list of items, stored in the env via self.get_changes_list(env)
        p = nodes.paragraph(
            '',
            '',
        )
        self.state.nested_parse(content[1:], 0, p)
Example #25
 def _write_serial(self, docnames: Sequence[str]) -> None:
     with logging.pending_warnings():
         for docname in status_iterator(docnames, __('writing output... '), "darkgreen",
                                        len(docnames), self.app.verbosity):
             self.app.phase = BuildPhase.RESOLVING
             doctree = self.env.get_and_resolve_doctree(docname, self)
             self.app.phase = BuildPhase.WRITING
             self.write_doc_serialized(docname, doctree)
             self.write_doc(docname, doctree)
Example #26
 def _write_serial(self, docnames):
     # type: (Sequence[str]) -> None
     with logging.pending_warnings():
         for docname in status_iterator(docnames, __('writing output... '), "darkgreen",
                                        len(docnames), self.app.verbosity):
             self.app.phase = BuildPhase.RESOLVING
             doctree = self.env.get_and_resolve_doctree(docname, self)
             self.app.phase = BuildPhase.WRITING
             self.write_doc_serialized(docname, doctree)
             self.write_doc(docname, doctree)
Example #27
    def load(self):
        """
        Read the files associated with the sls objects and parse their comment blocks
        """
        # Process all the sls objects and their files
        for sls_obj in status_iterator(
                self.sls_objects,
                bold("[AutoSaltSLS] Reading primary sls... "),
                "darkgreen",
                self.sls_objects_count,
                1,
                stringify_func=_stringify_sls,
        ):
            # Parse the sls object's file and add to the object as an entry
            sls_obj.parse_file()

            # Some debugging info
            if sls_obj.header.has_text:
                logger.debug("[AutoSaltSLS] {0} extracted header:\n{1}".format(
                    "Top File" if sls_obj.topfile else "File",
                    sls_obj.header.annotated_text,
                ))

            if sls_obj.body:
                logger.debug("[AutoSaltSLS] {0} extracted body:\n{1}".format(
                    "Top File" if sls_obj.topfile else "File",
                    sls_obj.annotated_body,
                ))

            # Now parse any files belonging to its children
            for sls_child_obj in status_iterator(
                    sls_obj.children,
                    bold("[AutoSaltSLS] Reading child sls... "),
                    "darkgreen",
                    sls_obj.child_count,
                    1,
                    stringify_func=_stringify_sls,
            ):
                sls_child_obj.parse_file()
                if sls_child_obj.text:
                    logger.debug(
                        "[AutoSaltSLS] Child extracted text:\n{0}".format(
                            sls_child_obj.text))
Example #28
    def run(self):
        env = self.state.document.settings.env
        app = env.app

        # workaround (used below) for https://github.com/sphinx-doc/sphinx/issues/3924
        current_docname = env.docname

        docdir = dirname(env.doc2path(env.docname))

        specpath = join(docdir, self.arguments[0])

        dest_dir = join(dirname(specpath), "gallery")
        ensuredir(dest_dir)

        env.note_dependency(specpath)
        spec = json.load(open(specpath))
        details = spec['details']

        details_iter = status_iterator(
            details,
            'copying gallery files... ',
            'brown',
            len(details),
            stringify_func=lambda x: x['name'] + ".py")

        env.gallery_updated = []
        for detail in details_iter:
            src_path = abspath(join("..", detail['path']))
            dest_path = join(dest_dir, detail['name'] + ".py")

            # sphinx pickled env works only with forward slash
            docname = join(env.app.config.bokeh_gallery_dir,
                           detail['name']).replace("\\", "/")

            try:
                copyfile(src_path, dest_path)
            except OSError as e:
                raise SphinxError('cannot copy gallery file %r, reason: %s' %
                                  (src_path, e))

            try:
                env.clear_doc(docname)
                env.read_doc(docname, app=app)
                env.gallery_updated.append(docname)
            except Exception as e:
                raise SphinxError('failed to read gallery doc %r, reason: %s' %
                                  (docname, e))

        names = [detail['name'] for detail in details]
        rst_text = GALLERY_PAGE.render(names=names)

        # workaround for https://github.com/sphinx-doc/sphinx/issues/3924
        env.temp_data['docname'] = current_docname

        return self._parse(rst_text, "<bokeh-gallery>")
Example #29
    def finish(self):
        # type: () -> None
        super(MessageCatalogBuilder, self).finish()
        data = {
            'version': self.config.version,
            'copyright': self.config.copyright,
            'project': self.config.project,
            'ctime': datetime.fromtimestamp(timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
        }
        for textdomain, catalog in status_iterator(
                self.catalogs.items(),  # type: ignore
                __("writing message catalogs... "),
                "darkgreen",
                len(self.catalogs),
                self.app.verbosity,
                lambda textdomain__: textdomain__[0]):
            # noop if config.gettext_compact is set
            ensuredir(path.join(self.outdir, path.dirname(textdomain)))

            pofn = path.join(self.outdir, textdomain + '.pot')
            output = StringIO()
            output.write(POHEADER % data)  # type: ignore

            for message in catalog.messages:
                positions = catalog.metadata[message]

                if self.config.gettext_location:
                    # generate "#: file1:line1\n#: file2:line2 ..."
                    output.write("#: %s\n" % "\n#: ".join(  # type: ignore
                        "%s:%s" %
                        (canon_path(relpath(source, self.outdir)), line)
                        for source, line, _ in positions))
                if self.config.gettext_uuid:
                    # generate "# uuid1\n# uuid2\n ..."
                    output.write("# %s\n" % "\n# ".join(  # type: ignore
                        uid for _, _, uid in positions))

                # message contains *one* line of text ready for translation
                message = message.replace('\\', r'\\'). \
                    replace('"', r'\"'). \
                    replace('\n', '\\n"\n"')
                output.write('msgid "%s"\nmsgstr ""\n\n' %
                             message)  # type: ignore

            content = output.getvalue()

            if should_write(pofn, content):
                with open(pofn, 'w',
                          encoding='utf-8') as pofile:  # type: ignore
                    pofile.write(content)
Example #30
    def finish(self) -> None:
        # Bypass MessageCatalogBuilder.finish
        I18nBuilder.finish(self)

        # This is mostly copy-pasted from Sphinx's MessageCatalogBuilder.finish,
        # but doing it here lets the collected messages be filtered below
        context = {
            'version': self.config.version,
            'copyright': self.config.copyright,
            'project': self.config.project,
            'last_translator': self.config.gettext_last_translator,
            'language_team': self.config.gettext_language_team,
            'ctime': datetime.datetime.fromtimestamp(timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
            'display_location': self.config.gettext_location,
            'display_uuid': self.config.gettext_uuid,
        }

        REGEX = self._ADMONITION_REGEX
        for textdomain, catalog in status_iterator(
                self.catalogs.items(),
                __("writing message catalogs... "),
                "darkgreen",
                len(self.catalogs),
                self.app.verbosity,
                lambda textdomain__: textdomain__[0],
        ):
            # noop if config.gettext_compact is set
            ensuredir(os.path.join(self.outdir, os.path.dirname(textdomain)))

            # Due to a bug in Sphinx, extracted messages can contain
            # admonitions; suppress those messages so the output and CI do
            # not break. This is a band-aid fix, but it seems to work.
            # See https://github.com/sphinx-doc/sphinx/issues/10334
            context['messages'] = [
                msg for msg in catalog if REGEX.search(msg.text) is None
            ]

            content = GettextRenderer(template_path='_templates/gettext',
                                      outdir=self.outdir).render(
                                          'message.pot_t', context)

            pofn = os.path.join(self.outdir, textdomain + '.pot')
            if should_write(pofn, content):
                with open(pofn, 'w', encoding='utf-8') as pofile:
                    pofile.write(content)
Example #31
def install_styles_static_files(app: Sphinx, env):

    # Do not copy static_files for our "needs" builder
    if app.builder.name == "needs":
        return

    statics_dir = Path(app.builder.outdir) / IMAGE_DIR_NAME
    css_root = Path(__file__).parent / "css"
    dest_dir = statics_dir / "sphinx-needs"

    def find_css_files() -> Iterable[Path]:
        for theme in ["modern", "dark", "blank"]:
            if app.config.needs_css == f"{theme}.css":
                css_dir = css_root / theme
                return [f for f in css_dir.glob("**/*") if f.is_file()]
        return [Path(app.config.needs_css)]  # normalize to Path to match the annotation

    files_to_copy = [Path("common.css")]
    files_to_copy.extend(find_css_files())

    # Be sure no "old" css layout is already set
    for theme in ["common", "modern", "dark", "blank"]:
        path = Path("sphinx-needs") / f"{theme}.css"
        safe_remove_file(path, app)

    if parse_version(sphinx_version) < parse_version("1.6"):
        global status_iterator
        status_iterator = app.status_iterator

    for source_file_path in status_iterator(
            files_to_copy,
            "Copying static files for sphinx-needs custom style support...",
            brown,
            len(files_to_copy),
    ):
        source_file_path = Path(source_file_path)

        if not source_file_path.is_absolute():
            source_file_path = css_root / source_file_path

        if not source_file_path.exists():
            # Warn before falling back so the message names the missing file,
            # not the blank.css fallback.
            logger.warning(
                f"{source_file_path} not found. Copying sphinx-internal blank.css"
            )
            source_file_path = css_root / "blank" / "blank.css"

        dest_file = dest_dir / source_file_path.name
        dest_dir.mkdir(exist_ok=True)

        copyfile(str(source_file_path), str(dest_file))

        relative_path = Path(dest_file).relative_to(statics_dir)
        safe_add_file(relative_path, app)
Example #32
File: __init__.py Project: zwq000/sphinx
    def compile_catalogs(self, catalogs: Set[CatalogInfo], message: str) -> None:
        if not self.config.gettext_auto_build:
            return

        def cat2relpath(cat: CatalogInfo) -> str:
            return relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP)

        logger.info(bold(__('building [mo]: ')) + message)
        for catalog in status_iterator(catalogs, __('writing output... '), "darkgreen",
                                       len(catalogs), self.app.verbosity,
                                       stringify_func=cat2relpath):
            catalog.write_mo(self.config.language)
Example #33
def config_inited_handler(app, config):
    gallery_dir = join(app.srcdir, config.bokeh_gallery_dir)
    gallery_file = f"{gallery_dir}.json"

    if not (exists(gallery_file) and isfile(gallery_file)):
        raise SphinxError(
            f"could not find gallery file {gallery_file!r} for configured gallery dir {gallery_dir!r}"
        )

    gallery_file_mtime = getmtime(gallery_file)

    ensuredir(gallery_dir)

    # we will remove each file we process from this set and see if anything is
    # left at the end (and remove it in that case)
    extras = set(os.listdir(gallery_dir))

    # app.env.note_dependency(specpath)
    spec = json.load(open(gallery_file))
    details = spec["details"]

    names = {x["name"] for x in details}
    if len(names) < len(details):
        raise SphinxError(f"gallery file {gallery_file!r} has duplicate names")

    details_iter = status_iterator(details,
                                   "creating gallery file entries... ",
                                   "brown",
                                   len(details),
                                   app.verbosity,
                                   stringify_func=lambda x: x["name"] + ".rst")

    for detail in details_iter:
        detail_file_name = detail["name"] + ".rst"
        detail_file_path = join(gallery_dir, detail_file_name)

        if detail_file_path in extras:
            extras.remove(detail_file_path)

        # if the gallery detail file is newer than the gallery file, assume it is up to date
        if exists(detail_file_path) and getmtime(detail_file_path) > gallery_file_mtime:
            continue

        with open(detail_file_path, "w") as f:
            source_path = abspath(join(app.srcdir, "..", "..", detail["path"]))
            f.write(
                GALLERY_DETAIL.render(filename=detail["name"] + ".py",
                                      source_path=source_path))

    for extra_file in extras:
        os.remove(join(gallery_dir, extra_file))
Example #34
    def dump(self):
        """Dump store to disk."""
        from sphinx.util import status_iterator

        # Make sure the directory exists
        if not path.exists(self.dirname):
            os.makedirs(self.dirname)

        # Purge orphan items
        for key, value in status_iterator(
                self._orphan_items.items(),
                'purging orphan document(s)... ',
                'brown',
                len(self._orphan_items),
                0,
                stringify_func=lambda i: self.stringify(i[0], i[1])):
            os.remove(self.itemfile(key))
            self.post_purge(key, value)

        # Dump dirty items
        for key, value in status_iterator(
                self._dirty_items.items(),
                'dumping dirty document(s)... ',
                'brown',
                len(self._dirty_items),
                0,
                stringify_func=lambda i: self.stringify(i[0], i[1])):
            with open(self.itemfile(key), 'wb') as f:
                pickle.dump(value, f)
            self.post_dump(key, value)

        # Clear all in-memory items
        self._orphan_items = {}
        self._dirty_items = {}
        self._store = {key: None for key in self._store}

        # Dump store itself
        with open(self.dictfile(), 'wb') as f:
            pickle.dump(self, f)
Example #35
def write_ncdumps(app):
    for name in status_iterator(
            app.config.sonatncdumph_ncfiles,
            bold("generating list of ncdump -h... "),
            length=len(app.config.sonatncdumph_ncfiles),
    ):
        txtfile = app.config.sonatncdumph_filepat.format(name)
        txtfile = os.path.join(app.builder.srcdir, txtfile)
        checkdir(txtfile, asfile=True)
        ncfile = app.config.sonatncdumph_ncfiles[name]
        if not os.path.isabs(ncfile):
            ncfile = os.path.join(app.builder.srcdir, ncfile)
        os.system('ncdump -h {ncfile} > {txtfile}'.format(**locals()))
Example #37
    def publish_purge(self):
        if self.config.confluence_purge:
            if self.publish_allowlist or self.publish_denylist:
                self.warn(
                    'confluence_purge disabled due to '
                    'confluence_publish_allowlist/confluence_publish_denylist')
                return

            if self.legacy_pages:
                for legacy_page_id in status_iterator(
                        self.legacy_pages,
                        'removing legacy pages... ',
                        length=len(self.legacy_pages),
                        verbosity=self._verbose):
                    self.publisher.remove_page(legacy_page_id)
                    # remove any pending asset removals for the page (as they
                    # have already been removed)
                    self.legacy_assets.pop(legacy_page_id, None)

            legacy_assets = {}
            for legacy_asset_info in self.legacy_assets.values():
                for attachment_id, attachment_name in legacy_asset_info.items(
                ):
                    legacy_assets[attachment_id] = attachment_name

            if legacy_assets:

                def to_asset_name(attachment_id):
                    return legacy_assets[attachment_id]

                for attachment_id in status_iterator(
                        legacy_assets.keys(),
                        'removing legacy assets... ',
                        length=len(legacy_assets.keys()),
                        verbosity=self._verbose,
                        stringify_func=to_asset_name):

                    self.publisher.remove_attachment(attachment_id)
Example #38
 def copy_image_files(self):
     # type: () -> None
     if self.images:
         stringify_func = ImageAdapter(self.app.env).get_original_image_uri
         for src in status_iterator(self.images, __('copying images... '), "brown",
                                    len(self.images), self.app.verbosity,
                                    stringify_func=stringify_func):
             dest = self.images[src]
             try:
                 copy_asset_file(path.join(self.srcdir, src),
                                 path.join(self.outdir, dest))
             except Exception as err:
                 logger.warning(__('cannot copy image file %r: %s'),
                                path.join(self.srcdir, src), err)
Example #39
    def compile_catalogs(self, catalogs, message):
        # type: (Set[CatalogInfo], unicode) -> None
        if not self.config.gettext_auto_build:
            return

        def cat2relpath(cat):
            # type: (CatalogInfo) -> unicode
            return path.relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP)

        logger.info(bold('building [mo]: ') + message)
        for catalog in status_iterator(catalogs, 'writing output... ', "darkgreen",
                                       len(catalogs), self.app.verbosity,
                                       stringify_func=cat2relpath):
            catalog.write_mo(self.config.language)
Example #40
 def copy_image_files(self):
     # type: () -> None
     if self.images:
         stringify_func = ImageAdapter(self.app.env).get_original_image_uri
         for src in status_iterator(self.images, 'copying images... ', "brown",
                                    len(self.images), self.app.verbosity,
                                    stringify_func=stringify_func):
             dest = self.images[src]
             try:
                 copy_asset_file(path.join(self.srcdir, src),
                                 path.join(self.outdir, dest))
             except Exception as err:
                 logger.warning('cannot copy image file %r: %s',
                                path.join(self.srcdir, src), err)
Example #41
    def compile_catalogs(self, catalogs, message):
        # type: (Set[CatalogInfo], unicode) -> None
        if not self.config.gettext_auto_build:
            return

        def cat2relpath(cat):
            # type: (CatalogInfo) -> unicode
            return relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP)

        logger.info(bold(__('building [mo]: ')) + message)
        for catalog in status_iterator(catalogs, __('writing output... '), "darkgreen",
                                       len(catalogs), self.app.verbosity,
                                       stringify_func=cat2relpath):
            catalog.write_mo(self.config.language)
Example #42
 def write(self, *ignored):
     writer = TextWriter(self)
     for label in status_iterator(pydoc_topic_labels,
                                  'building topics... ',
                                  length=len(pydoc_topic_labels)):
         if label not in self.env.domaindata['std']['labels']:
             self.warn('label %r not in documentation' % label)
             continue
         docname, labelid, sectname = self.env.domaindata['std']['labels'][label]
         doctree = self.env.get_and_resolve_doctree(docname, self)
         document = new_document('<section node>')
         document.append(doctree.ids[labelid])
         destination = StringOutput(encoding='utf-8')
         writer.write(document, destination)
         self.topics[label] = writer.output
Example #43
File: gettext.py Project: LFYG/sphinx
    def _extract_from_template(self):
        # type: () -> None
        files = self._collect_templates()
        logger.info(bold('building [%s]: ' % self.name), nonl=1)
        logger.info('targets for %d template files', len(files))

        extract_translations = self.templates.environment.extract_translations

        for template in status_iterator(files, 'reading templates... ', "purple",  # type: ignore  # NOQA
                                        len(files), self.app.verbosity):
            with open(template, 'r', encoding='utf-8') as f:  # type: ignore
                context = f.read()
            for line, meth, msg in extract_translations(context):
                origin = MsgOrigin(template, line)
                self.catalogs['sphinx'].add(msg, origin)
Example #44
def config_inited_handler(app, config):
    gallery_dir = join(app.srcdir, config.bokeh_gallery_dir)
    gallery_file = gallery_dir + ".json"

    if not (exists(gallery_file) and isfile(gallery_file)):
        raise SphinxError("could not find gallery file %r for configured gallery dir %r" % (gallery_file, gallery_dir))

    gallery_file_mtime = getmtime(gallery_file)

    ensuredir(gallery_dir)

    # we will remove each file we process from this set and see if anything is
    # left at the end (and remove it in that case)
    extras = set(os.listdir(gallery_dir))

    # app.env.note_dependency(specpath)
    spec = json.load(open(gallery_file))
    details = spec['details']

    names = set(x['name'] for x in details)
    if len(names) < len(details):
        raise SphinxError("gallery file %r has duplicate names" % gallery_file)

    details_iter = status_iterator(details,
                                   'creating gallery file entries... ',
                                   'brown',
                                   len(details),
                                   app.verbosity,
                                   stringify_func=lambda x: x['name'] + ".rst")

    for detail in details_iter:
        detail_file_name = detail['name'] + ".rst"
        detail_file_path = join(gallery_dir, detail_file_name)

        if detail_file_path in extras:
            extras.remove(detail_file_path)

        # if the gallery detail file is newer than the gallery file, assume it is up to date
        if exists(detail_file_path) and getmtime(detail_file_path) > gallery_file_mtime:
            continue

        with open(detail_file_path, "w") as f:
            source_path = abspath(join(app.srcdir, "..", "..", detail['path']))
            f.write(GALLERY_DETAIL.render(filename=detail['name']+'.py', source_path=source_path))

    for extra_file in extras:
        os.remove(join(gallery_dir, extra_file))
Example #45
File: gettext.py Project: LFYG/sphinx
    def finish(self):
        # type: () -> None
        I18nBuilder.finish(self)
        data = dict(
            version = self.config.version,
            copyright = self.config.copyright,
            project = self.config.project,
            ctime = datetime.fromtimestamp(
                timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
        )
        for textdomain, catalog in status_iterator(iteritems(self.catalogs),  # type: ignore
                                                   "writing message catalogs... ",
                                                   "darkgreen", len(self.catalogs),
                                                   self.app.verbosity,
                                                   lambda textdomain__: textdomain__[0]):
            # noop if config.gettext_compact is set
            ensuredir(path.join(self.outdir, path.dirname(textdomain)))

            pofn = path.join(self.outdir, textdomain + '.pot')
            output = StringIO()
            output.write(POHEADER % data)  # type: ignore

            for message in catalog.messages:
                positions = catalog.metadata[message]

                if self.config.gettext_location:
                    # generate "#: file1:line1\n#: file2:line2 ..."
                    output.write("#: %s\n" % "\n#: ".join(  # type: ignore
                        "%s:%s" % (canon_path(
                            safe_relpath(source, self.outdir)), line)
                        for source, line, _ in positions))
                if self.config.gettext_uuid:
                    # generate "# uuid1\n# uuid2\n ..."
                    output.write("# %s\n" % "\n# ".join(  # type: ignore
                        uid for _, _, uid in positions))

                # message contains *one* line of text ready for translation
                message = message.replace('\\', r'\\'). \
                    replace('"', r'\"'). \
                    replace('\n', '\\n"\n"')
                output.write('msgid "%s"\nmsgstr ""\n\n' % message)  # type: ignore

            content = output.getvalue()

            if should_write(pofn, content):
                with open(pofn, 'w', encoding='utf-8') as pofile:  # type: ignore
                    pofile.write(content)
Example #46
def build_finished(app, exception):
    """ Generate a ``sitemap.txt`` from the collected HTML page links.

    """
    filename = join(app.outdir, "sitemap.txt")

    links_iter = status_iterator(sorted(app.sitemap_links),
                                 'adding links to sitemap... ',
                                 'brown',
                                 len(app.sitemap_links))

    try:
        with open(filename, 'w') as f:
            for link in links_iter:
                f.write("%s\n" % link)
    except OSError as e:
        raise SphinxError('cannot write sitemap.txt, reason: %s' % e)
Example #47
    def _extract_from_template(self):
        # type: () -> None
        files = list(self._collect_templates())
        files.sort()
        logger.info(bold(__('building [%s]: ') % self.name), nonl=True)
        logger.info(__('targets for %d template files'), len(files))

        extract_translations = self.templates.environment.extract_translations

        for template in status_iterator(files, __('reading templates... '), "purple",
                                        len(files), self.app.verbosity):
            try:
                with open(template, encoding='utf-8') as f:
                    context = f.read()
                for line, meth, msg in extract_translations(context):
                    origin = MsgOrigin(template, line)
                    self.catalogs['sphinx'].add(msg, origin)
            except Exception as exc:
                raise ThemeError('%s: %r' % (template, exc))
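For context, the `extract_translations` call yields `(lineno, function, message)` tuples for each `_()`/`gettext()` call found in a template. A standalone sketch of the same extraction via Jinja2's `extract_from_ast` helper:

from jinja2 import Environment
from jinja2.ext import extract_from_ast

env = Environment()
ast = env.parse('<p>{{ _("Hello docs") }}</p>')
for lineno, funcname, message in extract_from_ast(ast):
    print(lineno, funcname, message)  # -> 1 _ Hello docs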
Example #48
def build_finished(app, exception):
    files = set()

    for (script, js, js_path, source) in app.env.bokeh_plot_files.values():
        files.add(js_path)

    files_iter = status_iterator(sorted(files),
                                 'copying bokeh-plot files... ',
                                 'brown',
                                 len(files),
                                 stringify_func=lambda x: basename(x))

    for file in files_iter:
        target = join(app.builder.outdir, "scripts", basename(file))
        ensuredir(dirname(target))
        try:
            copyfile(file, target)
        except OSError as e:
            raise SphinxError('cannot copy local file %r, reason: %s' % (file, e))
Example #49
    def copy_image_files_pil(self):
        # type: () -> None
        """Copy images using Pillow, the Python Imaging Library.
        The method tries to read and write the files with Pillow, converting
        the format and resizing the image if necessary/possible.
        """
        ensuredir(path.join(self.outdir, self.imagedir))
        for src in status_iterator(self.images, __('copying images... '), "brown",
                                   len(self.images), self.app.verbosity):
            dest = self.images[src]
            try:
                img = Image.open(path.join(self.srcdir, src))
            except OSError:
                if not self.is_vector_graphics(src):
                    logger.warning(__('cannot read image file %r: copying it instead'),
                                   path.join(self.srcdir, src))
                try:
                    copyfile(path.join(self.srcdir, src),
                             path.join(self.outdir, self.imagedir, dest))
                except OSError as err:
                    logger.warning(__('cannot copy image file %r: %s'),
                                   path.join(self.srcdir, src), err)
                continue
            if self.config.epub_fix_images:
                if img.mode in ('P',):
                    # See the Pillow documentation for Image.convert()
                    # https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert
                    img = img.convert()
            if self.config.epub_max_image_width > 0:
                (width, height) = img.size
                nw = self.config.epub_max_image_width
                if width > nw:
                    nh = (height * nw) // width  # Image.resize() requires integers
                    img = img.resize((nw, nh), Image.BICUBIC)
            try:
                img.save(path.join(self.outdir, self.imagedir, dest))
            except OSError as err:
                logger.warning(__('cannot write image file %r: %s'),
                               path.join(self.srcdir, src), err)
Example #50
File: _epub_base.py Project: LFYG/sphinx
    def copy_image_files_pil(self):
        # type: () -> None
        """Copy images using the PIL.
        The method tries to read and write the files with the PIL,
        converting the format and resizing the image if necessary/possible.
        """
        ensuredir(path.join(self.outdir, self.imagedir))
        for src in status_iterator(self.images, 'copying images... ', "brown",
                                   len(self.images), self.app.verbosity):
            dest = self.images[src]
            try:
                img = Image.open(path.join(self.srcdir, src))
            except IOError:
                if not self.is_vector_graphics(src):
                    logger.warning('cannot read image file %r: copying it instead',
                                   path.join(self.srcdir, src))
                try:
                    copyfile(path.join(self.srcdir, src),
                             path.join(self.outdir, self.imagedir, dest))
                except (IOError, OSError) as err:
                    logger.warning('cannot copy image file %r: %s',
                                   path.join(self.srcdir, src), err)
                continue
            if self.config.epub_fix_images:
                if img.mode in ('P',):
                    # See PIL documentation for Image.convert()
                    img = img.convert()
            if self.config.epub_max_image_width > 0:
                (width, height) = img.size
                nw = self.config.epub_max_image_width
                if width > nw:
                    nh = (height * nw) // width  # Image.resize() requires integers
                    img = img.resize((nw, nh), Image.BICUBIC)
            try:
                img.save(path.join(self.outdir, self.imagedir, dest))
            except (IOError, OSError) as err:
                logger.warning('cannot write image file %r: %s',
                               path.join(self.srcdir, src), err)
Example #51
def _embed_code_links(app, gallery_conf, gallery_dir):
    # Add resolvers for the packages for which we want to show links
    doc_resolvers = {}

    src_gallery_dir = os.path.join(app.builder.srcdir, gallery_dir)
    for this_module, url in gallery_conf['reference_url'].items():
        try:
            if url is None:
                doc_resolvers[this_module] = SphinxDocLinkResolver(
                    app.builder.outdir,
                    src_gallery_dir,
                    relative=True)
            else:
                doc_resolvers[this_module] = SphinxDocLinkResolver(url,
                                                                   src_gallery_dir)

        except HTTPError as e:
            print("The following HTTP Error has occurred:\n")
            print(e.code)
        except URLError as e:
            print("\n...\n"
                  "Warning: Embedding the documentation hyperlinks requires "
                  "Internet access.\nPlease check your network connection.\n"
                  "Unable to continue embedding `{0}` links due to a URL "
                  "Error:\n".format(this_module))
            print(e.args)

    html_gallery_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                    gallery_dir))

    # patterns for replacement
    link_pattern = ('<a href="%s" title="View documentation for %s">%s</a>')
    orig_pattern = '<span class="n">%s</span>'
    period = '<span class="o">.</span>'

    # This could be turned into a generator if necessary, but should be okay
    flat = [[dirpath, filename]
            for dirpath, _, filenames in os.walk(html_gallery_dir)
            for filename in filenames]
    if LooseVersion(sphinx.__version__) >= LooseVersion('1.6'):
        # It will be removed once upgraded to new sphinx-gallery version
        from sphinx.util import status_iterator
        iterator = status_iterator(
            flat, os.path.basename(html_gallery_dir), color='fuchsia',
            length=len(flat), stringify_func=lambda x: os.path.basename(x[1]))
    else:
        iterator = app.status_iterator(
            flat, os.path.basename(html_gallery_dir), colorfunc=fuchsia,
            length=len(flat), stringify_func=lambda x: os.path.basename(x[1]))

    for dirpath, fname in iterator:
        full_fname = os.path.join(html_gallery_dir, dirpath, fname)
        subpath = dirpath[len(html_gallery_dir) + 1:]
        pickle_fname = os.path.join(src_gallery_dir, subpath,
                                    fname[:-5] + '_codeobj.pickle')

        if os.path.exists(pickle_fname):
            # we have a pickle file with the objects to embed links for
            with open(pickle_fname, 'rb') as fid:
                example_code_obj = pickle.load(fid)
            str_repl = {}
            # generate replacement strings with the links
            for name, cobj in example_code_obj.items():
                this_module = cobj['module'].split('.')[0]

                if this_module not in doc_resolvers:
                    continue

                try:
                    link = doc_resolvers[this_module].resolve(cobj,
                                                              full_fname)
                except (HTTPError, URLError) as e:
                    if isinstance(e, HTTPError):
                        extra = e.code
                    else:
                        extra = e.reason
                    print("\n\t\tError resolving %s.%s: %r (%s)"
                          % (cobj['module'], cobj['name'], e, extra))
                    continue

                if link is not None:
                    parts = name.split('.')
                    name_html = period.join(orig_pattern % part
                                            for part in parts)
                    full_function_name = '%s.%s' % (
                        cobj['module'], cobj['name'])
                    str_repl[name_html] = link_pattern % (
                        link, full_function_name, name_html)
            # do the replacement in the html file

            # ensure greediness
            names = sorted(str_repl, key=len, reverse=True)
            regex_str = '|'.join(re.escape(name) for name in names)
            regex = re.compile(regex_str)

            def substitute_link(match):
                return str_repl[match.group()]

            if len(str_repl) > 0:
                with open(full_fname, 'rb') as fid:
                    lines_in = fid.readlines()
                with open(full_fname, 'wb') as fid:
                    for line in lines_in:
                        line = line.decode('utf-8')
                        line = regex.sub(substitute_link, line)
                        fid.write(line.encode('utf-8'))
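The "ensure greediness" step matters because `re` alternation picks the first matching branch, not the longest: sorting keys longest-first keeps a replacement for `np` from clobbering `np.dot`. A tiny self-contained demonstration with made-up replacements:

import re

str_repl = {'np': '<NP>', 'np.dot': '<NP.DOT>'}  # made-up replacement table
names = sorted(str_repl, key=len, reverse=True)  # longest keys first
regex = re.compile('|'.join(re.escape(name) for name in names))
print(regex.sub(lambda m: str_repl[m.group()], 'np.dot(a, b)'))  # -> <NP.DOT>(a, b)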
Example #52
def generate_recipes(app):
    """
    Go through every folder in the `ggd-recipes/recipes` dir,
    generate a README.rst file for each recipe, and build a
    recipes.rst from the collected data.
    """
    renderer = Renderer(app)
    load_config(os.path.join(os.path.dirname(RECIPE_DIR), "config.yaml"))
    repodata = RepoData()
    # Add ggd channels to repodata object
    #repodata.channels = ['ggd-genomics', 'conda-forge', 'bioconda', 'defaults']
    recipes = []
    ## Get each folder that contains a meta.yaml file
    recipe_dirs = []
    for root, dirs, files in os.walk(RECIPE_DIR):
        if "meta.yaml" in files:
            recipe_dirs.append(root)


    if parallel_available and len(recipe_dirs) > 5:
        nproc = app.parallel
    else:
        nproc = 1

    if nproc == 1:
        for folder in status_iterator(
                recipe_dirs,
                'Generating package READMEs...',
                "purple", len(recipe_dirs), app.verbosity):
            recipes.extend(generate_readme(folder, repodata, renderer))
    else:
        tasks = ParallelTasks(nproc)
        chunks = make_chunks(recipe_dirs, nproc)

        def process_chunk(chunk):
            _recipes = []
            for folder in chunk:
                _recipes.extend(generate_readme(folder, repodata, renderer))
            return _recipes

        def merge_chunk(chunk, res):
            recipes.extend(res)


        for chunk in status_iterator(
                chunks,
                'Generating package READMEs with {} threads...'.format(nproc),
                "purple", len(chunks), app.verbosity):
            tasks.add_task(process_chunk, chunk, merge_chunk)
        logger.info("waiting for workers...")
        tasks.join()

    updated = renderer.render_to_file("source/recipes.rst", "recipes.rst_t", {
        'recipes': recipes,
        # order of columns in the table; must be keys in template_options
        'keys': ['Package', 'Version', 'Linux', 'OSX', 'NOARCH'],
        'noarch_symbol': '<i class="fa fa-desktop"></i>',
        'linux_symbol': '<i class="fa fa-linux"></i>',
        'osx_symbol': '<i class="fa fa-apple"></i>',
        'dot_symbol': '<i class="fa fa-dot-circle-o"></i>'
    })
    if updated:
        logger.info("Updated source/recipes.rst")
Example #53
def collect_pages(app):
    # type: (Sphinx) -> Iterator[Tuple[unicode, Dict[unicode, Any], unicode]]
    env = app.builder.env
    if not hasattr(env, '_viewcode_modules'):
        return
    highlighter = app.builder.highlighter  # type: ignore
    urito = app.builder.get_relative_uri

    modnames = set(env._viewcode_modules)  # type: ignore

#    app.builder.info(' (%d module code pages)' %
#                     len(env._viewcode_modules), nonl=1)

    for modname, entry in status_iterator(
            sorted(env._viewcode_modules.items()),  # type: ignore
            'highlighting module code... ', "blue",
            len(env._viewcode_modules),  # type: ignore
            app.verbosity, lambda x: x[0]):
        if not entry:
            continue
        code, tags, used, refname = entry
        # construct a page name for the highlighted source
        pagename = '_modules/' + modname.replace('.', '/')
        # highlight the source using the builder's highlighter
        if env.config.highlight_language in ('python3', 'default', 'none'):
            lexer = env.config.highlight_language
        else:
            lexer = 'python'
        highlighted = highlighter.highlight_block(code, lexer, linenos=False)
        # split the code into lines
        lines = highlighted.splitlines()
        # split off wrap markup from the first line of the actual code
        before, after = lines[0].split('<pre>')
        lines[0:1] = [before + '<pre>', after]
        # nothing to do for the last line; it always starts with </pre> anyway
        # now that we have code lines (starting at index 1), insert anchors for
        # the collected tags (HACK: this only works if the tag boundaries are
        # properly nested!)
        maxindex = len(lines) - 1
        for name, docname in used.items():
            type, start, end = tags[name]
            backlink = urito(pagename, docname) + '#' + refname + '.' + name
            lines[start] = (
                '<div class="viewcode-block" id="%s"><a class="viewcode-back" '
                'href="%s">%s</a>' % (name, backlink, _('[docs]')) +
                lines[start])
            lines[min(end, maxindex)] += '</div>'
        # try to find parents (for submodules)
        parents = []
        parent = modname
        while '.' in parent:
            parent = parent.rsplit('.', 1)[0]
            if parent in modnames:
                parents.append({
                    'link': urito(pagename, '_modules/' +
                                  parent.replace('.', '/')),
                    'title': parent})
        parents.append({'link': urito(pagename, '_modules/index'),
                        'title': _('Module code')})
        parents.reverse()
        # putting it all together
        context = {
            'parents': parents,
            'title': modname,
            'body': (_('<h1>Source code for %s</h1>') % modname +
                     '\n'.join(lines)),
        }  # type: Dict[unicode, Any]
        yield (pagename, context, 'page.html')

    if not modnames:
        return

    html = ['\n']
    # the stack logic is needed for using nested lists for submodules
    stack = ['']
    for modname in sorted(modnames):
        if modname.startswith(stack[-1]):
            stack.append(modname + '.')
            html.append('<ul>')
        else:
            stack.pop()
            while not modname.startswith(stack[-1]):
                stack.pop()
                html.append('</ul>')
            stack.append(modname + '.')
        html.append('<li><a href="%s">%s</a></li>\n' % (
            urito('_modules/index', '_modules/' + modname.replace('.', '/')),
            modname))
    html.append('</ul>' * (len(stack) - 1))
    context = {
        'title': _('Overview: module code'),
        'body': (_('<h1>All modules for which code is available</h1>') +
                 ''.join(html)),
    }

    yield ('_modules/index', context, 'page.html')
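The `highlighter` used above is Sphinx's `PygmentsBridge`; a minimal sketch of the call the loop depends on, assuming the usual Pygments HTML wrapper markup:

from sphinx.highlighting import PygmentsBridge

highlighter = PygmentsBridge('html')
block = highlighter.highlight_block('def f():\n    return 1\n', 'python', linenos=False)
# block is a fragment like '<div class="highlight"><pre>...</pre></div>',
# which is why collect_pages() splits the first line on '<pre>'
print(block)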
Example #54
File: gen_gallery.py Project: marqh/iris
def gen_gallery(app, doctree):
    if app.builder.name != 'html':
        return

    outdir = app.builder.outdir
    rootdir = 'examples'

    # Images we want to skip for the gallery because they are an unusual
    # size that doesn't layout well in a table, or because they may be
    # redundant with other images or uninteresting.
    skips = set([
        'mathtext_examples',
        'matshow_02',
        'matshow_03',
        'matplotlib_icon',
        ])

    thumbnails = {}
    rows = []
    random_image = []
    toc_rows = []

    link_template = ('<a href="{href}">'
                     '<img src="{thumb_file}" border="0"'
                     ' alt="{alternative_text}"/>'
                     '</a>')

    header_template = ('<div class="section" id="{}">'
                       '<h4>{}'
                       '<a class="headerlink" href="#{}"'
                       ' title="Permalink to this headline">&para;</a>'
                       '</h4>')

    toc_template = ('<li>'
                    '<a class="reference internal" href="#{}">{}</a>'
                    '</li>')

    random_image_content_template = '''
// This file was automatically generated by gen_gallery.py & should not be
// modified directly.

images = new Array();

{}

'''

    random_image_template = "['{thumbfile}', '{full_image}', '{link}'];"
    random_image_join = 'images[{}] = {}'

    dirs = ('General', 'Meteorology', 'Oceanography')

    for subdir in dirs:
        rows.append(header_template.format(subdir, subdir, subdir))
        toc_rows.append(toc_template.format(subdir, subdir))

        origdir = os.path.join(os.path.dirname(outdir), rootdir, subdir)
        if not os.path.exists(origdir):
            origdir = os.path.join(os.path.dirname(outdir), 'plot_directive',
                                   rootdir, subdir)
        thumbdir = os.path.join(outdir, rootdir, subdir, 'thumbnails')
        if not os.path.exists(thumbdir):
            os.makedirs(thumbdir)

        data = []

        for filename in sorted(glob.glob(os.path.join(origdir, '*.png'))):
            if filename.endswith('hires.png'):
                continue

            path, filename = os.path.split(filename)
            basename, ext = os.path.splitext(filename)
            if basename in skips:
                continue

            # Create thumbnails based on images in tmpdir, and place them
            # within the build tree.
            orig_path = str(os.path.join(origdir, filename))
            thumb_path = str(os.path.join(thumbdir, filename))
            if out_of_date(orig_path, thumb_path) or True:  # 'or True' bypasses the check: always regenerate
                thumbnails[orig_path] = thumb_path

            m = multiimage.match(basename)
            if m is not None:
                basename = m.group(1)

            data.append((subdir, basename,
                         os.path.join(rootdir, subdir, 'thumbnails',
                                      filename)))

        for (subdir, basename, thumbfile) in data:
            if thumbfile is not None:
                anchor = os.path.basename(thumbfile)
                anchor = os.path.splitext(anchor)[0].replace('_', '-')
                link = 'examples/{}/{}.html#{}'.format(
                    subdir,
                    basename,
                    anchor)
                rows.append(link_template.format(
                    href=link,
                    thumb_file=thumbfile,
                    alternative_text=basename))
                random_image.append(random_image_template.format(
                    link=link,
                    thumbfile=thumbfile,
                    basename=basename,
                    full_image='_images/' + os.path.basename(thumbfile)))

        if len(data) == 0:
            warnings.warn('No thumbnails were found in {}'.format(subdir))

        # Close out the <div> opened up at the top of this loop.
        rows.append('</div>')

    # Generate JS list of images for front page.
    random_image_content = '\n'.join([random_image_join.format(i, line)
                                      for i, line in enumerate(random_image)])
    random_image_content = random_image_content_template.format(
        random_image_content)
    random_image_script_path = os.path.join(app.builder.srcdir,
                                            '_static',
                                            'random_image.js')
    with open(random_image_script_path, 'w') as fh:
        fh.write(random_image_content)

    content = template.format('\n'.join(toc_rows),
                              '\n'.join(rows))

    # Only write out the file if the contents have actually changed.
    # Otherwise, this triggers a full rebuild of the docs.

    gallery_path = os.path.join(app.builder.srcdir,
                                '_templates',
                                'gallery.html')
    if os.path.exists(gallery_path):
        with open(gallery_path, 'r') as fh:
            regenerate = fh.read() != content
    else:
        regenerate = True
    if regenerate:
        with open(gallery_path, 'w') as fh:
            fh.write(content)

    for key in status_iterator(thumbnails,
                               'generating thumbnails... ',
                               length=len(thumbnails)):
        image.thumbnail(key, thumbnails[key], 0.3)
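`image.thumbnail` here is matplotlib's file-to-file helper, which reads an image and writes a scaled copy; a one-call sketch with hypothetical paths:

from matplotlib import image

# write a 30%-scale thumbnail of an existing PNG (hypothetical paths)
image.thumbnail('examples/plot_demo.png', 'thumbnails/plot_demo.png', scale=0.3)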
Example #55
def generate_recipes(app):
    """Generates recipe RST files

    - Checks out repository
    - Prepares `RepoData`
    - Selects recipes (if BIOCONDA_FILTER_RECIPES in environment)
    - Dispatches calls to `generate_readme` for each recipe
    - Removes old RST files
    """
    source_dir = app.env.srcdir
    doctree_dir = app.env.doctreedir  # .../build/doctrees
    repo_dir = op.join(op.dirname(app.env.srcdir), "_bioconda_recipes")
    recipe_basedir = op.join(repo_dir, app.config.bioconda_recipes_path)
    repodata_cache_file = op.join(doctree_dir, 'RepoDataCache.pkl')
    repo_config_file = os.path.join(repo_dir, app.config.bioconda_config_file)
    output_dir = op.join(source_dir, 'recipes')

    # Initialize Repo and point globals at the right place
    repo = BiocondaRepo(folder=repo_dir, home=app.config.bioconda_repo_url)
    repo.checkout_master()
    load_config(repo_config_file)
    logger.info("Preloading RepoData")
    repodata = RepoData()
    repodata.set_cache(repodata_cache_file)
    repodata.df  # pylint: disable=pointless-statement
    logger.info("Preloading RepoData (done)")

    # Collect recipe names
    recipe_dirs = os.listdir(recipe_basedir)
    if 'BIOCONDA_FILTER_RECIPES' in os.environ:
        limiter = os.environ['BIOCONDA_FILTER_RECIPES']
        try:
            recipe_dirs = recipe_dirs[:int(limiter)]
        except ValueError:
            match = re.compile(limiter)
            recipe_dirs = [recipe for recipe in recipe_dirs
                           if match.search(recipe)]

    # Set up renderer preparing recipe readme.rst files
    # str.rstrip(".git") strips trailing characters, not a suffix; remove it explicitly
    repo_url = app.config.bioconda_repo_url
    if repo_url.endswith(".git"):
        repo_url = repo_url[:-len(".git")]
    recipe_base_url = "{base}/tree/master/{recipes}/".format(
        base=repo_url,
        recipes=app.config.bioconda_recipes_path
    )
    renderer = Renderer(app, {'gh_recipes': recipe_base_url})

    recipes: List[str] = []

    if parallel_available and len(recipe_dirs) > 5:
        nproc = app.parallel
    else:
        nproc = 1

    if nproc == 1:
        for folder in status_iterator(
                recipe_dirs,
                'Generating package READMEs...',
                "purple", len(recipe_dirs), app.verbosity):
            if not op.isdir(op.join(recipe_basedir, folder)):
                logger.error("Item '%s' in recipes folder is not a folder",
                             folder)
                continue
            recipes.extend(generate_readme(recipe_basedir, output_dir, folder, repodata, renderer))
    else:
        tasks = ParallelTasks(nproc)
        chunks = make_chunks(recipe_dirs, nproc)

        def process_chunk(chunk):
            _recipes: List[Dict[str, Any]] = []
            for folder in chunk:
                if not op.isdir(op.join(recipe_basedir, folder)):
                    logger.error("Item '%s' in recipes folder is not a folder",
                                 folder)
                    continue
                _recipes.extend(generate_readme(recipe_basedir, output_dir, folder, repodata, renderer))
            return _recipes

        def merge_chunk(_chunk, res):
            recipes.extend(res)

        for chunk in status_iterator(
                chunks,
                'Generating package READMEs with {} threads...'.format(nproc),
                "purple", len(chunks), app.verbosity):
            tasks.add_task(process_chunk, chunk, merge_chunk)
        logger.info("waiting for workers...")
        tasks.join()

    files_wanted = set(recipes)
    for root, dirs, files in os.walk(output_dir, topdown=False):
        for fname in files:
            path = op.join(root, fname)
            if path not in files_wanted:
                os.unlink(path)
        for dname in dirs:
            try:
                os.rmdir(op.join(root, dname))
            except OSError:
                pass
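The cleanup walk relies on `topdown=False`, which yields leaf directories before their parents, so directories emptied earlier in the loop can be removed as the walk ascends. A compact illustration against a hypothetical directory:

import os

# bottom-up walk: children are reported before their parents
for root, dirs, files in os.walk('recipes', topdown=False):
    for dname in dirs:
        full = os.path.join(root, dname)
        if not os.listdir(full):  # remove only directories that are already empty
            os.rmdir(full)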