Code Example #1
def make_cover(soup: PageElement, options: Options):
    """ Generate a cover page.

    Arguments:
        soup {BeautifulSoup} -- target element.
        options {Options} -- the project options.
    """

    if not options.cover:
        return

    options.logger.info('Generate a cover page.')

    article = soup.new_tag('article', id='doc-cover')

    d = soup.new_tag('div', **{'class': 'wrapper'})
    article.append(d)

    box = soup.new_tag('div', **{'class': 'wrapper'})
    article.append(box)

    title = options.cover_title
    h1 = soup.new_tag('h1')
    h1.append(title)
    box.append(h1)

    sub_title = options.cover_subtitle
    if sub_title:
        h2 = soup.new_tag('h2')
        h2.append(sub_title)
        box.append(h2)

    article.append(_gen_address(soup, options))

    soup.body.insert(0, article)
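
A minimal sketch of driving make_cover outside the plugin: the real Options object belongs to the surrounding project, so a SimpleNamespace stands in for it here (an assumption for illustration only), and _gen_address from Code Example #4 must also be in scope.

import logging
from types import SimpleNamespace
from bs4 import BeautifulSoup

opts = SimpleNamespace(
    cover=True,
    cover_title='My Project',
    cover_subtitle='User Guide',
    author='Jane Doe',                  # read by _gen_address (Code Example #4)
    copyright='(c) 2024 Example Inc.',  # read by _gen_address (Code Example #4)
    logger=logging.getLogger('cover'),
)
soup = BeautifulSoup('<html><body><p>content</p></body></html>', 'html.parser')
make_cover(soup, opts)
print(soup.body.article['id'])  # -> doc-cover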
Code Example #2
def convert_for_two_columns(soup: PageElement,
                            level: int,
                            logger: Logger = None):
    if level == 0:
        return
    elif level != 3:
        if logger:
            logger.warning('`two_columns_level` currently supports only `3`.')
        return

    if logger:
        logger.info('Converting to a two-column layout (heading level 3).')

    ignored = []
    for el in soup.find_all('h3'):
        if el in ignored:
            continue
        els = [
            i for i in itertools.takewhile(
                lambda x: x.name not in ['h1', 'h2'], el.next_siblings)
        ]
        section = soup.new_tag('section',
                               **{'class': 'md-typeset two-columns'})
        el.wrap(section)
        for tag in els:
            section.append(tag)
            if tag.name == 'h3':
                ignored.append(tag)
        images_size_to_half_in(section)
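
Note that images_size_to_half_in is a helper from the surrounding project and is not shown here. The core idiom above is collecting every sibling that follows an <h3> until the next <h1>/<h2>; a standalone illustration with plain BeautifulSoup (hypothetical HTML, no plugin dependencies):

import itertools
from bs4 import BeautifulSoup

html = '<h3>A</h3><p>one</p><p>two</p><h2>Next</h2><p>three</p>'
soup = BeautifulSoup(html, 'html.parser')
h3 = soup.find('h3')
els = list(itertools.takewhile(
    lambda x: x.name not in ['h1', 'h2'], h3.next_siblings))
print([e.name for e in els])  # -> ['p', 'p']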
Code Example #3
def convert_iframe(soup: PageElement, entries: list, logger: Logger = None):
    """Replace an <iframe> with an <a> (anchor).

    e.g.:
        ```html "before:"
        <iframe frameborder="0" height="100%" src="SRC"/>
        ```

        ```html "after:"
        <a class="converted-iframe" href="SRC" target="_blank">
          <img src="POSTER IMAGE"/>
        </a>
        ```
    """

    if len(entries) < 1:
        return

    if logger:
        logger.info('Converting <iframe> to a poster image (if available).')

    for iframe in soup.find_all('iframe', src=True):
        for entry in entries:
            if iframe['src'] != entry.get('src'):
                continue

            a = soup.new_tag('a',
                             href=iframe['src'],
                             target='_blank',
                             **{'class': 'converted-iframe'})
            img_src = entry.get('img')
            if img_src:
                a.append(soup.new_tag('img', src=img_src))
            text = entry.get('text')
            if text:
                span = soup.new_tag('span')
                span.string = text
                a.append(span)

            # copy over selected attributes from the <iframe> (currently only 'style')
            for key, val in iframe.attrs.items():
                if key in ['style']:
                    a[key] = val

            iframe.replace_with(a)
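
A minimal usage sketch; the entry format (dicts with 'src', 'img' and 'text' keys) is inferred from the lookups in the loop above, and the sample URLs are placeholders.

from bs4 import BeautifulSoup

html = ('<body><iframe frameborder="0" height="100%" '
        'src="https://example.com/embed"></iframe></body>')
soup = BeautifulSoup(html, 'html.parser')
entries = [{'src': 'https://example.com/embed',
            'img': 'poster.png',
            'text': 'Open the embedded page'}]
convert_iframe(soup, entries)
print(soup.a['href'])       # -> https://example.com/embed
print(soup.a.img['src'])    # -> poster.png
print(soup.find('iframe'))  # -> None (the <iframe> was replaced)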
Code Example #4
def _gen_address(soup: PageElement, options: Options) -> PageElement:

    box = soup.new_tag('div', **{'class': 'properties'})

    address = soup.new_tag('address')
    box.append(address)

    if options.author:
        span = soup.new_tag('p', id="author")
        span.append(options.author)
        address.append(span)

    if options.copyright:
        span = soup.new_tag('p', id="copyright")
        import html
        span.append(html.unescape(options.copyright))
        address.append(span)

    return box
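
A minimal sketch of the output, again with a SimpleNamespace standing in for the plugin's Options object (an assumption; only author and copyright are read here). html.unescape lets entities such as &copy; in the copyright option come through as characters.

from types import SimpleNamespace
from bs4 import BeautifulSoup

soup = BeautifulSoup('<body></body>', 'html.parser')
opts = SimpleNamespace(author='Jane Doe', copyright='&copy; 2024 Example Inc.')
print(_gen_address(soup, opts))
# -> <div class="properties"><address><p id="author">Jane Doe</p>
#    <p id="copyright">© 2024 Example Inc.</p></address></div>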
Code Example #5
def wrap_tabbed_set_content(soup: PageElement, logger: Logger = None):
    for ts in soup.select('div.tabbed-set'):
        for radio in ts.select('input'):
            els = [i for i in itertools.takewhile(
                lambda x: x.name not in ['input'],
                radio.next_siblings)]
            wrapper = soup.new_tag('div', **{'class': 'tabbed-content--wrap'})
            radio.wrap(wrapper)
            for tag in els:
                wrapper.append(tag)

    for d in soup.select('details'):
        d['open'] = ''  # expand all <details> so their content appears in the PDF output
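
A usage sketch against a hypothetical fragment that mimics the markup mkdocs-material emits for content tabs (the exact markup depends on the theme version, so treat it as an assumption):

import itertools  # required by wrap_tabbed_set_content above
from bs4 import BeautifulSoup

html = ('<div class="tabbed-set">'
        '<input checked="checked" id="t1" name="tabs" type="radio"/>'
        '<label for="t1">Tab 1</label><div class="tabbed-content">one</div>'
        '<input id="t2" name="tabs" type="radio"/>'
        '<label for="t2">Tab 2</label><div class="tabbed-content">two</div>'
        '</div>')
soup = BeautifulSoup(html, 'html.parser')
wrap_tabbed_set_content(soup)
print(len(soup.select('div.tabbed-content--wrap')))  # -> 2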
Code Example #6
def fix_twemoji(soup: PageElement, logger: Logger = None):
    """ (workaround) Replace <svg> with <img src="data:..."/> (base64-encoded data).

    Twemoji SVGs are not rendered by WeasyPrint 51 when combined with
    mkdocs-material v4.5.0 and later.

    @see https://github.com/squidfunk/mkdocs-material/pull/1330
    """

    def fix_size(svg):
        # Give the <svg> explicit width/height attributes derived from its
        # viewBox (instead of hard-coding 24x24).
        viewbox = _parse_viewbox(svg['viewbox'])
        width, height = (
            viewbox[2] - viewbox[0],
            viewbox[3] - viewbox[1]
        )
        svg['width'] = int(width)
        svg['height'] = int(height)
        svg['style'] = 'fill: currentColor;'

    if logger:
        logger.debug('Converting emoji SVG to <img> (workaround).')

    for svg in soup.select('.twemoji svg'):
        try:
            fix_size(svg)
            encoded = b64encode(str(svg).encode('utf-8')).decode('ascii')
            data = "data:image/svg+xml;charset=utf-8;base64," + encoded
            img = soup.new_tag('img', src=data,
                               **{'class': 'converted-twemoji'})
            svg.replace_with(img)

            if logger:
                logger.debug(f'> svg: {svg}')
                logger.debug(f'< img: {img}')

        except Exception as e:
            if logger:
                logger.warning(f'Failed to convert SVG: {e}')
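
fix_twemoji relies on b64encode from the standard base64 module and on a _parse_viewbox helper that is not shown above. A minimal sketch of what such a helper presumably does (a hypothetical implementation, not necessarily the project's actual one):

from base64 import b64encode  # used by fix_twemoji above

def _parse_viewbox(value: str) -> list:
    """Parse an SVG viewBox string such as '0 0 36 36' into four floats."""
    return [float(v) for v in value.replace(',', ' ').split()]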
Code Example #7
    def _get_content(self, soup: PageElement, page):

        def shift_heading(elem, page):
            for i in range(7, 0, -1):
                while True:
                    h = elem.find(f'h{i}')
                    if not h:
                        break
                    h.name = f'h{i + 1}'

            page_path = self._page_path_for_id(page)
            h1 = soup.new_tag('h1', id=f'{page_path}')
            h1.append(page.title)
            elem.insert(0, h1)
            return elem

        def cleanup_class(classes: list):
            if classes:
                excludes = ['md-content__inner']
                return [c for c in classes if c not in excludes]
            return classes

        article = getattr(page, 'pdf-article', None)
        if article:

            page_path = self._page_path_for_id(page)
            article['id'] = f'{page_path}:'  # anchor for each page.
            article['data-url'] = f'/{page_path}'
            return article

        elif page.children:

            new_article = soup.new_tag('article')
            found = False
            for c in page.children:
                content = self._get_content(soup, c)
                if content:
                    new_article.append(content)
                    found = True

            if not found:
                return None

            child_classes = None
            for child_article in new_article.find_all('article'):
                child_article.name = 'section'
                classes = child_article.get('class')
                if classes and not child_classes:
                    child_classes = classes
                child_article['class'] = cleanup_class(classes)

            page_path = self._page_path_for_id(page)
            new_article['id'] = f'{page_path}:'  # anchor for each page.
            new_article['data-url'] = f'/{page_path}'
            if child_classes:
                new_article['class'] = child_classes

            if self._options.heading_shift:
                return shift_heading(new_article, page)
            return new_article

        return None
Code Example #8
def make_indexes(soup: PageElement, options: Options) -> None:
    """ Generate ordered chapter numbers and a table of contents for the document.

    Arguments:
        soup {BeautifulSoup} -- DOM object of the document.
        options {Options} -- the project options.
    """

    # Step 1: (re)order the headings
    _inject_heading_order(soup, options)

    # Step 2: generate toc page
    level = options.toc_level
    if level < 1 or level > 3:
        return

    options.logger.info(
        f'Generate a table of contents up to heading level {level}.')

    h1li = None
    h2ul = h2li = h3ul = None
    exclude_lv2 = exclude_lv3 = False

    def makeLink(h: Tag) -> Tag:
        li = soup.new_tag('li')
        ref = h.get('id', '')
        a = soup.new_tag('a', href=f'#{ref}')
        for el in h.contents:
            if el.name == 'a':
                a.append(el.contents[0])
            else:
                a.append(clone_element(el))
        li.append(a)
        options.logger.debug(f"| [{h.get_text(separator=' ')}]({ref})")
        return li

    toc = soup.new_tag('article', id='doc-toc')
    title = soup.new_tag('h1')
    title.append(options.toc_title)
    toc.append(title)

    h1ul = soup.new_tag('ul')
    toc.append(h1ul)

    headings = soup.find_all(['h1', 'h2', 'h3'])
    for h in headings:

        if h.name == 'h1':

            h1li = makeLink(h)
            h1ul.append(h1li)
            h2ul = h2li = h3ul = None

            exclude_lv2 = _is_exclude(h.get('id', None), options)

        elif not exclude_lv2 and h.name == 'h2' and level >= 2:

            if not h2ul:
                h2ul = soup.new_tag('ul')
                h1li.append(h2ul)
            h2li = makeLink(h)
            h2ul.append(h2li)
            h3ul = None

            exclude_lv3 = _is_exclude(h.get('id', None), options)

        elif not exclude_lv2 and not exclude_lv3 \
                and h.name == 'h3' and level >= 3:

            if not h2li:
                continue
            if not h3ul:
                h3ul = soup.new_tag('ul')
                h2li.append(h3ul)
            h3li = makeLink(h)
            h3ul.append(h3li)

        else:
            continue

    soup.body.insert(0, toc)
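
make_indexes also depends on project helpers that are not shown here: _inject_heading_order, _is_exclude and clone_element. As a rough, hypothetical sketch, clone_element could be little more than a copy, since recent BeautifulSoup versions return a detached copy of an element from copy.copy:

import copy

def clone_element(el):
    # Hypothetical sketch: return a detached copy of a tag or string so it can
    # be appended into the TOC without moving the original out of the heading.
    return copy.copy(el)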