Example #1
def add_related(joined_aug):
    res = AugmentedResult()
    res.merge(joined_aug)
    soup = bs_entire_document(joined_aug.get_result())
    add_related_(soup, res)
    res.set_result(to_html_entire_document(soup))
    return res
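
All of the helpers in these examples follow the same round trip: create a fresh AugmentedResult, merge() the notes of the incoming one, parse its HTML payload with bs_entire_document, mutate the soup, and store the re-serialized document with set_result. A minimal usage sketch, assuming the names above are already imported from their defining modules and that the input carries a complete HTML document (the document string below is illustrative):

prev_aug = AugmentedResult()
prev_aug.set_result('<html><head></head><body><p>hello</p></body></html>')

next_aug = add_related(prev_aug)   # notes from prev_aug are carried over by merge()
html_out = next_aug.get_result()   # the augmented HTML document as a string
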
Example #2
def add_likebtn(joined_aug, likebtn):
    res = AugmentedResult()
    res.merge(joined_aug)
    soup = bs_entire_document(joined_aug.get_result())
    add_likebtn_(soup, likebtn)
    res.set_result(to_html_entire_document(soup))
    return res
Example #3
def mark_errors_and_rest(joined_aug):
    soup = bs_entire_document(joined_aug.get_result())
    mark_in_html(joined_aug, soup)
    res = AugmentedResult()
    res.merge(joined_aug)
    res.set_result(to_html_entire_document(soup))
    return res
Example #4
def add_style(data_aug, stylesheet):
    soup = bs_entire_document(data_aug.get_result())
    head = soup.find('head')
    assert head is not None
    link = Tag(name='link')
    link['rel'] = 'stylesheet'
    link['type'] = 'text/css'
    from mcdp_report.html import get_css_filename
    link['href'] = get_css_filename('compiled/%s' % stylesheet)
    head.append(link)
    html = to_html_entire_document(soup)
    res = AugmentedResult()
    res.merge(data_aug)
    res.set_result(html)
    return res
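
A hedged usage sketch for add_style. It assumes data_aug carries a full HTML document with a <head> (the assert above requires one) and that 'plain' is a stylesheet name that get_css_filename can resolve; both values are illustrative:

data_aug = AugmentedResult()
data_aug.set_result('<html><head></head><body></body></html>')

styled = add_style(data_aug, 'plain')
# the <head> of the result now contains a tag of the form
# <link rel="stylesheet" type="text/css" href="...compiled/plain...">
print(styled.get_result())
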
Example #5
def make_composite(compose_config, joined_aug):
    data = joined_aug.get_result()
    soup = bs_entire_document(data)
    recipe = compose_config.recipe
    remove_status = compose_config.remove_status
    show_removed = compose_config.show_removed
    permalink_prefix = compose_config.purl_prefix
    aug = compose_go2(soup, recipe, permalink_prefix, remove_status,
                      show_removed)
    soup = aug.get_result()
    results = str(soup)
    res = AugmentedResult()
    res.merge(joined_aug)
    res.merge(aug)
    res.set_result(results)
    return res
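
Note that make_composite merges notes from both the input (joined_aug) and the composition step itself (aug), so warnings from either stage survive in the returned result. A minimal sketch of that accumulation, assuming merge() simply accumulates notes, which is how it is used throughout these examples (note texts are illustrative):

a = AugmentedResult()
a.note_warning('from stage one')
b = AugmentedResult()
b.note_warning('from stage two')

merged = AugmentedResult()
merged.merge(a)
merged.merge(b)
# merged now carries the warnings from both stages
warnings = merged.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_WARNING)
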
Example #6
def manual_join(template,
                files_contents,
                stylesheet,
                remove=None,
                extra_css=None,
                remove_selectors=None,
                hook_before_toc=None,
                references=None,
                resolve_references=True,
                hook_before_final_pass=None,
                require_toc_placeholder=False,
                permalink_prefix=None,
                crossrefs_aug=None,
                aug0=None):
    """
        files_contents: a list of tuples that can be cast to DocToJoin:
        where the string is a unique one to be used for job naming.

        extra_css: if not None, a string of more CSS to be added
        Remove_selectors: list of selectors to remove (e.g. ".draft").

        hook_before_toc if not None is called with hook_before_toc(soup=soup)
        just before generating the toc
    """
    result = AugmentedResult()

    if references is None:
        references = {}
    check_isinstance(files_contents, list)

    if crossrefs_aug is None:
        crossrefs = Tag(name='no-cross-refs')
    else:
        crossrefs = bs(crossrefs_aug.get_result())
        result.merge(crossrefs_aug)
    if aug0 is not None:
        result.merge(aug0)

    @contextmanager
    def timeit(_):
        # no-op timing context manager; the `with timeit(...)` blocks below
        # only serve to label the stages of the join
        yield

    with timeit('manual_join'):

        files_contents = [DocToJoin(*_) for _ in files_contents]

        # cannot use bs() here: this is an entire document, not a fragment
        with timeit('parsing template'):
            template0 = template
            template = replace_macros(template)
            template_soup = BeautifulSoup(template,
                                          'lxml',
                                          from_encoding='utf-8')
            d = template_soup
            if d.html is None:
                s = "Invalid template"
                raise_desc(ValueError, s, template0=template0)

        with timeit('adding head'):
            assert d.html is not None
            assert '<html' in str(d)
            head = d.find('head')
            if head is None:
                msg = 'Could not find <head> in template:'
                logger.error(msg)
                logger.error(str(d))
                raise Exception(msg)
            assert head is not None
            for x in get_manual_css_frag().contents:
                head.append(x.__copy__())

        with timeit('adding stylesheet'):
            if stylesheet is not None:
                link = Tag(name='link')
                link['rel'] = 'stylesheet'
                link['type'] = 'text/css'
                from mcdp_report.html import get_css_filename
                link['href'] = get_css_filename('compiled/%s' % stylesheet)
                head.append(link)

        with timeit('making basename2soup'):
            basename2soup = OrderedDict()
            for doc_to_join in files_contents:
                if doc_to_join.docname in basename2soup:
                    msg = 'Repeated docname %r' % doc_to_join.docname
                    raise ValueError(msg)
                from .latex.latex_preprocess import assert_not_inside
                if isinstance(doc_to_join.contents, AugmentedResult):
                    result.merge(doc_to_join.contents)
                    contents = doc_to_join.contents.get_result()
                else:
                    contents = doc_to_join.contents
                assert_not_inside(contents, '<fragment')
                assert_not_inside(contents, 'DOCTYPE')

                frag = bs(contents)
                basename2soup[doc_to_join.docname] = frag

        # with timeit('fix_duplicate_ids'):
        # XXX
        # fix_duplicated_ids(basename2soup)

        with timeit('copy contents'):
            body = d.find('body')
            add_comments = False

            for docname, content in basename2soup.items():
                if add_comments:
                    body.append(NavigableString('\n\n'))
                    body.append(
                        Comment('Beginning of document dump of %r' % docname))
                    body.append(NavigableString('\n\n'))

                try_faster = True
                if try_faster:
                    for e in list(content.children):
                        body.append(e.extract())
                else:
                    copy_contents_into(content, body)

                if add_comments:
                    body.append(NavigableString('\n\n'))
                    body.append(Comment('End of document dump of %r' %
                                        docname))
                    body.append(NavigableString('\n\n'))

        with timeit('extract_bibtex_blocks'):
            extract_bibtex_blocks(d)

        with timeit('ID_PUT_BIB_HERE'):

            ID_PUT_BIB_HERE = MCDPManualConstants.ID_PUT_BIB_HERE

            bibhere = d.find('div', id=ID_PUT_BIB_HERE)
            if bibhere is None:
                msg = ('Could not find #%s in document. '
                       'Adding one at end of document.') % ID_PUT_BIB_HERE
                result.note_warning(msg)
                bibhere = Tag(name='div')
                bibhere.attrs['id'] = ID_PUT_BIB_HERE
                d.find('body').append(bibhere)

            do_bib(d, bibhere)

        with timeit('hook_before_final_pass'):
            if hook_before_final_pass is not None:
                hook_before_final_pass(soup=d)

        with timeit('document_final_pass_before_toc'):
            location = LocationUnknown()
            document_final_pass_before_toc(d, remove, remove_selectors, result,
                                           location)

        with timeit('hook_before_toc'):
            if hook_before_toc is not None:
                hook_before_toc(soup=d)

        with timeit('generate_and_add_toc'):
            try:
                generate_and_add_toc(d, raise_error=True, res=result)
            except NoTocPlaceholder as e:
                if require_toc_placeholder:
                    msg = 'Could not find toc placeholder: %s' % e
                    # logger.error(msg)
                    if aug0 is not None:
                        result.note_error(msg)
                    else:
                        raise Exception(msg)

        with timeit('document_final_pass_after_toc'):
            document_final_pass_after_toc(
                soup=d,
                crossrefs=crossrefs,
                resolve_references=resolve_references,
                res=result)

        if extra_css is not None:
            logger.info('adding extra CSS')
            add_extra_css(d, extra_css)

        with timeit('document_only_once'):
            document_only_once(d)

        location = LocationUnknown()
        substitute_github_refs(d, defaults={}, res=result, location=location)

        with timeit('another A pass'):
            for a in d.select('a[href]'):
                href = a.attrs['href']
                if href in references:
                    r = references[href]
                    a.attrs['href'] = r.url
                    if not a.contents:  # empty anchor: fill in the reference title
                        a.append(r.title)

        # do not use to_html_stripping_fragment - this is a complete doc
        # mark_in_html(result, soup=d)

        add_github_links_if_edit_url(soup=d, permalink_prefix=permalink_prefix)

        with timeit('converting to string'):
            res = unicode(d)

        with timeit('encoding'):
            res = res.encode('utf8')

        logger.info('done - %.1f MB' % (len(res) / (1024 * 1024.0)))

        result.set_result(res)
        return result
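
A hedged invocation sketch for manual_join. It assumes DocToJoin can be built from (docname, contents) pairs, as the docstring and the attribute accesses above suggest, and that the template already contains <html>, <head> and <body>; every concrete string below is illustrative:

template = '<html><head></head><body></body></html>'
files_contents = [
    ('chapter1', '<h1>Introduction</h1><p>First part.</p>'),
    ('chapter2', '<h1>Details</h1><p>Second part.</p>'),
]
joined = manual_join(template, files_contents, stylesheet=None)
html_bytes = joined.get_result()   # the complete document, UTF-8 encoded
nwarnings = len(joined.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_WARNING))
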
Example #7
def write_errors_and_warnings_files(aug, d):
    if aug.has_result():
        id2filename = aug.get_result()
    else:
        id2filename = {}
    # print('id2filename: %s' % sorted(id2filename))
    assert isinstance(aug, AugmentedResult)
    aug.update_refs(id2filename)

    header = get_notes_panel(aug)

    manifest = []
    nwarnings = len(aug.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_WARNING))
    fn = os.path.join(d, 'warnings.html')

    html = html_list_of_notes(aug,
                              MCDPManualConstants.NOTE_TAG_WARNING,
                              'warnings',
                              'warning',
                              header=header)
    # update_refs_('warnings', html, id2filename)

    write_data_to_file(str(html), fn, quiet=True)
    if nwarnings:
        manifest.append(
            dict(display='%d warnings' % nwarnings, filename='warnings.html'))
        msg = 'There were %d warnings: %s' % (nwarnings, fn)
        logger.warn(msg)

    ntasks = len(aug.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_TASK))
    fn = os.path.join(d, 'tasks.html')

    html = html_list_of_notes(aug,
                              MCDPManualConstants.NOTE_TAG_TASK,
                              'tasks',
                              'task',
                              header=header)
    # update_refs_('tasks', html, id2filename)
    write_data_to_file(str(html), fn, quiet=True)
    if ntasks:
        manifest.append(
            dict(display='%d tasks' % ntasks, filename='tasks.html'))
        msg = 'There are %d open tasks: %s' % (ntasks, fn)
        logger.info(msg)

    nerrors = len(aug.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_ERROR))
    fn = os.path.join(d, 'errors.html')
    html = html_list_of_notes(aug,
                              MCDPManualConstants.NOTE_TAG_ERROR,
                              'errors',
                              'error',
                              header=header)
    # update_refs_('tasks', html, id2filename)
    write_data_to_file(str(html), fn, quiet=True)
    if nerrors:
        manifest.append(
            dict(display='%d errors' % nerrors, filename='errors.html'))

        msg = 'I am sorry to say that there were %d errors.\n\nPlease see: %s' % (
            nerrors, fn)
        logger.error('\n\n\n' + indent(msg, ' ' * 15) + '\n\n')

    fn = os.path.join(d, 'errors_and_warnings.manifest.yaml')
    write_data_to_file(yaml.dump(manifest), fn, quiet=False)

    fn = os.path.join(d, 'errors_and_warnings.pickle')
    res = AugmentedResult()
    res.merge(aug)
    write_data_to_file(pickle.dumps(res), fn, quiet=False)
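
A hedged call sketch for write_errors_and_warnings_files: given an AugmentedResult with some notes and an output directory, it writes warnings.html, tasks.html, errors.html, errors_and_warnings.manifest.yaml and errors_and_warnings.pickle into that directory (the note texts and the directory path are illustrative):

from mcdp_docs.mcdp_render_manual import write_errors_and_warnings_files

aug = AugmentedResult()
aug.note_warning('figure resolution is low')
aug.note_error('missing bibliography entry')
write_errors_and_warnings_files(aug, 'out/notes')
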
Example #8
def go():
    groups = OrderedDict(yaml.load(BOOKS))

    import os

    dist = 'duckuments-dist'

    html = Tag(name='html')
    head = Tag(name='head')
    meta = Tag(name='meta')
    meta.attrs['content'] = "text/html; charset=utf-8"
    meta.attrs['http-equiv'] = "Content-Type"

    stylesheet = 'v_manual_split'
    link = Tag(name='link')
    link['rel'] = 'stylesheet'
    link['type'] = 'text/css'
    link['href'] = get_css_filename('compiled/%s' % stylesheet)
    head.append(link)

    body = Tag(name='body')

    style = Tag(name='style')

    style.append(CSS)

    head.append(style)
    head.append(meta)

    html.append(head)
    html.append(body)

    divgroups = Tag(name='div')
    all_crossrefs = Tag(name='div')

    res = AugmentedResult()

    for id_group, group in groups.items():
        divgroup = Tag(name='div')
        divgroup.attrs['class'] = 'group'
        divgroup.attrs['id'] = id_group

        h0 = Tag(name='h1')
        h0.append(group['title'])

        divgroup.append(h0)

        if 'abstract' in group:
            p = Tag(name='p')
            p.append(group['abstract'])
            divgroup.append(p)

        books = group['books']
        # divbook = Tag(name='div')
        books = OrderedDict(books)
        for id_book, book in books.items():
            d = os.path.join(dist, id_book)
            change_frame(d, '../../', current_slug=id_book)

            d0 = dist

            errors_and_warnings = os.path.join(d, 'out', 'errors_and_warnings.pickle')
            if os.path.exists(errors_and_warnings):
                resi = pickle.loads(open(errors_and_warnings, 'rb').read())
                # print(errors_and_warnings)

                resi.update_file_path(prefix=os.path.join(id_book, 'out'))
                res.merge(resi)
            else:
                msg = 'Path does not exist: %s' % errors_and_warnings
                logger.error(msg)

            artefacts = get_artefacts(d0, d)

            div = Tag(name='div')
            div.attrs['class'] = 'book-div'
            div.attrs['id'] = id_book
            div_inside = Tag(name='div')
            div_inside.attrs['class'] = 'div_inside'
            links = get_links2(artefacts)

            for a in links.select('a'):
                s = gettext(a)
                if 'error' in s or 'warning' in s or 'task' in s:
                    a['class'] = 'EWT'

            if False:
                h = Tag(name='h3')
                h.append(book['title'])

                # div_inside.append(h)
                if 'abstract' in book:
                    p = Tag(name='p')
                    p.append(book['abstract'])
                    div_inside.append(p)

            div_inside.append(links)
            div.append(div_inside)

            toc = os.path.join(d, 'out/toc.html')
            if os.path.exists(toc):
                data = open(toc).read()
                x = bs(data)
                for a in x.select('a[href]'):
                    href = a.attrs['href']
                    a.attrs['href'] = id_book + '/out/' + href
                x.name = 'div'  # not fragment
                div.append(x)
            crossrefs = os.path.join(d, 'crossref.html')
            if os.path.exists(crossrefs):
                x = bs(open(crossrefs).read())
                for e in x.select('[url]'):
                    all_crossrefs.append('\n\n')
                    all_crossrefs.append(e.__copy__())
            else:
                logger.error('File does not exist %s' % crossrefs)

            divgroup.append(div)
        divgroups.append(divgroup)

    out_pickle = sys.argv[3]

    nwarnings = len(res.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_WARNING))
    ntasks = len(res.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_TASK))
    nerrors = len(res.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_ERROR))
    logger.info('%d tasks' % ntasks)
    logger.warning('%d warnings' % nwarnings)
    logger.error('%d errors' % nerrors)

    from mcdp_docs.mcdp_render_manual import write_errors_and_warnings_files
    write_errors_and_warnings_files(res, os.path.dirname(out_pickle))

    out_junit = os.path.join(os.path.dirname(out_pickle), 'junit', 'notes', 'junit.xml')
    s = get_junit_xml(res)
    write_data_to_file(s.encode('utf8'), out_junit)

    # write_data_to_file(pickle.dumps(res), out_pickle, quiet=False)

    extra = get_extra_content(res)

    extra.attrs['id'] = 'extra'
    body.append(extra)
    body.append(divgroups)

    embed_css_files(html)

    for e in body.select('.notes-panel'):
        e.extract()
    out = sys.argv[1]
    data = str(html)
    data = data.replace('<body>', '<body>\n<?php header1() ?>\n')
    write_data_to_file(data, out)

    manifest = [dict(display='index', filename=os.path.basename(out))]
    mf = os.path.join(os.path.dirname(out), 'summary.manifest.yaml')
    write_data_to_file(yaml.dump(manifest), mf)

    out_crossrefs = sys.argv[2]

    html = Tag(name='html')
    head = Tag(name='head')
    body = Tag(name='body')
    style = Tag(name='style')
    style.append(CROSSREF_CSS)
    head.append(style)
    html.append(head)

    script = Tag(name='script')
    script.append(CROSSREF_SCRIPT)

    container = Tag(name='div')
    container.attrs['id'] = 'container'
    body.append(container)

    details = Tag(name='details')
    summary = Tag(name='summary')
    summary.append('See all references')
    details.append(summary)
    details.append(all_crossrefs)
    body.append(details)
    body.append(script)
    html.append(body)

    write_data_to_file(str(html), out_crossrefs)

    if nerrors > 0:
        sys.exit(nerrors)
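
go() takes its output locations from the command line: sys.argv[1] is the summary index HTML, sys.argv[2] the cross-reference page, and sys.argv[3] a pickle path whose directory receives the notes reports and the junit XML. A hedged driver sketch (the script name and the paths are illustrative):

import sys

sys.argv = ['summarize', 'dist/index.html', 'dist/crossref.html', 'dist/notes.pickle']
go()
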