Example #1
def generate_pdf_report(release, spec, report_week):
    """Generate html format of the report.

    :param release: Release string of the product.
    :param spec: Specification read from the specification file.
    :param report_week: Calendar week when the report is published.
    :type release: str
    :type spec: Specification
    :type report_week: str
    """

    logging.info(u"  Generating the pdf report, give me a few minutes, please "
                 u"...")

    working_dir = spec.environment[u"paths"][u"DIR[WORKING,SRC]"]

    execute_command(f"cd {working_dir} && mv -f index.pdf.template index.rst")

    _convert_all_svg_to_pdf(working_dir)

    # Convert PyPLOT graphs in HTML format to PDF. wkhtmltopdf needs an X
    # display, so it is run under a virtual framebuffer (xvfb-run).
    convert_plots = u"xvfb-run -a wkhtmltopdf {html} {pdf}"
    plots = get_files(spec.environment[u"paths"][u"DIR[STATIC,VPP]"], u"html")
    plots.extend(
        get_files(spec.environment[u"paths"][u"DIR[STATIC,DPDK]"], u"html"))
    for plot in plots:
        file_name = f"{plot.rsplit(u'.', 1)[0]}.pdf"
        logging.info(f"Converting {plot} to {file_name}")
        execute_command(convert_plots.format(html=plot, pdf=file_name))

    # Generate the LaTeX documentation
    build_dir = spec.environment[u"paths"][u"DIR[BUILD,LATEX]"]
    cmd = PDF_BUILDER.format(
        release=release,
        date=datetime.datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
        working_dir=working_dir,
        build_dir=build_dir)
    execute_command(cmd)

    # Build pdf documentation
    archive_dir = spec.environment[u"paths"][u"DIR[STATIC,ARCH]"]
    cmds = [
        f'cd {build_dir} && '
        f'pdflatex -shell-escape -interaction nonstopmode csit.tex || true',
        f'cd {build_dir} && '
        f'pdflatex -interaction nonstopmode csit.tex || true',
        f'cd {build_dir} && '
        f'cp csit.pdf ../{archive_dir}/csit_{release}.{report_week}.pdf && '
        f'cp csit.pdf ../{archive_dir}/csit_{release}.pdf'
    ]

    for cmd in cmds:
        execute_command(cmd)

    logging.info(u"  Done.")
Example #2
def _convert_all_svg_to_pdf(path):
    """Convert all svg files on path "path" to pdf.

    :param path: Path to the root directory with svg files to convert.
    :type path: str
    """

    svg_files = get_files(path, u"svg", full_path=True)
    for svg_file in svg_files:
        pdf_file = f"{svg_file.rsplit(u'.', 1)[0]}.pdf"
        logging.info(f"Converting {svg_file} to {pdf_file}")
        execute_command(
            f"inkscape -D -z --file={svg_file} --export-pdf={pdf_file}")
Example #3
def file_details_split(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_details_split
    specified in the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Output format; can be rst or html.
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    fileset_file_name = f"{file_spec[u'output-file']}"
    rst_header = (u"\n"
                  u".. |br| raw:: html\n\n    <br />\n\n\n"
                  u".. |prein| raw:: html\n\n    <pre>\n\n\n"
                  u".. |preout| raw:: html\n\n    </pre>\n\n")
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file set {fileset_file_name} ...")

    data_sets = file_spec.get(u"data", None)
    if not data_sets:
        logging.error(
            f"  No data sets specified for {file_spec[u'output-file']}, exit.")
        return

    table_sets = file_spec.get(u"dir-tables", None)
    if not table_sets:
        logging.error(
            f"  No table sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    if len(data_sets) != len(table_sets):
        logging.error(
            f"  The number of data sets and the number of table sets for "
            f"{file_spec[u'output-file']} are not equal, exit.")
        return

    chapters = OrderedDict()
    for data_set, table_set in zip(data_sets, table_sets):

        logging.info(f"   Processing the table set {table_set}...")

        table_lst = None
        if frmt == u"html":
            table_lst = get_files(table_set, u".rst", full_path=True)
        elif frmt == u"rst":
            table_lst = get_files(table_set, u".csv", full_path=True)

        if not table_lst:
            logging.error(
                f"    No tables to include in {table_set}. Skipping.")
            continue

        logging.info(u"    Creating the test data set...")
        tests = input_data.filter_data(
            element=file_spec,
            params=[u"name", u"parent", u"doc", u"type", u"level"],
            data=data_set,
            data_set=u"tests",
            continue_on_error=True)
        if tests.empty:
            continue
        tests = input_data.merge_data(tests)
        tests.sort_index(inplace=True)

        logging.info(u"    Creating the suite data set...")
        suites = input_data.filter_data(element=file_spec,
                                        data=data_set,
                                        continue_on_error=True,
                                        data_set=u"suites")
        if suites.empty:
            continue
        suites = input_data.merge_data(suites)
        suites.sort_index(inplace=True)

        logging.info(u"    Generating files...")

        chapter_l1 = u""
        chapter_l2 = u"-".join(table_set.split(u"_")[-2:])
        for suite_longname, suite in suites.items():

            suite_lvl = len(suite_longname.split(u"."))
            if suite_lvl < start_lvl:
                # Not interested in this suite
                continue

            if suite_lvl == start_lvl:
                # Our top-level suite
                chapter_l1 = suite_longname.split(u'.')[-1]
                if chapters.get(chapter_l1, None) is None:
                    chapters[chapter_l1] = OrderedDict()
                if chapters[chapter_l1].get(chapter_l2, None) is None:
                    chapters[chapter_l1][chapter_l2] = OrderedDict()
                continue

            if _tests_in_suite(suite[u"name"], tests):
                groups = re.search(REGEX_NIC_SHORT, suite[u"name"])
                nic = groups.group(2) if groups else None
                if nic is None:
                    continue
                if chapters[chapter_l1][chapter_l2].get(nic, None) is None:
                    rst_file = (
                        f"{join(table_set, chapter_l1)}_{nic}.rst"
                        .replace(u"2n1l-", u"").replace(u"1n1l-", u"")
                    )
                    chapters[chapter_l1][chapter_l2][nic] = dict(
                        rst_file=rst_file,
                        tables=list())
                for idx, tbl_file in enumerate(table_lst):
                    if suite[u"name"] in tbl_file:
                        chapters[chapter_l1][chapter_l2][nic][
                            u"tables"].append(
                                (table_lst.pop(idx), suite[u"doc"]))
                        break
    titles = {
        # VPP Perf, MRR
        u"container_memif": u"LXC/DRC Container Memif",
        u"crypto": u"IPsec IPv4 Routing",
        u"hoststack": u"Hoststack Testing",
        u"ip4": u"IPv4 Routing",
        u"ip4_tunnels": u"IPv4 Tunnels",
        u"ip6": u"IPv6 Routing",
        u"ip6_tunnels": u"IPv6 Tunnels",
        u"l2": u"L2 Ethernet Switching",
        u"lb": u"LoadBalancer",
        u"nfv_density": u"NFV Service Density",
        u"srv6": u"SRv6 Routing",
        u"vm_vhost": u"KVM VMs vhost-user",
        u"vts": u"Virtual Topology System",
        # VPP Device
        u"interfaces": u"Interfaces",
        u"l2bd": u"L2 Bridge-domain",
        u"l2patch": u"L2 Patch",
        u"l2xc": u"L2 Cross-connect",
    }

    order_chapters = file_spec.get(u"order-chapters", None)

    if order_chapters:
        order_1 = order_chapters.get(u"level-1", None)
        order_2 = order_chapters.get(u"level-2", None)
        order_3 = order_chapters.get(u"level-3", None)
    else:
        order_1 = None
        order_2 = None
        order_3 = None
    if not order_1:
        # No explicit ordering given; fall back to the insertion order of
        # the collected chapters. Without this fallback the loop below
        # would iterate over None and raise TypeError.
        order_1 = chapters.keys()

    for chapter_l1 in order_1:
        content_l1 = chapters.get(chapter_l1, None)
        if not content_l1:
            continue
        with open(f"{fileset_file_name}/index.rst", u"a") as file_handler:
            file_handler.write(f"    {chapter_l1}\n")
        l1_file_name = f"{join(fileset_file_name, chapter_l1)}.rst"
        title = titles.get(chapter_l1, chapter_l1)
        logging.info(f"   Generating {title} ...")
        with open(l1_file_name, u"w") as file_handler:
            file_handler.write(f"{title}\n"
                               f"{get_rst_title_char(1) * len(title)}\n\n"
                               f".. toctree::\n\n")

        # Use a per-level fallback so one chapter's keys do not leak into
        # the iteration over the next chapter.
        l2_order = order_2 if order_2 else content_l1.keys()
        for chapter_l2 in l2_order:
            content_l2 = content_l1.get(chapter_l2, None)
            if not content_l2:
                continue
            l3_order = order_3 if order_3 else content_l2.keys()
            for chapter_l3 in l3_order:
                content_l3 = content_l2.get(chapter_l3, None)
                if not content_l3:
                    continue
                with open(l1_file_name, u"a") as file_handler:
                    item = u"/".join(content_l3[u'rst_file'].split(u'/')[-2:])
                    file_handler.write(f"    ../{item}\n")
                logging.info(f"    Writing the file {content_l3[u'rst_file']}")
                with open(content_l3[u'rst_file'], u"w+") as file_handler:
                    title = f"{chapter_l2}-{chapter_l3}"
                    file_handler.write(
                        f"{rst_header}\n"
                        f"{title}\n"
                        f"{get_rst_title_char(2) * len(title)}\n")
                    for table in content_l3[u'tables']:
                        title = table[0].split(u"/")[-1].split(u".")[0]
                        file_handler.write(
                            f"\n{title}\n"
                            f"{get_rst_title_char(3) * len(title)}\n")
                        file_handler.write(f"\n{table[1]}\n")
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {table[0].split(u'/')[-1]}"
                                f"\n")
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=table[0],
                                    file_html=table[0].split(u"/")[-1]))
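The _tests_in_suite helper called above is defined elsewhere in the module. A
plausible reconstruction, assuming each filtered test record is a dict
carrying the u"parent" field requested from filter_data and that a test
belongs to a suite when its parent matches the suite name (both are
assumptions, not the verified implementation):

def _tests_in_suite(suite_name, tests):
    """Return True if any test in "tests" belongs to the given suite.

    Hypothetical reconstruction of the helper referenced above.
    """
    for _, test in tests.items():
        if suite_name == test.get(u"parent", u""):
            return True
    return False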
Example #4
def file_test_results(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_test_results
    specified in the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Output format; can be rst or html.
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    base_file_name = f"{file_spec[u'output-file']}"
    rst_header = (u"\n"
                  u".. |br| raw:: html\n\n    <br />\n\n\n"
                  u".. |prein| raw:: html\n\n    <pre>\n\n\n"
                  u".. |preout| raw:: html\n\n    </pre>\n\n")
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file {base_file_name} ...")

    if frmt == u"html":
        table_lst = get_files(file_spec[u"dir-tables"],
                              u".rst",
                              full_path=True)
    elif frmt == u"rst":
        table_lst = get_files(file_spec[u"dir-tables"],
                              u".csv",
                              full_path=True)
    else:
        logging.error(f"  Unsupported format {frmt}, exit.")
        return
    if not table_lst:
        logging.error(
            f"  No tables to include in {file_spec[u'dir-tables']}. Skipping.")
        return

    logging.info(
        f"    Creating the tests data set for the "
        f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}.")

    tests = input_data.filter_data(
        file_spec,
        params=[u"name", u"parent", u"doc", u"type", u"level"],
        continue_on_error=True)
    if tests.empty:
        return
    tests = input_data.merge_data(tests)

    suites = input_data.filter_data(file_spec,
                                    continue_on_error=True,
                                    data_set=u"suites")
    if suites.empty:
        return
    suites = input_data.merge_data(suites)
    suites.sort_index(inplace=True)

    file_name = u""
    for suite_longname, suite in suites.items():

        suite_lvl = len(suite_longname.split(u"."))
        if suite_lvl < start_lvl:
            # Not interested in this suite
            continue

        if suite_lvl == start_lvl:
            # Our top-level suite
            chapter = suite_longname.split(u'.')[-1]
            file_name = f"{base_file_name}/{chapter}.rst"
            logging.info(f"    Writing file {file_name}")
            with open(f"{base_file_name}/index.rst", u"a") as file_handler:
                file_handler.write(f"    {chapter}\n")
            with open(file_name, u"a") as file_handler:
                file_handler.write(rst_header)

        title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
            len(suite[u"name"])
        with open(file_name, u"a") as file_handler:
            if not (u"-ndrpdr" in suite[u"name"] or u"-mrr" in suite[u"name"]
                    or u"-dev" in suite[u"name"]):
                file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")

            if _tests_in_suite(suite[u"name"], tests):
                for tbl_file in table_lst:
                    if suite[u"name"] in tbl_file:
                        file_handler.write(
                            f"\n{suite[u'name']}\n{title_line}\n")
                        file_handler.write(f"\n{suite[u'doc']}\n".replace(
                            u'|br|', u'\n\n -'))
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {tbl_file.split(u'/')[-1]}\n")
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=tbl_file,
                                    file_html=tbl_file.split(u"/")[-1]))
                        break

    logging.info(u"  Done.")
Example #5
def convert_xml_to_json(spec, data):
    """Convert downloaded XML files into JSON.

    Procedure:
    - create one json file for each test,
    - gzip all json files one by one,
    - delete json files.

    :param spec: Specification read from the specification files.
    :param data: Input data parsed from output.xml files.
    :type spec: Specification
    :type data: InputData
    """

    logging.info(u"Converting downloaded XML files to JSON ...")

    template_name = spec.output.get(u"use-template", None)
    structure = spec.output.get(u"structure", u"tree")
    if template_name:
        with open(template_name, u"r") as file_handler:
            template = json.load(file_handler)
    else:
        template = None

    build_dir = spec.environment[u"paths"][u"DIR[BUILD,JSON]"]
    try:
        rmtree(build_dir)
    except FileNotFoundError:
        pass  # It does not exist

    os.mkdir(build_dir)

    for job, builds in data.data.items():
        logging.info(f"  Processing job {job}")
        if structure == "tree":
            os.makedirs(join(build_dir, job), exist_ok=True)
        for build_nr, build in builds.items():
            logging.info(f"  Processing build {build_nr}")
            if structure == "tree":
                os.makedirs(join(build_dir, job, build_nr), exist_ok=True)
            for test_id, test_data in build[u"tests"].items():
                groups = re.search(r'-(\d+[tT](\d+[cC]))-', test_id)
                if groups:
                    test_id = test_id.replace(groups.group(1), groups.group(2))
                logging.info(f"  Processing test {test_id}")
                if structure == "tree":
                    dirs = test_id.split(u".")[:-1]
                    name = test_id.split(u".")[-1]
                    os.makedirs(join(build_dir, job, build_nr, *dirs),
                                exist_ok=True)
                    file_name = \
                        f"{join(build_dir, job, build_nr, *dirs, name)}.json"
                else:
                    file_name = join(
                        build_dir, u'.'.join(
                            (job, build_nr, test_id, u'json')))
                suite_id = test_id.rsplit(u".", 1)[0].replace(u" ", u"_")
                _export_test_from_xml_to_json(
                    test_id, test_data, file_name, template, {
                        u"ci": u"jenkins.fd.io",
                        u"job": job,
                        u"build_number": build_nr,
                        u"suite_id": suite_id,
                        u"suite_doc": build[u"suites"].get(
                            suite_id, dict()).get(u"doc", u""),
                        u"testbed": build[u"metadata"].get(u"testbed", u""),
                        u"sut_version": build[u"metadata"].get(u"version", u"")
                    })

    # gzip the json files one by one, then delete the originals:
    for file in get_files(build_dir, u"json"):
        with open(file, u"rb") as src:
            with gzip.open(f"{file}.gz", u"wb") as dst:
                dst.writelines(src)
        # Remove the json file only after both handles are closed.
        os.remove(file)

    logging.info(u"Done.")