Example #1
def test_perfdocs_generator_save_perfdocs_pass(logger, structured_logger,
                                               perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    templates_dir = os.path.join(top_dir, "tools", "lint", "perfdocs",
                                 "templates")
    os.makedirs(templates_dir)

    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier.validate_tree()

    generator = Generator(verifier, generate=True, workspace=top_dir)

    assert not os.path.isdir(generator.perfdocs_path)

    with temp_file("index.rst",
                   tempdir=templates_dir,
                   content="{test_documentation}"):
        perfdocs_tmpdir = generator._create_perfdocs()

    generator._save_perfdocs(perfdocs_tmpdir)

    expected = ["index.rst", "mozperftest.rst"]
    files = sorted(os.listdir(generator.perfdocs_path))

    for i, file in enumerate(files):
        assert file == expected[i]
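
Every example on this page relies on the same test harness: the `logger`, `structured_logger`, and `perfdocs_sample` fixtures, the `setup_sample_logger` helper, and the `temp_file` context manager, none of which are shown here. Below is a minimal sketch of what they could look like; the names come straight from the examples, but the bodies are assumptions standing in for the real conftest:

import contextlib
import os
from unittest import mock

import pytest


@pytest.fixture
def logger():
    # Mock that the perfdocs modules log through; the tests assert on
    # its .warning / .log / .critical call counts and arguments.
    return mock.MagicMock()


@pytest.fixture
def structured_logger():
    # Stand-in for the mozlog StructuredLogger handed to PerfDocLogger.
    return mock.MagicMock()


@pytest.fixture
def perfdocs_sample(tmp_path):
    # Build a throwaway perfdocs tree. The contents written here are
    # placeholders; the real fixture also provides sample manifests and
    # extra keys such as "config_2", "example1_manifest" and
    # "example2_manifest" used by some of the examples.
    top_dir = str(tmp_path)
    perfdocs_dir = os.path.join(top_dir, "testing", "perfdocs")
    os.makedirs(perfdocs_dir)

    paths = {
        "top_dir": top_dir,
        "config": os.path.join(perfdocs_dir, "config.yml"),
        "index": os.path.join(perfdocs_dir, "index.rst"),
        "manifest": os.path.join(top_dir, "testing", "perftest.ini"),
    }
    for key in ("config", "index", "manifest"):
        with open(paths[key], "w") as f:
            f.write("# placeholder\n")
    return paths


def setup_sample_logger(logger, structured_logger, top_dir):
    # Wire the mocks into perfdocs so that calls made deep inside the
    # verifier/generator land on `logger` (the exact wiring is assumed).
    from perfdocs.logger import PerfDocLogger

    PerfDocLogger.LOGGER = structured_logger
    PerfDocLogger.PATHS = ["perfdocs"]
    PerfDocLogger.TOP_DIR = top_dir


@contextlib.contextmanager
def temp_file(name, tempdir, content=""):
    # Create a file for the duration of a with-block, then remove it.
    path = os.path.join(tempdir, name)
    with open(path, "w") as f:
        f.write(content)
    try:
        yield path
    finally:
        os.remove(path)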
Example #2
def test_perfdocs_generator_created_perfdocs(logger, structured_logger,
                                             perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    templates_dir = os.path.join(top_dir, "tools", "lint", "perfdocs",
                                 "templates")
    os.makedirs(templates_dir)

    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier.validate_tree()

    generator = Generator(verifier, generate=True, workspace=top_dir)
    with temp_file("index.rst",
                   tempdir=templates_dir,
                   content="{test_documentation}"):
        perfdocs_tmpdir = generator._create_perfdocs()

    files = sorted(os.listdir(perfdocs_tmpdir))
    expected_files = ["index.rst", "mozperftest.rst"]

    for i, file in enumerate(files):
        assert file == expected_files[i]

    with open(os.path.join(perfdocs_tmpdir, expected_files[0])) as f:
        filedata = f.readlines()
    assert "".join(filedata) == "  * :doc:`mozperftest`"
Example #3
def test_perfdocs_verifier_invalid_rst(logger, structured_logger,
                                       perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    rst_path = perfdocs_sample["index"]
    setup_sample_logger(logger, structured_logger, top_dir)

    # Replace the target string with an invalid keyword for the test
    with open(rst_path, "r") as file:
        filedata = file.read()

    filedata = filedata.replace("documentation", "Invalid Keyword")

    with open(rst_path, "w") as file:
        file.write(filedata)

    from perfdocs.verifier import Verifier

    verifier = Verifier("top_dir")
    valid = verifier.validate_rst_content(rst_path)

    expected = (
        "Cannot find a '{documentation}' entry in the given index file",
        rst_path,
    )
    args, _ = logger.warning.call_args

    assert logger.warning.call_count == 1
    assert args == expected
    assert not valid
Example #4
def test_perfdocs_verifier_not_existing_tests_in_suites(
        logger, structured_logger, perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    with open(perfdocs_sample["config"], "r") as file:
        filedata = file.read()
        filedata = filedata.replace("Example", "DifferentName")
    with open(perfdocs_sample["config"], "w") as file:
        file.write(filedata)

    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier._check_framework_descriptions(verifier._gatherer.perfdocs_tree[0])

    expected = [
        "Could not find an existing test for DifferentName - bad test name?",
        "Could not find a test description for Example",
    ]

    assert logger.warning.call_count == 2
    for i, call in enumerate(logger.warning.call_args_list):
        args, _ = call
        assert args[0] == expected[i]
Example #5
def test_perfdocs_generator_save_perfdocs_fail(logger, shutil,
                                               structured_logger,
                                               perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    templates_dir = os.path.join(top_dir, "tools", "lint", "perfdocs",
                                 "templates")
    os.makedirs(templates_dir)

    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier.validate_tree()

    generator = Generator(verifier, generate=True, workspace=top_dir)
    with temp_file("index.rst",
                   tempdir=templates_dir,
                   content="{test_documentation}"):
        perfdocs_tmpdir = generator._create_perfdocs()

    shutil.copytree = mock.Mock(side_effect=Exception())
    generator._save_perfdocs(perfdocs_tmpdir)

    expected = "There was an error while saving the documentation: "
    args, _ = logger.critical.call_args

    assert logger.critical.call_count == 1
    assert args[0] == expected
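
The `shutil` argument above is itself a fixture rather than the standard module: presumably it patches the copy of `shutil` that perfdocs.generator imported, so the test can inject a failure without touching the real module. The `jsonschema` fixture in Example #14 and the `os` fixture in Example #19 would follow the same pattern. A sketch of such a fixture, where the patch target is an assumption:

@pytest.fixture
def shutil():
    # Replace the `shutil` module as seen by perfdocs.generator, so that
    # setting shutil.copytree = mock.Mock(...) in a test only affects the
    # generator. The patch target path is an assumption.
    with mock.patch("perfdocs.generator.shutil") as mocked_shutil:
        yield mocked_shutil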
Example #6
def test_perfdocs_verifier_missing_contents_in_suite(logger, structured_logger,
                                                     perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    with open(perfdocs_sample["config"], "r") as file:
        filedata = file.read()
        filedata = filedata.replace("suite:", "InvalidSuite:")
    with open(perfdocs_sample["config"], "w") as file:
        file.write(filedata)

    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier._check_framework_descriptions(verifier._gatherer.perfdocs_tree[0])

    expected = (
        "Could not find an existing suite for InvalidSuite - bad suite name?",
        "Missing suite description for suite",
    )

    assert logger.warning.call_count == 2
    for i, call in enumerate(logger.warning.call_args_list):
        args, _ = call
        assert args[0] == expected[i]
Example #7
def test_perfdocs_verifier_invalid_dir(logger, structured_logger, perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.verifier import Verifier

    verifier = Verifier("invalid_path")
    with pytest.raises(Exception) as exceinfo:
        verifier.validate_tree()

    assert str(exceinfo.value) == "No valid perfdocs directories found"
Example #8
def test_perfdocs_verification(logger, structured_logger, perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier.validate_tree()

    # Make sure that we had no warnings
    assert logger.warning.call_count == 0
    assert logger.log.call_count == 1
    assert len(logger.mock_calls) == 1
Example #9
def test_perfdocs_verifier_validate_descriptions_pass(logger,
                                                      structured_logger,
                                                      perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier._check_framework_descriptions(verifier._gatherer.perfdocs_tree[0])

    assert logger.warning.call_count == 0
    assert logger.log.call_count == 1
    assert len(logger.mock_calls) == 1
Example #10
def test_perfdocs_generator_create_temp_dir(logger, structured_logger,
                                            perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier.validate_tree()

    generator = Generator(verifier, generate=True, workspace=top_dir)
    tmpdir = generator._create_temp_dir()

    assert os.path.isdir(tmpdir)
Example #11
def test_perfdocs_verifier_file_invalidation(logger, structured_logger,
                                             perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.verifier import Verifier

    Verifier.validate_yaml = mock.Mock(return_value=False)
    verifier = Verifier(top_dir)
    with pytest.raises(Exception):
        verifier.validate_tree()

    # Check that the "File validation error" log is emitted, along with
    # a second log call made from inside perfdocs_tree().
    assert logger.log.call_count == 2
    assert len(logger.mock_calls) == 2
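
One caveat with the example above: assigning `mock.Mock()` directly onto the class (`Verifier.validate_yaml = ...`) replaces the method for the rest of the test session. A sketch of the same test written with `mock.patch.object`, which restores the original method automatically (test body otherwise unchanged):

def test_perfdocs_verifier_file_invalidation_patched(logger, structured_logger,
                                                     perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.verifier import Verifier

    # patch.object undoes the replacement when the with-block exits,
    # so the mocked validate_yaml cannot leak into other tests.
    with mock.patch.object(Verifier, "validate_yaml", return_value=False):
        verifier = Verifier(top_dir)
        with pytest.raises(Exception):
            verifier.validate_tree()

    assert logger.log.call_count == 2
    assert len(logger.mock_calls) == 2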
Example #12
def test_perfdocs_framework_gatherers_urls(logger, structured_logger,
                                           perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.gatherer import frameworks
    from perfdocs.verifier import Verifier
    from perfdocs.generator import Generator
    from perfdocs.utils import read_yaml

    # This test is only for raptor
    gatherer = frameworks["raptor"]
    with open(perfdocs_sample["config"], "w") as f:
        f.write(DYNAMIC_SAMPLE_CONFIG.format("raptor"))

    fg = gatherer(perfdocs_sample["config_2"], top_dir)
    fg.get_suite_list = mock.Mock()
    fg.get_suite_list.return_value = {
        "suite": [perfdocs_sample["example1_manifest"]],
        "another_suite": [perfdocs_sample["example2_manifest"]],
    }

    v = Verifier(top_dir)
    gn = Generator(v, generate=True, workspace=top_dir)

    # Make sure that when a test is present under multiple suites,
    # the URLs are generated correctly for the test under every suite
    for suite, suitetests in fg.get_test_list().items():
        url = fg._urls.get(suite)
        assert url is not None
        assert url[0]["test_name"] == "Example"
        assert url[0]["url"] == "Example_url"

    perfdocs_tree = gn._perfdocs_tree[0]
    yaml_content = read_yaml(
        os.path.join(perfdocs_tree["path"], perfdocs_tree["yml"]))
    suites = yaml_content["suites"]

    # Check that the sections for each suite are generated correctly
    for suite_name, suite_details in suites.items():
        gn._verifier._gatherer = mock.Mock(
            framework_gatherers={"raptor": gatherer})
        section = gn._verifier._gatherer.framework_gatherers[
            "raptor"].build_suite_section(
                fg, suite_name,
                suites.get(suite_name)["description"])
        assert suite_name.capitalize() == section[0]
        assert suite_name in section[2]

        tests = suites.get(suite_name).get("tests", {})
        for test_name in tests.keys():
            desc = gn._verifier._gatherer.framework_gatherers[
                "raptor"].build_test_description(fg, test_name,
                                                 tests[test_name], suite_name)
            assert suite_name in desc[0]
            assert test_name in desc[0]
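
The URL assertions above imply that `fg._urls` maps each suite name to a list of per-test entries. A shape consistent with those checks (inferred from the assertions, not taken from the gatherer's source) would be:

# Illustrative only: the structure the assertions above are probing.
expected_urls = {
    "suite": [{"test_name": "Example", "url": "Example_url"}],
    "another_suite": [{"test_name": "Example", "url": "Example_url"}],
}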
Example #13
def test_perfdocs_generator_build_perfdocs(logger, structured_logger,
                                           perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier.validate_tree()

    generator = Generator(verifier, generate=True, workspace=top_dir)
    frameworks_info = generator.build_perfdocs_from_tree()

    expected = ["dynamic", "static"]

    for framework in sorted(frameworks_info.keys()):
        for i, framework_info in enumerate(frameworks_info[framework]):
            assert framework_info == expected[i]
Example #14
def test_perfdocs_verifier_invalid_yaml(logger, jsonschema, structured_logger,
                                        perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    yaml_path = perfdocs_sample["config"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.verifier import Verifier

    jsonschema.validate = mock.Mock(
        side_effect=Exception("Schema/ValidationError"))
    verifier = Verifier("top_dir")
    valid = verifier.validate_yaml(yaml_path)

    expected = ("YAML ValidationError: Schema/ValidationError", yaml_path)
    args, _ = logger.warning.call_args

    assert logger.warning.call_count == 1
    assert args == expected
    assert not valid
Example #15
def test_perfdocs_generator_needed_regeneration(logger, structured_logger,
                                                perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier.validate_tree()

    generator = Generator(verifier, generate=False, workspace=top_dir)
    generator.generate_perfdocs()

    expected = "PerfDocs need to be regenerated."
    args, _ = logger.warning.call_args

    assert logger.warning.call_count == 1
    assert args[0] == expected
Example #16
def test_perfdocs_verifier_not_existing_suite_in_test_list(
        logger, structured_logger, perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    manifest_path = perfdocs_sample["manifest"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    os.remove(manifest_path)
    verifier._check_framework_descriptions(verifier._gatherer.perfdocs_tree[0])

    expected = (
        "Could not find an existing suite for suite - bad suite name?",
        perfdocs_sample["config"],
    )
    args, _ = logger.warning.call_args

    assert logger.warning.call_count == 1
    assert args == expected
Example #17
def test_perfdocs_verifier_validate_rst_pass(logger, structured_logger,
                                             perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    rst_path = perfdocs_sample["index"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.verifier import Verifier

    valid = Verifier(top_dir).validate_rst_content(rst_path)

    assert valid
Example #18
def test_perfdocs_verifier_validate_yaml_pass(logger, structured_logger,
                                              perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    yaml_path = perfdocs_sample["config"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.verifier import Verifier

    valid = Verifier(top_dir).validate_yaml(yaml_path)

    assert valid
Example #19
def test_perfdocs_generator_create_temp_dir_fail(logger, os, structured_logger,
                                                 perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier.validate_tree()

    generator = Generator(verifier, generate=True, workspace=top_dir)
    os.mkdir = mock.Mock(side_effect=OSError())
    os.path.isdir.return_value = False
    tmpdir = generator._create_temp_dir()

    expected = "Error creating temp file: "
    args, _ = logger.critical.call_args

    assert not tmpdir
    assert logger.critical.call_count == 1
    assert args[0] == expected
Example #20
def test_perfdocs_generator_generate_perfdocs_pass(logger, structured_logger,
                                                   perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    templates_dir = os.path.join(top_dir, "tools", "lint", "perfdocs",
                                 "templates")
    os.makedirs(templates_dir)

    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    verifier = Verifier(top_dir)
    verifier.validate_tree()

    generator = Generator(verifier, generate=True, workspace=top_dir)
    with temp_file("index.rst",
                   tempdir=templates_dir,
                   content="{test_documentation}"):
        generator.generate_perfdocs()

    assert logger.warning.call_count == 0
Example #21
def test_perfdocs_generator_needed_update(logger, structured_logger,
                                          perfdocs_sample):
    top_dir = perfdocs_sample["top_dir"]
    setup_sample_logger(logger, structured_logger, top_dir)

    templates_dir = os.path.join(top_dir, "tools", "lint", "perfdocs",
                                 "templates")
    os.makedirs(templates_dir)

    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    # Initializing perfdocs
    verifier = Verifier(top_dir)
    verifier.validate_tree()

    generator = Generator(verifier, generate=True, workspace=top_dir)
    with temp_file("index.rst",
                   tempdir=templates_dir,
                   content="{test_documentation}"):
        generator.generate_perfdocs()

        # Remove the generated files for testing and run again
        generator._generate = False
        for f in os.listdir(generator.perfdocs_path):
            os.remove(os.path.join(generator.perfdocs_path, f))

        generator.generate_perfdocs()

    expected = (
        "PerfDocs are outdated, run ./mach lint -l perfdocs --fix` to update them."
    )
    args, _ = logger.warning.call_args

    assert logger.warning.call_count == 1
    assert args[0] == expected
Example #22
def run_perfdocs(config, logger=None, paths=None, generate=True):
    """
    Build up performance testing documentation dynamically by combining
    text data from YAML files that reside in `perfdocs` folders
    across the `testing` directory. Each directory is expected to have
    an `index.rst` file along with `config.yml` YAMLs defining what needs
    to be added to the documentation.

    The YAML must also define the name of the "framework" that should be
    used in the main index.rst for the performance testing documentation.

    The testing documentation list will be ordered alphabetically once
    it's produced (to avoid unwanted shifts because of unordered dicts
    and path searching).

    Note that the suite name headings will be given the H4 (---) style so it
    is suggested that you use H3 (===) style as the heading for your
    test section. H5 will be used for individual tests within each
    suite.

    Usage for verification: ./mach lint -l perfdocs
    Usage for generation: ./mach lint -l perfdocs --fix

    For validation, see the Verifier class for a description of how
    it works.

    The run will fail if validate_tree logs any warning or problem,
    i.e. if the tree does not validate cleanly.

    :param dict config: The configuration given by mozlint.
    :param StructuredLogger logger: The StructuredLogger instance to be used to
        output the linting warnings/errors.
    :param list paths: The paths that are being tested. Used to filter
        out errors from files outside of these paths.
    :param bool generate: If true, the docs will be (re)generated.
    """
    from perfdocs.logger import PerfDocLogger

    top_dir = os.environ.get("WORKSPACE", None)
    if not top_dir:
        floc = os.path.abspath(__file__)
        top_dir = floc.split("tools")[0]
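    # Escape backslashes so top_dir can be used as a regex pattern
    # in re.sub below (relevant for Windows paths).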
    top_dir = top_dir.replace("\\", "\\\\")

    PerfDocLogger.LOGGER = logger
    PerfDocLogger.TOP_DIR = top_dir

    # Convert all the paths to relative ones
    rel_paths = [re.sub(top_dir, "", path) for path in paths]
    PerfDocLogger.PATHS = rel_paths

    target_dir = [os.path.join(top_dir, i) for i in rel_paths]
    for path in target_dir:
        if not os.path.exists(path):
            raise Exception("Cannot locate directory at %s" % path)

    # Late import because logger isn't defined until later
    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    # Run the verifier first
    verifier = Verifier(top_dir)
    verifier.validate_tree()

    if not PerfDocLogger.FAILED:
        # Even if the tree is valid, we need to check if the documentation
        # needs to be regenerated, and if it does, we throw a linting error.
        # `generate` dictates whether or not the documentation is generated.
        generator = Generator(verifier, generate=generate, workspace=top_dir)
        generator.generate_perfdocs()
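
The docstring above describes the expected inputs: each perfdocs folder holds an `index.rst` containing a `{documentation}` entry (see Example #3) plus a `config.yml` naming the framework and its suites. A sketch of writing out such a minimal pair follows; the YAML keys are inferred from the tests on this page rather than from the actual schema:

import os


def write_minimal_perfdocs(perfdocs_dir):
    """Write a skeletal perfdocs folder; the key names are illustrative."""
    os.makedirs(perfdocs_dir, exist_ok=True)

    # config.yml names the framework and describes each suite and test.
    with open(os.path.join(perfdocs_dir, "config.yml"), "w") as f:
        f.write(
            "name: mozperftest\n"
            "suites:\n"
            "  suite:\n"
            "    description: Performance tests from the suite folder.\n"
            "    tests:\n"
            "      Example: Performance test Example, from suite.\n"
        )

    # index.rst must contain a {documentation} entry that the generator
    # replaces with the generated framework list.
    with open(os.path.join(perfdocs_dir, "index.rst"), "w") as f:
        f.write("Performance Tests\n=================\n\n{documentation}\n")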
Example #23
def run_perfdocs(config, logger=None, paths=None, verify=True, generate=False):
    '''
    Build up performance testing documentation dynamically by combining
    text data from YAML files that reside in `perfdocs` folders
    across the `testing` directory. Each directory is expected to have
    an `index.rst` file along with `config.yml` YAMLs defining what needs
    to be added to the documentation.

    The YAML must also define the name of the "framework" that should be
    used in the main index.rst for the performance testing documentation.

    The testing documentation list will be ordered alphabetically once
    it's produced (to avoid unwanted shifts because of unordered dicts
    and path searching).

    Note that the suite name headings will be given the H4 (---) style so it
    is suggested that you use H3 (===) style as the heading for your
    test section. H5 will be used for individual tests within each
    suite.

    Usage for verification: ./mach lint -l perfdocs
    Usage for generation: Not Implemented

    Currently, doc generation is not implemented - only validation.

    For validation, see the Verifier class for a description of how
    it works.

    The run will fail if validate_tree logs any warning or problem,
    i.e. if the tree does not validate cleanly.

    :param dict config: The configuration given by mozlint.
    :param StructuredLogger logger: The StructuredLogger instance to be used to
        output the linting warnings/errors.
    :param list paths: The paths that are being tested. Used to filter
        out errors from files outside of these paths.
    :param bool verify: If true, the verification will be performed.
    :param bool generate: If true, the docs will be generated.
    '''
    from perfdocs.logger import PerfDocLogger

    top_dir = os.environ.get('WORKSPACE', None)
    if not top_dir:
        floc = os.path.abspath(__file__)
        top_dir = floc.split('tools')[0]

    PerfDocLogger.LOGGER = logger
    # Convert all the paths to relative ones
    rel_paths = [re.sub(".*testing", "testing", path) for path in paths]
    PerfDocLogger.PATHS = rel_paths

    # TODO: Expand search to entire tree rather than just the testing directory
    testing_dir = os.path.join(top_dir, 'testing')
    if not os.path.exists(testing_dir):
        raise Exception("Cannot locate testing directory at %s" % testing_dir)

    # Run either the verifier or generator
    if generate:
        raise NotImplementedError
    if verify:
        from perfdocs.verifier import Verifier

        verifier = Verifier(testing_dir, top_dir)
        verifier.validate_tree()
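
For context, a hypothetical verify-only invocation of this older signature, mirroring `./mach lint -l perfdocs` (the logger and path here are illustrative):

from unittest import mock

# Assumes WORKSPACE points at a checkout with a `testing` directory.
run_perfdocs(
    config={},
    logger=mock.MagicMock(),
    paths=["testing/raptor/perfdocs"],
    verify=True,
)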