Example #1
import os
import webbrowser
from threading import Timer

# Module-level imports inferred from the tryselect package this snippet
# comes from.
from tryselect.push import check_working_directory, push_to_try
from tryselect.tasks import generate_tasks


def run(update=False, query=None, templates=None, full=False, parameters=None,
        save=False, preset=None, mod_presets=False, push=True, message='{msg}',
        closed_tree=False):
    from .app import create_application
    check_working_directory(push)

    tg = generate_tasks(parameters, full)
    app = create_application(tg)

    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
        # we are in the reloader process, don't open the browser or do any try stuff
        app.run()
        return

    # give app a second to start before opening the browser
    url = 'http://127.0.0.1:5000'
    Timer(1, lambda: webbrowser.open(url)).start()
    print("Starting trychooser on {}".format(url))
    app.run()

    selected = app.tasks
    if not selected:
        print("no tasks selected")
        return

    msg = "Try Chooser Enhanced ({} tasks selected)".format(len(selected))
    return push_to_try('chooser', message.format(msg=msg), selected, templates, push=push,
                       closed_tree=closed_tree)
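The WERKZEUG_RUN_MAIN check above guards against Werkzeug's reloader: the reloader re-executes the module in a child process with that variable set to "true", so only the parent process should open a browser tab. A minimal, self-contained sketch of the same pattern, assuming Flask is installed (the route and URL are placeholders):

import os
import webbrowser
from threading import Timer

from flask import Flask

app = Flask(__name__)


@app.route("/")
def index():
    return "trychooser placeholder"


if __name__ == "__main__":
    # The reloader re-runs this module with WERKZEUG_RUN_MAIN=true;
    # only the parent opens the browser, otherwise every code reload
    # would spawn another tab.
    if os.environ.get("WERKZEUG_RUN_MAIN") != "true":
        Timer(1, lambda: webbrowser.open("http://127.0.0.1:5000")).start()
    app.run(use_reloader=True)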
Example #2
import os
import webbrowser
from threading import Timer

# Module-level imports inferred from the mozilla-central tryselect layout;
# the module housing filter_by_uncommon_try_tasks is an assumption.
from tryselect.push import (
    check_working_directory,
    generate_try_task_config,
    push_to_try,
)
from tryselect.tasks import generate_tasks
from tryselect.util.fzf import filter_by_uncommon_try_tasks


def run(
    update=False,
    query=None,
    try_config=None,
    full=False,
    parameters=None,
    save=False,
    preset=None,
    mod_presets=False,
    push=True,
    message="{msg}",
    closed_tree=False,
):
    from .app import create_application

    check_working_directory(push)

    tg = generate_tasks(parameters, full)

    # Remove tasks that are not to be shown unless `--full` is specified.
    if not full:
        blacklisted_tasks = [
            label
            for label in tg.tasks.keys()
            if not filter_by_uncommon_try_tasks(label)
        ]
        for task in blacklisted_tasks:
            tg.tasks.pop(task)

    app = create_application(tg)

    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        # we are in the reloader process, don't open the browser or do any try stuff
        app.run()
        return

    # give app a second to start before opening the browser
    url = "http://127.0.0.1:5000"
    Timer(1, lambda: webbrowser.open(url)).start()
    print("Starting trychooser on {}".format(url))
    app.run()

    selected = app.tasks
    if not selected:
        print("no tasks selected")
        return

    msg = "Try Chooser Enhanced ({} tasks selected)".format(len(selected))
    return push_to_try(
        "chooser",
        message.format(msg=msg),
        try_task_config=generate_try_task_config("chooser", selected, try_config),
        push=push,
        closed_tree=closed_tree,
    )
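Example #2 adds a pruning pass: unless --full is given, every label that fails filter_by_uncommon_try_tasks is dropped from the task graph. The real predicate lives in tryselect; below is a hypothetical stand-in showing the same prune-by-predicate shape (the patterns are illustrative assumptions, not the actual blacklist):

import re

# Hypothetical stand-in for filter_by_uncommon_try_tasks: returns True
# for labels that should be shown by default. Patterns are assumptions.
UNCOMMON = [re.compile(p) for p in (r"-ccov", r"-shippable", r"fuzzing")]


def filter_by_uncommon_try_tasks(label):
    return not any(p.search(label) for p in UNCOMMON)


tasks = {
    "test-linux1804-64-ccov/opt-mochitest": {},
    "test-linux1804-64/opt-mochitest": {},
}
# Collect the doomed labels first, then pop, so the dict is not
# mutated while it is being iterated.
for label in [l for l in tasks if not filter_by_uncommon_try_tasks(l)]:
    tasks.pop(label)
print(sorted(tasks))  # ['test-linux1804-64/opt-mochitest']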
Example #3
import os
import re


def run_perfdocs(config, logger=None, paths=None, generate=True):
    """
    Build up performance testing documentation dynamically by combining
    text data from YAML files that reside in `perfdoc` folders
    across the `testing` directory. Each directory is expected to have
    an `index.rst` file along with `config.yml` YAMLs defining what needs
    to be added to the documentation.

    The YAML must also define the name of the "framework" that should be
    used in the main index.rst for the performance testing documentation.

    The testing documentation list will be ordered alphabetically once
    it's produced (to avoid unwanted shifts because of unordered dicts
    and path searching).

    Note that the suite name headings will be given the H4 (---) style so it
    is suggested that you use H3 (===) style as the heading for your
    test section. H5 will be used for individual tests within each
    suite.

    Usage for verification: ./mach lint -l perfdocs
    Usage for generation: ./mach lint -l perfdocs --fix

    For validation, see the Verifier class for a description of how
    it works.

    The run will fail if validate_tree flags any warning or problem,
    i.e. if PerfDocLogger.FAILED ends up set.

    :param dict config: The configuration given by mozlint.
    :param StructuredLogger logger: The StructuredLogger instance to be used to
        output the linting warnings/errors.
    :param list paths: The paths that are being tested. Used to filter
        out errors from files outside of these paths.
    :param bool generate: If true, the docs will be (re)generated.
    """
    from perfdocs.logger import PerfDocLogger

    top_dir = os.environ.get("WORKSPACE", None)
    if not top_dir:
        floc = os.path.abspath(__file__)
        top_dir = floc.split("tools")[0]
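    # top_dir is used as a regex pattern by re.sub() below, so double the
    # backslashes to keep Windows path separators literal.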
    top_dir = top_dir.replace("\\", "\\\\")

    PerfDocLogger.LOGGER = logger
    PerfDocLogger.TOP_DIR = top_dir

    # Convert all the paths to relative ones
    rel_paths = [re.sub(top_dir, "", path) for path in paths]
    PerfDocLogger.PATHS = rel_paths

    target_dir = [os.path.join(top_dir, i) for i in rel_paths]
    for path in target_dir:
        if not os.path.exists(path):
            raise Exception("Cannot locate directory at %s" % path)

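    # In CI the decision task's full-task-graph artifact can be reused;
    # locally, the task graph is generated from scratch.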
    decision_task_id = os.environ.get("DECISION_TASK_ID", None)
    if decision_task_id:
        from gecko_taskgraph.util.taskcluster import get_artifact

        task_graph = get_artifact(decision_task_id,
                                  "public/full-task-graph.json")
    else:
        from tryselect.tasks import generate_tasks

        task_graph = generate_tasks(params=None,
                                    full=True,
                                    disable_target_task_filter=True).tasks

    # Late import because logger isn't defined until later
    from perfdocs.generator import Generator
    from perfdocs.verifier import Verifier

    # Run the verifier first
    verifier = Verifier(top_dir, task_graph)
    verifier.validate_tree()

    if not PerfDocLogger.FAILED:
        # Even if the tree is valid, we need to check if the documentation
        # needs to be regenerated, and if it does, we throw a linting error.
        # `generate` dictates whether or not the documentation is generated.
        generator = Generator(verifier, generate=generate, workspace=top_dir)
        generator.generate_perfdocs()
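The backslash-doubling in run_perfdocs exists because re.sub() treats top_dir as a regex pattern. A minimal sketch of the same relativization step using re.escape, which neutralizes every metacharacter rather than just backslashes (the paths are made up):

import os
import re

top_dir = "/mozilla/workspace"
paths = [os.path.join(top_dir, "testing", "raptor")]

# re.escape handles backslashes and all other regex metacharacters,
# so no manual replace("\\", "\\\\") step is needed.
rel_paths = [re.sub(re.escape(top_dir), "", p) for p in paths]
print(rel_paths)  # ['/testing/raptor']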