Example #1
def simple_run(tag, id):
    # Attempt to find a V4 run which declares that it matches this simple run
    # ID. We do this so we can preserve some URL compatibility for old
    # databases.
    if g.db_info.db_version != '0.4':
        return render_template("error.html",
                               message="""\
Invalid URL for version %r database.""" % (g.db_info.db_version, ))

    # Get the expected test suite.
    db = request.get_db()
    ts = db.testsuite[tag]

    # Look for a matched run.
    matched_run = ts.query(ts.Run).\
        filter(ts.Run.simple_run_id == id).\
        first()

    # If we found one, redirect to its report.
    if matched_run is not None:
        return redirect(
            db_url_for("v4_run", testsuite_name=tag, id=matched_run.id))

    # Otherwise, report an error.
    return render_template("error.html",
                           message="""\
Unable to find a v0.4 run for this ID. Please use the native v0.4 URL interface
(instead of the /simple/... URL schema).""")
Example #2
def simple_run(tag, id):
    # Attempt to find a V4 run which declares that it matches this simple run
    # ID. We do this so we can preserve some URL compatibility for old
    # databases.
    if g.db_info.db_version != '0.4':
        return render_template("error.html", message="""\
Invalid URL for version %r database.""" % (g.db_info.db_version,))

    # Get the expected test suite.
    db = request.get_db()
    ts = db.testsuite[tag]

    # Look for a matched run.
    matched_run = ts.query(ts.Run).\
        filter(ts.Run.simple_run_id == id).\
        first()

    # If we found one, redirect to its report.
    if matched_run is not None:
        return redirect(db_url_for("v4_run", testsuite_name=tag,
                                   id=matched_run.id))

    # Otherwise, report an error.
    return render_template("error.html", message="""\
Unable to find a v0.4 run for this ID. Please use the native v0.4 URL interface
(instead of the /simple/... URL schema).""")
Example #3
def v4_summary_report_ui():
    # If this is a POST request, update the saved config.
    if request.method == 'POST':
        # Parse the config data.
        config_data = request.form.get('config')
        config = flask.json.loads(config_data)

        # Write the updated config.
        with open(get_summary_config_path(), 'w') as f:
            flask.json.dump(config, f, indent=2)

        # Redirect to the summary report.
        return redirect(db_url_for("v4_summary_report"))

    config_path = get_summary_config_path()
    if os.path.exists(config_path):
        with open(config_path) as f:
            config = flask.json.load(f)
    else:
        config = {
            "machine_names": [],
            "orders": [],
            "machine_patterns": [],
        }

    # Get the list of available test suites.
    testsuites = request.get_db().testsuite.values()

    # Gather the list of all run orders and all machines.
    def to_key(name):
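        # Orders whose leading component is numeric (e.g. "12345" or
        # "12345.1") should sort numerically; otherwise fall back to plain
        # string ordering.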
        first = name.split('.', 1)[0]
        if first.isdigit():
            return (int(first), name)
        return (first, name)

    all_machines = set()
    all_orders = set()
    for ts in testsuites:
        for name, in ts.query(ts.Machine.name):
            all_machines.add(name)
        for name, in ts.query(ts.Order.llvm_project_revision):
            all_orders.add(name)
    all_machines = sorted(all_machines)
    all_orders = sorted(all_orders, key=to_key)

    return render_template("v4_summary_report_ui.html",
                           config=config,
                           all_machines=all_machines,
                           all_orders=all_orders)
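
The POST branch of this view expects the updated configuration as JSON in the 'config' form field. A minimal client-side sketch of exercising it (the URL is hypothetical; only the field name and the config keys come from the code above):

import json

import requests  # any HTTP client would do

config = {
    "machine_names": ["some-machine"],
    "orders": [],
    "machine_patterns": ["some-machine-.*"],
}
# Submit the JSON config as the 'config' form field, as the view expects.
response = requests.post("http://lnt.example.org/summary_report_ui",  # hypothetical URL
                         data={"config": json.dumps(config)})
print(response.status_code)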
Example #4
def v4_summary_report_ui():
    # If this is a POST request, update the saved config.
    if request.method == 'POST':
        # Parse the config data.
        config_data = request.form.get('config')
        config = flask.json.loads(config_data)

        # Write the updated config.
        with open(get_summary_config_path(), 'w') as f:
            flask.json.dump(config, f, indent=2)

        # Redirect to the summary report.
        return redirect(db_url_for("v4_summary_report"))

    config_path = get_summary_config_path()
    if os.path.exists(config_path):
        with open(config_path) as f:
            config = flask.json.load(f)
    else:
        config = {
            "machine_names": [],
            "orders": [],
            "machine_patterns": [],
        }

    # Get the list of available test suites.
    testsuites = request.get_db().testsuite.values()

    # Gather the list of all run orders and all machines.
    def to_key(name):
        first = name.split('.', 1)[0]
        if first.isdigit():
            return (int(first), name)
        return (first, name)
    all_machines = set()
    all_orders = set()
    for ts in testsuites:
        for name, in ts.query(ts.Machine.name):
            all_machines.add(name)
        for name, in ts.query(ts.Order.llvm_project_revision):
            all_orders.add(name)
    all_machines = sorted(all_machines)
    all_orders = sorted(all_orders, key=to_key)

    return render_template("v4_summary_report_ui.html",
                           config=config, all_machines=all_machines,
                           all_orders=all_orders)
Example #5
    def __init__(self, run_id, only_html_body=True):
        self.db = request.get_db()
        self.ts = ts = request.get_testsuite()
        self.run = run = ts.query(ts.Run).filter_by(id=run_id).first()
        if run is None:
            abort(404)

        # Get the aggregation function to use.
        aggregation_fn_name = request.args.get('aggregation_fn')
        self.aggregation_fn = {'min': lnt.util.stats.safe_min,
                               'median': lnt.util.stats.median}.get(
            aggregation_fn_name, lnt.util.stats.safe_min)

        # Get the Mann-Whitney (MW) confidence level.
        try:
            confidence_lv = float(request.args.get('MW_confidence_lv'))
        except (TypeError, ValueError):
            confidence_lv = .05
        self.confidence_lv = confidence_lv

        # Find the neighboring runs, by order.
        prev_runs = list(ts.get_previous_runs_on_machine(run, N=3))
        next_runs = list(ts.get_next_runs_on_machine(run, N=3))
        self.neighboring_runs = next_runs[::-1] + [self.run] + prev_runs

        # Select the comparison run as either the previous run or a
        # user-specified comparison run.
        compare_to_str = request.args.get('compare_to')
        if compare_to_str:
            compare_to_id = int(compare_to_str)
            self.compare_to = ts.query(ts.Run).\
                filter_by(id=compare_to_id).first()
            if self.compare_to is None:
                # FIXME: Need better way to report this error.
                abort(404)

            self.comparison_neighboring_runs = (
                list(ts.get_next_runs_on_machine(self.compare_to, N=3))[::-1] +
                [self.compare_to] +
                list(ts.get_previous_runs_on_machine(self.compare_to, N=3)))
        else:
            if prev_runs:
                self.compare_to = prev_runs[0]
            else:
                self.compare_to = None
            self.comparison_neighboring_runs = self.neighboring_runs

        try:
            self.num_comparison_runs = int(
                request.args.get('num_comparison_runs'))
        except (TypeError, ValueError):
            self.num_comparison_runs = 0

        # Find the baseline run, if requested.
        baseline_str = request.args.get('baseline')
        if baseline_str:
            baseline_id = int(baseline_str)
            self.baseline = ts.query(ts.Run).\
                filter_by(id=baseline_id).first()
            if self.baseline is None:
                # FIXME: Need better way to report this error.
                abort(404)
        else:
            self.baseline = None

        # Gather the runs to use for statistical data.
        comparison_start_run = self.compare_to or self.run

        reports = lnt.server.reporting.runs.generate_run_report(
            self.run, baseurl=db_url_for('index', _external=True),
            only_html_body=only_html_body, result=None,
            compare_to=self.compare_to, baseline=self.baseline,
            num_comparison_runs=self.num_comparison_runs,
            aggregation_fn=self.aggregation_fn, confidence_lv=confidence_lv)
        _, self.text_report, self.html_report, self.sri = reports
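
Everything this constructor does is driven by query parameters on the incoming request. For illustration only (the parameter names below are the ones read via request.args above; the values are made up), the corresponding query string could be built like this:

from urllib.parse import urlencode

# Hypothetical parameter values; the names match the request.args lookups
# in the constructor above.
params = urlencode({
    'compare_to': 120,
    'baseline': 100,
    'aggregation_fn': 'median',
    'MW_confidence_lv': 0.01,
    'num_comparison_runs': 10,
})
print(params)
# compare_to=120&baseline=100&aggregation_fn=median&MW_confidence_lv=0.01&num_comparison_runs=10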
Example #6
    def __init__(self, run_id, only_html_body=True):
        self.db = request.get_db()
        self.ts = ts = request.get_testsuite()
        self.run = run = ts.query(ts.Run).filter_by(id=run_id).first()
        if run is None:
            abort(404)

        # Get the aggregation function to use.
        aggregation_fn_name = request.args.get('aggregation_fn')
        self.aggregation_fn = {'min': lnt.util.stats.safe_min,
                               'median': lnt.util.stats.median}.get(
            aggregation_fn_name, lnt.util.stats.safe_min)

        # Get the Mann-Whitney (MW) confidence level.
        try:
            confidence_lv = float(request.args.get('MW_confidence_lv'))
        except (TypeError, ValueError):
            confidence_lv = .05
        self.confidence_lv = confidence_lv

        # Find the neighboring runs, by order.
        prev_runs = list(ts.get_previous_runs_on_machine(run, N=3))
        next_runs = list(ts.get_next_runs_on_machine(run, N=3))
        self.neighboring_runs = next_runs[::-1] + [self.run] + prev_runs

        # Select the comparison run as either the previous run or a
        # user-specified comparison run.
        compare_to_str = request.args.get('compare_to')
        if compare_to_str:
            compare_to_id = int(compare_to_str)
            self.compare_to = ts.query(ts.Run).\
                filter_by(id=compare_to_id).first()
            if self.compare_to is None:
                # FIXME: Need better way to report this error.
                abort(404)

            self.comparison_neighboring_runs = (
                list(ts.get_next_runs_on_machine(self.compare_to, N=3))[::-1] +
                [self.compare_to] +
                list(ts.get_previous_runs_on_machine(self.compare_to, N=3)))
        else:
            if prev_runs:
                self.compare_to = prev_runs[0]
            else:
                self.compare_to = None
            self.comparison_neighboring_runs = self.neighboring_runs

        try:
            self.num_comparison_runs = int(
                request.args.get('num_comparison_runs'))
        except (TypeError, ValueError):
            self.num_comparison_runs = 0

        # Find the baseline run, if requested.
        baseline_str = request.args.get('baseline')
        if baseline_str:
            baseline_id = int(baseline_str)
            self.baseline = ts.query(ts.Run).\
                filter_by(id=baseline_id).first()
            if self.baseline is None:
                # FIXME: Need better way to report this error.
                abort(404)
        else:
            self.baseline = None

        # Gather the runs to use for statistical data.
        comparison_start_run = self.compare_to or self.run

        # We're going to render this on a real webpage with CSS support, so
        # override the default styles and provide bootstrap class names for
        # the tables.
        styles = {
            'body': '', 'td': '',
            'h1': 'font-size: 14pt',
            'table': 'width: initial; font-size: 9pt;',
            'th': 'text-align: center;'
        }
        classes = {
            'table': 'table table-striped table-condensed table-hover'
        }

        reports = lnt.server.reporting.runs.generate_run_report(
            self.run, baseurl=db_url_for('index', _external=True),
            only_html_body=only_html_body, result=None,
            compare_to=self.compare_to, baseline=self.baseline,
            num_comparison_runs=self.num_comparison_runs,
            aggregation_fn=self.aggregation_fn, confidence_lv=confidence_lv,
            styles=styles, classes=classes)
        _, self.text_report, self.html_report, self.sri = reports
Example #7
    def __init__(self, run_id, only_html_body=True):
        self.db = request.get_db()
        self.ts = ts = request.get_testsuite()
        self.run = run = ts.query(ts.Run).filter_by(id=run_id).first()
        if run is None:
            abort(404)

        # Get the aggregation function to use.
        aggregation_fn_name = request.args.get('aggregation_fn')
        self.aggregation_fn = {
            'min': lnt.util.stats.safe_min,
            'median': lnt.util.stats.median
        }.get(aggregation_fn_name, lnt.util.stats.safe_min)

        # Get the Mann-Whitney (MW) confidence level.
        try:
            confidence_lv = float(request.args.get('MW_confidence_lv'))
        except (TypeError, ValueError):
            confidence_lv = .05
        self.confidence_lv = confidence_lv

        # Find the neighboring runs, by order.
        prev_runs = list(ts.get_previous_runs_on_machine(run, N=3))
        next_runs = list(ts.get_next_runs_on_machine(run, N=3))
        self.neighboring_runs = next_runs[::-1] + [self.run] + prev_runs

        # Select the comparison run as either the previous run or a
        # user-specified comparison run.
        compare_to_str = request.args.get('compare_to')
        if compare_to_str:
            compare_to_id = int(compare_to_str)
            self.compare_to = ts.query(ts.Run).\
                filter_by(id=compare_to_id).first()
            if self.compare_to is None:
                # FIXME: Need better way to report this error.
                abort(404)

            self.comparison_neighboring_runs = (
                list(ts.get_next_runs_on_machine(self.compare_to, N=3))[::-1] +
                [self.compare_to] +
                list(ts.get_previous_runs_on_machine(self.compare_to, N=3)))
        else:
            if prev_runs:
                self.compare_to = prev_runs[0]
            else:
                self.compare_to = None
            self.comparison_neighboring_runs = self.neighboring_runs

        try:
            self.num_comparison_runs = int(
                request.args.get('num_comparison_runs'))
        except (TypeError, ValueError):
            self.num_comparison_runs = 0

        # Find the baseline run, if requested.
        baseline_str = request.args.get('baseline')
        if baseline_str:
            baseline_id = int(baseline_str)
            self.baseline = ts.query(ts.Run).\
                filter_by(id=baseline_id).first()
            if self.baseline is None:
                # FIXME: Need better way to report this error.
                abort(404)
        else:
            self.baseline = None

        # Gather the runs to use for statistical data.
        comparison_start_run = self.compare_to or self.run

        reports = lnt.server.reporting.runs.generate_run_report(
            self.run,
            baseurl=db_url_for('index', _external=True),
            only_html_body=only_html_body,
            result=None,
            compare_to=self.compare_to,
            baseline=self.baseline,
            num_comparison_runs=self.num_comparison_runs,
            aggregation_fn=self.aggregation_fn,
            confidence_lv=confidence_lv)
        _, self.text_report, self.html_report, self.sri = reports