Example #1
def v4_machine(id):
    # Compute the list of associated runs, grouped by order.
    from lnt.server.ui import util

    # Gather all the runs on this machine.
    ts = request.get_testsuite()

    associated_runs = util.multidict(
        (run_order, r)
        for r, run_order in ts.query(ts.Run, ts.Order).\
            join(ts.Order).\
            filter(ts.Run.machine_id == id).\
            order_by(ts.Run.start_time.desc()))
    associated_runs = associated_runs.items()
    associated_runs.sort()

    if request.args.get('json'):
        json_obj = dict()
        machine_obj = ts.query(ts.Machine).filter(ts.Machine.id == id).one()
        json_obj['name'] = machine_obj.name
        json_obj['id'] = machine_obj.id
        json_obj['runs'] = []
        for order in associated_runs:
            rev = order[0].llvm_project_revision
            for run in order[1]:
                json_obj['runs'].append((run.id, rev,
                                         run.start_time.isoformat(), run.end_time.isoformat()))
        return flask.jsonify(**json_obj)
    try:
        return render_template("v4_machine.html",
                           testsuite_name=g.testsuite_name, id=id,
                           associated_runs=associated_runs)
    except NoResultFound:
        abort(404)
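
Note: the grouping above relies on lnt.server.ui.util.multidict. As a
hedged reference, here is a minimal stand-in with the behavior this code
assumes (not the actual LNT implementation; see lnt/server/ui/util.py):

# Hypothetical stand-in for lnt.server.ui.util.multidict, for illustration
# only: repeated keys accumulate their values into a list.
class multidict(object):
    def __init__(self, items=()):
        self.data = {}
        for key, value in items:
            self.data.setdefault(key, []).append(value)

    def __setitem__(self, key, value):
        # Assignment appends rather than overwrites.
        self.data.setdefault(key, []).append(value)

    def __getitem__(self, key):
        return self.data[key]

    def items(self):
        return self.data.items()

    def keys(self):
        return self.data.keys()
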
Example #2
def v4_machine(id):
    # Compute the list of associated runs, grouped by order.
    from lnt.server.ui import util

    # Gather all the runs on this machine.
    ts = request.get_testsuite()

    associated_runs = util.multidict(
        (run_order, r)
        for r, run_order in ts.query(ts.Run, ts.Order).\
            join(ts.Order).\
            filter(ts.Run.machine_id == id).\
            order_by(ts.Run.start_time.desc()))
    associated_runs = associated_runs.items()
    associated_runs.sort()

    if request.args.get('json'):
        json_obj = dict()
        machine_obj = ts.query(ts.Machine).filter(ts.Machine.id == id).one()
        json_obj['name'] = machine_obj.name
        json_obj['id'] = machine_obj.id
        json_obj['runs'] = []
        for order in associated_runs:
            rev = order[0].llvm_project_revision
            for run in order[1]:
                json_obj['runs'].append(
                    (run.id, rev, run.start_time.isoformat(),
                     run.end_time.isoformat()))
        return flask.jsonify(**json_obj)
    try:
        return render_template("v4_machine.html",
                               testsuite_name=g.testsuite_name,
                               id=id,
                               associated_runs=associated_runs)
    except NoResultFound:
        abort(404)
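
With ?json=1 in the query string, v4_machine returns a JSON payload shaped
roughly as follows; every value below is invented for illustration:

# Illustrative shape of the v4_machine ?json=1 response (values made up):
# {
#     "name": "some-machine.example.org",
#     "id": 42,
#     "runs": [
#         [1001, "152292", "2016-01-01T10:00:00", "2016-01-01T10:30:00"],
#         [1000, "152290", "2015-12-31T10:00:00", "2015-12-31T10:30:00"]
#     ]
# }
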
Example #3
def v4_global_status():
    from lnt.server.ui import util

    ts = request.get_testsuite()
    primary_fields = sorted(list(ts.Sample.get_primary_fields()),
                            key=lambda f: f.name)
    fields = dict((f.name, f) for f in primary_fields)

    # Get the latest run.
    latest = ts.query(ts.Run.start_time).\
        order_by(ts.Run.start_time.desc()).first()

    # If we found an entry, use that.
    if latest is not None:
        latest_date, = latest
    else:
        # Otherwise, just use today.
        latest_date = datetime.date.today()

    # Create a datetime for the day before the most recent run.
    yesterday = latest_date - datetime.timedelta(days=1)

    # Get arguments.
    revision = int(request.args.get('revision',
                                    ts.Machine.DEFAULT_BASELINE_REVISION))
    field = fields.get(request.args.get('field', None), primary_fields[0])

    # Get the list of all runs we might be interested in.
    recent_runs = ts.query(ts.Run).filter(ts.Run.start_time > yesterday).all()

    # Aggregate the runs by machine.
    recent_runs_by_machine = util.multidict()
    for run in recent_runs:
        recent_runs_by_machine[run.machine] = run

    # Get a sorted list of recent machines.
    recent_machines = sorted(recent_runs_by_machine.keys(),
                             key=lambda m: m.name)

    # We use periods in our machine names. CSS does not like this
    # since it uses periods to delimit classes. Thus we convert periods
    # in the names of our machines to dashes for use in CSS. It is
    # also convenient for our computations in the jinja page to have
    # access to this sanitized name.
    def get_machine_keys(m):
        m.css_name = m.name.replace('.', '-')
        return m
    recent_machines = map(get_machine_keys, recent_machines)

    # For each machine, build a table of the machine, the baseline run, and the
    # most recent run. We also compute a list of all the runs we are reporting
    # over.
    machine_run_info = []
    reported_run_ids = []

    for machine in recent_machines:
        runs = recent_runs_by_machine[machine]

        # Get the baseline run for this machine.
        baseline = machine.get_closest_previously_reported_run(revision)

        # Choose the "best" run to report on. We want the most recent one with
        # the most recent order.
        run = max(runs, key=lambda r: (r.order, r.start_time))

        machine_run_info.append((baseline, run))
        reported_run_ids.append(baseline.id)
        reported_run_ids.append(run.id)

    # Get the set of all tests reported in the recent runs.
    reported_tests = ts.query(ts.Test.id, ts.Test.name).filter(
        sqlalchemy.sql.exists('*', sqlalchemy.sql.and_(
            ts.Sample.run_id.in_(reported_run_ids),
            ts.Sample.test_id == ts.Test.id))).all()

    # Load all of the runs we are interested in.
    runinfo = lnt.server.reporting.analysis.RunInfo(ts, reported_run_ids)

    # Build the test matrix. This is a two-dimensional table indexed by
    # (machine-index, test-index), where each entry is the percent change.
    test_table = []
    for i, (test_id, test_name) in enumerate(reported_tests):
        # Create the row, starting with the test name and worst entry.
        row = [(test_id, test_name), None]

        # Compute comparison results for each machine.
        row.extend((runinfo.get_run_comparison_result(run, baseline, test_id,
                                                      field), run.id)
                   for baseline, run in machine_run_info)

        # Compute the worst cell value.
        row[1] = max(cr.pct_delta
                     for cr,_ in row[2:])

        test_table.append(row)

    # Order the table by worst regression.
    test_table.sort(key=lambda row: row[1], reverse=True)

    return render_template("v4_global_status.html",
                           ts=ts,
                           tests=test_table,
                           machines=recent_machines,
                           fields=primary_fields,
                           selected_field=field,
                           selected_revision=revision)
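
Each row appended to test_table above has the layout sketched below; the
test name and numbers are invented for illustration:

# Illustrative test_table row from v4_global_status (values made up):
#   [(7, "SingleSource/Benchmarks/foo"),  # (test_id, test_name)
#    3.2,                                 # worst pct_delta across machines
#    (<ComparisonResult>, 101),           # (comparison, run id), machine 1
#    (<ComparisonResult>, 102)]           # (comparison, run id), machine 2
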
Example #4
def v4_graph():
    from lnt.server.ui import util
    from lnt.testing import PASS
    from lnt.util import stats
    from lnt.external.stats import stats as ext_stats

    ts = request.get_testsuite()

    # Parse the view options.
    options = {}
    options['hide_lineplot'] = bool(request.args.get('hide_lineplot'))
    show_lineplot = not options['hide_lineplot']
    options['show_mad'] = show_mad = bool(request.args.get('show_mad'))
    options['show_stddev'] = show_stddev = bool(request.args.get('show_stddev'))
    options['hide_all_points'] = hide_all_points = bool(
        request.args.get('hide_all_points'))
    options['show_linear_regression'] = show_linear_regression = bool(
        request.args.get('show_linear_regression'))
    options['show_failures'] = show_failures = bool(
        request.args.get('show_failures'))
    options['normalize_by_median'] = normalize_by_median = bool(
        request.args.get('normalize_by_median'))
    options['show_moving_average'] = moving_average = bool(
        request.args.get('show_moving_average'))
    options['show_moving_median'] = moving_median = bool(
        request.args.get('show_moving_median'))
    options['moving_window_size'] = moving_window_size = int(
        request.args.get('moving_window_size', 10))
    options['hide_highlight'] = bool(
        request.args.get('hide_highlight'))
    show_highlight = not options['hide_highlight']

    def convert_revision(dotted):
        """Turn a version number like 489.2.10 into something
        that is ordered and sortable.
        For now 489.2.10 will be returned as a tuple of ints.
        """
        dotted = integral_rex.findall(dotted)
        return tuple([int(d) for d in dotted])

    # Load the graph parameters.
    graph_parameters = []
    for name, value in request.args.items():
        # Plots to graph are passed as::
        #
        #  plot.<unused>=<machine id>.<test id>.<field index>
        if not name.startswith(str('plot.')):
            continue

        # Ignore the extra part of the key, it is unused.
        machine_id_str, test_id_str, field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            test_id = int(test_id_str)
            field_index = int(field_index_str)
        except ValueError:
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
            test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
            field = ts.sample_fields[field_index]
        except NoResultFound:
            return abort(404)
        graph_parameters.append((machine, test, field))

    # Order the plots by machine name, test name and then field.
    graph_parameters.sort(key=lambda (m, t, f): (m.name, t.name, f.name))

    # Extract requested mean trend.
    mean_parameter = None
    for name, value in request.args.items():
        # Mean to graph is passed as:
        #
        #  mean=<machine id>.<field index>
        if name != 'mean':
            continue

        machine_id_str, field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            field_index = int(field_index_str)
        except ValueError:
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
        except NoResultFound:
            return abort(404)
        field = ts.sample_fields[field_index]

        mean_parameter = (machine, field)

    # Sanity check the arguments.
    if not graph_parameters and not mean_parameter:
        return render_template("error.html", message="Nothing to graph.")

    # Extract requested baselines, and their titles.
    baseline_parameters = []
    for name, value in request.args.items():
        # Baselines to graph are passed as:
        #
        #  baseline.title=<run id>
        if not name.startswith(str('baseline.')):
            continue

        baseline_title = name[len('baseline.'):]

        run_id_str = value
        try:
            run_id = int(run_id_str)
        except ValueError:
            return abort(400)

        try:
            run = ts.query(ts.Run).join(ts.Machine).\
                filter(ts.Run.id == run_id).one()
        except NoResultFound:
            err_msg = "The run {} was not found in the database.".format(run_id)
            return render_template("error.html",
                                   message=err_msg)

        baseline_parameters.append((run, baseline_title))

    # Create a region of interest for the run data if we are performing a
    # comparison.
    revision_range = None
    highlight_run_id = request.args.get('highlight_run')
    if show_highlight and highlight_run_id and highlight_run_id.isdigit():
        highlight_run = ts.query(ts.Run).filter_by(
            id=int(highlight_run_id)).first()
        if highlight_run is None:
            abort(404)

        # Find the neighboring runs, by order.
        prev_runs = list(ts.get_previous_runs_on_machine(highlight_run, N=1))
        if prev_runs:
            start_rev = prev_runs[0].order.llvm_project_revision
            end_rev = highlight_run.order.llvm_project_revision
            revision_range = {
                "start": convert_revision(start_rev),
                "end": convert_revision(end_rev) }

    # Build the graph data.
    legend = []
    graph_plots = []
    graph_datum = []
    overview_plots = []
    baseline_plots = []
    num_plots = len(graph_parameters)
    for i, (machine, test, field) in enumerate(graph_parameters):
        # Determine the base plot color.
        col = list(util.makeDarkColor(float(i) / num_plots))
        legend.append((machine, test.name, field.name, tuple(col)))

        # Load all the field values for this test on the same machine.
        #
        # FIXME: Don't join to Order here, aggregate this across all the tests
        # we want to load. Actually, we should just make this a single query.
        #
        # FIXME: Don't hard code field name.
        q = ts.query(field.column, ts.Order.llvm_project_revision,
                     ts.Run.start_time).\
            join(ts.Run).join(ts.Order).\
            filter(ts.Run.machine_id == machine.id).\
            filter(ts.Sample.test == test).\
            filter(field.column != None)

        # Unless all samples requested, filter out failing tests.
        if not show_failures:
            if field.status_field:
                q = q.filter((field.status_field.column == PASS) |
                             (field.status_field.column == None))

        # Aggregate by revision.
        data = util.multidict((rev, (val, date))
                              for val, rev, date in q).items()
        data.sort(key=lambda sample: convert_revision(sample[0]))
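        # data is now a list of (revision, [(value, date), ...]) pairs,
        # ordered by the numeric revision tuple.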

        graph_datum.append((test.name, data, col, field))

        # Get baselines for this line
        num_baselines = len(baseline_parameters)
        for baseline_id, (baseline, baseline_title) in enumerate(baseline_parameters):
            q_baseline = ts.query(field.column,
                                  ts.Order.llvm_project_revision,
                                  ts.Run.start_time, ts.Machine.name).\
                         join(ts.Run).join(ts.Order).join(ts.Machine).\
                         filter(ts.Run.id == baseline.id).\
                         filter(ts.Sample.test == test).\
                         filter(field.column != None)
            # If there are multiple samples, use their mean as the baseline.
            samples = []
            for sample in q_baseline:
                samples.append(sample[0])
            # Skip this baseline if there is no data.
            if not samples:
                continue
            mean = sum(samples)/len(samples)
            # Darken the baseline color to distinguish it from non-baselines.
            # Make a color closer to the sample than its neighbour.
            color_offset = float(baseline_id) / num_baselines / 2
            my_color = (i + color_offset) / num_plots
            dark_col = list(util.makeDarkerColor(my_color))
            str_dark_col = util.toColorString(dark_col)
            baseline_plots.append({'color': str_dark_col,
                                   'lineWidth': 2,
                                   'yaxis': {'from': mean, 'to': mean},
                                   'name': q_baseline[0].llvm_project_revision})
            baseline_name = "Baseline {} on {}".format(baseline_title,
                                                       q_baseline[0].name)
            legend.append((BaselineLegendItem(baseline_name, baseline.id),
                           test.name, field.name, dark_col))

    # Draw mean trend if requested.
    if mean_parameter:
        machine, field = mean_parameter
        test_name = 'Geometric Mean'

        col = (0, 0, 0)
        legend.append((machine, test_name, field.name, col))

        q = ts.query(sqlalchemy.sql.func.min(field.column),
                ts.Order.llvm_project_revision,
                sqlalchemy.sql.func.min(ts.Run.start_time)).\
            join(ts.Run).join(ts.Order).join(ts.Test).\
            filter(ts.Run.machine_id == machine.id).\
            filter(field.column != None).\
            group_by(ts.Order.llvm_project_revision, ts.Test)

        # Calculate geomean of each revision.
        data = util.multidict(((rev, date), val)
                              for val, rev, date in q).items()
        data = [(rev, [(lnt.server.reporting.analysis.calc_geomean(vals), date)])
                for ((rev, date), vals) in data]

        # Sort data points according to revision number.
        data.sort(key=lambda sample: convert_revision(sample[0]))

        graph_datum.append((test_name, data, col, field))

    for name, data, col, field in graph_datum:
        # Compute the graph points.
        errorbar_data = []
        points_data = []
        pts = []
        moving_median_data = []
        moving_average_data = []

        if normalize_by_median:
            normalize_by = 1.0 / stats.median(
                [min([d[0] for d in values]) for _, values in data])
        else:
            normalize_by = 1.0

        for pos, (point_label, datapoints) in enumerate(data):
            # Get the samples.
            data = [data_date[0] for data_date in datapoints]
            # And the date on which they were taken.
            dates = [data_date[1] for data_date in datapoints]

            # When we can, map x-axis to revisions, but when that is too hard
            # use the position of the sample instead.
            rev_x = convert_revision(point_label)
            x = rev_x[0] if len(rev_x) == 1 else pos

            values = [v*normalize_by for v in data]
            aggregation_fn = min
            if field.bigger_is_better:
                aggregation_fn = max
            agg_value, agg_index = \
                aggregation_fn((value, index)
                               for (index, value) in enumerate(values))

            # Generate metadata.
            metadata = {"label":point_label}
            metadata["date"] = str(dates[agg_index])
            if len(graph_datum) > 1:
                # If there is more than one plot in the graph, also label the
                # test name.
                metadata["test_name"] = name

            pts.append((x, agg_value, metadata))

            # Add the individual points, if requested.
            # For each point add a text label for the mouse over.
            if not hide_all_points:
                for i, v in enumerate(values):
                    point_metadata = dict(metadata)
                    point_metadata["date"] = str(dates[i])
                    points_data.append((x, v, point_metadata))
            
            # Add the standard deviation error bar, if requested.
            if show_stddev:
                mean = stats.mean(values)
                sigma = stats.standard_deviation(values)
                errorbar_data.append((x, mean, sigma))

            # Add the MAD error bar, if requested.
            if show_mad:
                med = stats.median(values)
                mad = stats.median_absolute_deviation(values, med)
                errorbar_data.append((x, med, mad))

        # Compute the moving average and/or moving median of our data if
        # requested.
        if moving_average or moving_median:
            fun = None

            def compute_moving_average(x, window, average_list, median_list):
                average_list.append((x, lnt.util.stats.mean(window)))
            def compute_moving_median(x, window, average_list, median_list):
                median_list.append((x, lnt.util.stats.median(window)))
            def compute_moving_average_and_median(x, window, average_list, median_list):
                average_list.append((x, lnt.util.stats.mean(window)))
                median_list.append((x, lnt.util.stats.median(window)))

            if moving_average and moving_median:
                fun = compute_moving_average_and_median
            elif moving_average:
                fun = compute_moving_average
            else:
                fun = compute_moving_median

            len_pts = len(pts)
            for i in range(len_pts):
                start_index = max(0, i - moving_window_size)
                end_index = min(len_pts, i + moving_window_size)

                window_pts = [x[1] for x in pts[start_index:end_index]]
                fun(pts[i][0], window_pts, moving_average_data, moving_median_data)

        # On the overview, we always show the line plot.
        overview_plots.append({
                "data" : pts,
                "color" : util.toColorString(col) })

        # Add the minimum line plot, if requested.
        if show_lineplot:
            graph_plots.append({
                    "data" : pts,
                    "color" : util.toColorString(col) })

        # Add regression line, if requested.
        if show_linear_regression:
            xs = [t for t,v,_ in pts]
            ys = [v for t,v,_ in pts]

            # We compute the regression line in terms of a normalized X scale.
            x_min, x_max = min(xs), max(xs)
            try:
                norm_xs = [(x - x_min) / (x_max - x_min)
                           for x in xs]
            except ZeroDivisionError:
                norm_xs = xs

            try:
                info = ext_stats.linregress(norm_xs, ys)
            except ZeroDivisionError:
                info = None
            except ValueError:
                info = None

            if info is not None:
                slope, intercept, _, _, _ = info

                reglin_col = [c * .7 for c in col]
                reglin_pts = [(x_min, 0.0 * slope + intercept),
                              (x_max, 1.0 * slope + intercept)]
                graph_plots.insert(0, {
                        "data" : reglin_pts,
                        "color" : util.toColorString(reglin_col),
                        "lines" : {
                            "lineWidth" : 2 },
                        "shadowSize" : 4 })

        # Add the points plot, if used.
        if points_data:
            pts_col = (0, 0, 0)
            graph_plots.append({
                    "data" : points_data,
                    "color" : util.toColorString(pts_col),
                    "lines" : {
                        "show" : False },
                    "points" : {
                        "show" : True,
                        "radius" : .25,
                        "fill" : True } })

        # Add the error bar plot, if used.
        if errorbar_data:
            bar_col = [c * .7 for c in col]
            graph_plots.append({
                    "data" : errorbar_data,
                    "lines" : { "show" : False },
                    "color" : util.toColorString(bar_col),
                    "points" : {
                        "errorbars" : "y",
                        "yerr" : { "show" : True,
                                   "lowerCap" : "-",
                                   "upperCap" : "-",
                                   "lineWidth" : 1 } } })

        # Add the moving average plot, if used.
        if moving_average_data:
            col = [0.32, 0.6, 0.0]
            graph_plots.append({
                    "data" : moving_average_data,
                    "color" : util.toColorString(col) })


        # Add the moving median plot, if used.
        if moving_median_data:
            col = [0.75, 0.0, 1.0]
            graph_plots.append({
                    "data" : moving_median_data,
                    "color" : util.toColorString(col) })

    if bool(request.args.get('json')):
        json_obj = dict()
        json_obj['data'] = graph_plots
        # Flatten ORM machine objects to their string names.
        simple_type_legend = []
        for machine, test, unit, color in legend:
            # Flatten name, make color a dict.
            new_entry = {'name': machine.name,
                         'test': test,
                         'unit': unit,
                         'color': util.toColorString(color),}
            simple_type_legend.append(new_entry)
        json_obj['legend'] = simple_type_legend
        json_obj['revision_range'] = revision_range
        json_obj['current_options'] = options
        json_obj['test_suite_name'] = ts.name
        json_obj['baselines'] = baseline_plots
        return flask.jsonify(**json_obj)

    return render_template("v4_graph.html", ts=ts, options=options,
                           revision_range=revision_range,
                           graph_plots=graph_plots,
                           overview_plots=overview_plots, legend=legend,
                           baseline_plots=baseline_plots)
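
The moving-window loop above can be exercised in isolation. Below is a
minimal standalone sketch, with a plain arithmetic mean standing in for
lnt.util.stats.mean (an assumption, not the LNT implementation):

# Standalone sketch of the moving-average window computed in v4_graph.
def moving_average(pts, window_size):
    # pts is a list of (x, y, metadata) tuples, as built in the loop above.
    result = []
    for i in range(len(pts)):
        start = max(0, i - window_size)
        end = min(len(pts), i + window_size)
        window = [p[1] for p in pts[start:end]]
        result.append((pts[i][0], sum(window) / float(len(window))))
    return result

# For example:
#   moving_average([(0, 1.0, None), (1, 3.0, None), (2, 5.0, None)], 1)
#   returns [(0, 1.0), (1, 2.0), (2, 4.0)]
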
Example #5
def v4_global_status():
    from lnt.server.ui import util

    ts = request.get_testsuite()
    metric_fields = sorted(list(ts.Sample.get_metric_fields()),
                           key=lambda f: f.name)
    fields = dict((f.name, f) for f in metric_fields)

    # Get the latest run.
    latest = ts.query(ts.Run.start_time).\
        order_by(ts.Run.start_time.desc()).first()

    # If we found an entry, use that.
    if latest is not None:
        latest_date, = latest
    else:
        # Otherwise, just use today.
        latest_date = datetime.date.today()

    # Create a datetime for the day before the most recent run.
    yesterday = latest_date - datetime.timedelta(days=1)

    # Get arguments.
    revision = int(
        request.args.get('revision', ts.Machine.DEFAULT_BASELINE_REVISION))
    field = fields.get(request.args.get('field', None), metric_fields[0])

    # Get the list of all runs we might be interested in.
    recent_runs = ts.query(ts.Run).filter(ts.Run.start_time > yesterday).all()

    # Aggregate the runs by machine.
    recent_runs_by_machine = util.multidict()
    for run in recent_runs:
        recent_runs_by_machine[run.machine] = run

    # Get a sorted list of recent machines.
    recent_machines = sorted(recent_runs_by_machine.keys(),
                             key=lambda m: m.name)

    # We use periods in our machine names. CSS does not like this
    # since it uses periods to delimit classes. Thus we convert periods
    # in the names of our machines to dashes for use in CSS. It is
    # also convenient for our computations in the jinja page to have
    # access to this sanitized name.
    def get_machine_keys(m):
        m.css_name = m.name.replace('.', '-')
        return m

    recent_machines = map(get_machine_keys, recent_machines)

    # For each machine, build a table of the machine, the baseline run, and the
    # most recent run. We also compute a list of all the runs we are reporting
    # over.
    machine_run_info = []
    reported_run_ids = []

    for machine in recent_machines:
        runs = recent_runs_by_machine[machine]

        # Get the baseline run for this machine.
        baseline = machine.get_closest_previously_reported_run(revision)

        # Choose the "best" run to report on. We want the most recent one with
        # the most recent order.
        run = max(runs, key=lambda r: (r.order, r.start_time))

        machine_run_info.append((baseline, run))
        reported_run_ids.append(baseline.id)
        reported_run_ids.append(run.id)

    # Get the set of all tests reported in the recent runs.
    reported_tests = ts.query(ts.Test.id, ts.Test.name).filter(
        sqlalchemy.sql.exists(
            '*',
            sqlalchemy.sql.and_(ts.Sample.run_id.in_(reported_run_ids),
                                ts.Sample.test_id == ts.Test.id))).all()

    # Load all of the runs we are interested in.
    runinfo = lnt.server.reporting.analysis.RunInfo(ts, reported_run_ids)

    # Build the test matrix. This is a two-dimensional table indexed by
    # (machine-index, test-index), where each entry is the percent change.
    test_table = []
    for i, (test_id, test_name) in enumerate(reported_tests):
        # Create the row, starting with the test name and worst entry.
        row = [(test_id, test_name), None]

        # Compute comparison results for each machine.
        row.extend(
            (runinfo.get_run_comparison_result(run, baseline, test_id, field),
             run.id) for baseline, run in machine_run_info)

        # Compute the worst cell value.
        row[1] = max(cr.pct_delta for cr, _ in row[2:])

        test_table.append(row)

    # Order the table by worst regression.
    test_table.sort(key=lambda row: row[1], reverse=True)

    return render_template("v4_global_status.html",
                           ts=ts,
                           tests=test_table,
                           machines=recent_machines,
                           fields=metric_fields,
                           selected_field=field,
                           selected_revision=revision)
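
The "best" run selection above leans on Python's tuple comparison: max()
prefers the newest order and breaks ties by start_time. A toy illustration
with invented integer stand-ins (in the real code, r.order is an ORM Order
object with its own comparison):

# Toy illustration of the (order, start_time) key used by max() above.
import collections

Run = collections.namedtuple('Run', ['order', 'start_time'])
runs = [Run(order=100, start_time=1),
        Run(order=101, start_time=5),
        Run(order=101, start_time=7)]
best = max(runs, key=lambda r: (r.order, r.start_time))
# best == Run(order=101, start_time=7): newest order, then newest start.
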
Example #6
def v4_graph():
    from lnt.server.ui import util
    from lnt.testing import PASS
    from lnt.util import stats
    from lnt.external.stats import stats as ext_stats

    ts = request.get_testsuite()

    # Parse the view options.
    options = {}
    options['hide_lineplot'] = bool(request.args.get('hide_lineplot'))
    show_lineplot = not options['hide_lineplot']
    options['show_mad'] = show_mad = bool(request.args.get('show_mad'))
    options['show_stddev'] = show_stddev = bool(
        request.args.get('show_stddev'))
    options['hide_all_points'] = hide_all_points = bool(
        request.args.get('hide_all_points'))
    options['show_linear_regression'] = show_linear_regression = bool(
        request.args.get('show_linear_regression'))
    options['show_failures'] = show_failures = bool(
        request.args.get('show_failures'))
    options['normalize_by_median'] = normalize_by_median = bool(
        request.args.get('normalize_by_median'))
    options['show_moving_average'] = moving_average = bool(
        request.args.get('show_moving_average'))
    options['show_moving_median'] = moving_median = bool(
        request.args.get('show_moving_median'))
    options['moving_window_size'] = moving_window_size = int(
        request.args.get('moving_window_size', 10))
    options['hide_highlight'] = bool(request.args.get('hide_highlight'))
    show_highlight = not options['hide_highlight']

    def convert_revision(dotted):
        """Turn a version number like 489.2.10 into something
        that is ordered and sortable.
        For now 489.2.10 will be returned as a tuple of ints.
        """
        dotted = integral_rex.findall(dotted)
        return tuple([int(d) for d in dotted])

    # Load the graph parameters.
    graph_parameters = []
    for name, value in request.args.items():
        # Plots to graph are passed as::
        #
        #  plot.<unused>=<machine id>.<test id>.<field index>
        if not name.startswith(str('plot.')):
            continue

        # Ignore the extra part of the key, it is unused.
        machine_id_str, test_id_str, field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            test_id = int(test_id_str)
            field_index = int(field_index_str)
        except ValueError:
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
            test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
            field = ts.sample_fields[field_index]
        except NoResultFound:
            return abort(404)
        graph_parameters.append((machine, test, field, field_index))

    # Order the plots by machine name, test name and then field.
    graph_parameters.sort(
        key=lambda (m, t, f, fi): (m.name, t.name, f.name, fi))

    # Extract requested mean trend.
    mean_parameter = None
    for name, value in request.args.items():
        # Mean to graph is passed as:
        #
        #  mean=<machine id>.<field index>
        if name != 'mean':
            continue

        machine_id_str, field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            field_index = int(field_index_str)
        except ValueError:
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
        except NoResultFound:
            return abort(404)
        field = ts.sample_fields[field_index]

        mean_parameter = (machine, field)

    # Sanity check the arguments.
    if not graph_parameters and not mean_parameter:
        return render_template("error.html", message="Nothing to graph.")

    # Extract requested baselines, and their titles.
    baseline_parameters = []
    for name, value in request.args.items():
        # Baselines to graph are passed as:
        #
        #  baseline.title=<run id>
        if not name.startswith(str('baseline.')):
            continue

        baseline_title = name[len('baseline.'):]

        run_id_str = value
        try:
            run_id = int(run_id_str)
        except ValueError:
            return abort(400)

        try:
            run = ts.query(ts.Run).join(
                ts.Machine).filter(ts.Run.id == run_id).one()
        except NoResultFound:
            err_msg = "The run {} was not found in the database.".format(
                run_id)
            return render_template("error.html", message=err_msg)

        baseline_parameters.append((run, baseline_title))

    # Create a region of interest for the run data if we are performing a
    # comparison.
    revision_range = None
    highlight_run_id = request.args.get('highlight_run')
    if show_highlight and highlight_run_id and highlight_run_id.isdigit():
        highlight_run = ts.query(
            ts.Run).filter_by(id=int(highlight_run_id)).first()
        if highlight_run is None:
            abort(404)

        # Find the neighboring runs, by order.
        prev_runs = list(ts.get_previous_runs_on_machine(highlight_run, N=1))
        if prev_runs:
            start_rev = prev_runs[0].order.llvm_project_revision
            end_rev = highlight_run.order.llvm_project_revision
            revision_range = {
                "start": convert_revision(start_rev),
                "end": convert_revision(end_rev)
            }

    # Build the graph data.
    legend = []
    graph_plots = []
    graph_datum = []
    overview_plots = []
    baseline_plots = []
    num_plots = len(graph_parameters)
    for i, (machine, test, field, field_index) in enumerate(graph_parameters):
        # Determine the base plot color.
        col = list(util.makeDarkColor(float(i) / num_plots))
        url = "/".join([str(machine.id), str(test.id), str(field_index)])
        legend.append((machine, test.name, field.name, tuple(col), url))

        # Load all the field values for this test on the same machine.
        #
        # FIXME: Don't join to Order here, aggregate this across all the tests
        # we want to load. Actually, we should just make this a single query.
        #
        # FIXME: Don't hard code field name.
        q = ts.query(field.column, ts.Order.llvm_project_revision,
                     ts.Run.start_time, ts.Run.id).\
            join(ts.Run).join(ts.Order).\
            filter(ts.Run.machine_id == machine.id).\
            filter(ts.Sample.test == test).\
            filter(field.column != None)

        # Unless all samples requested, filter out failing tests.
        if not show_failures:
            if field.status_field:
                q = q.filter((field.status_field.column == PASS)
                             | (field.status_field.column == None))

        # Aggregate by revision.
        data = util.multidict((rev, (val, date, run_id))
                              for val, rev, date, run_id in q).items()
        data.sort(key=lambda sample: convert_revision(sample[0]))
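        # data is now a list of (revision, [(value, date, run_id), ...])
        # pairs, ordered by the numeric revision tuple.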

        graph_datum.append((test.name, data, col, field, url))

        # Get baselines for this line
        num_baselines = len(baseline_parameters)
        for baseline_id, (baseline,
                          baseline_title) in enumerate(baseline_parameters):
            q_baseline = ts.query(field.column,
                                  ts.Order.llvm_project_revision,
                                  ts.Run.start_time, ts.Machine.name).\
                         join(ts.Run).join(ts.Order).join(ts.Machine).\
                         filter(ts.Run.id == baseline.id).\
                         filter(ts.Sample.test == test).\
                         filter(field.column != None)
            # If there are multiple samples, use their mean as the baseline.
            samples = []
            for sample in q_baseline:
                samples.append(sample[0])
            # Skip this baseline if there is no data.
            if not samples:
                continue
            mean = sum(samples) / len(samples)
            # Darken the baseline color to distinguish it from non-baselines.
            # Make a color closer to the sample than its neighbour.
            color_offset = float(baseline_id) / num_baselines / 2
            my_color = (i + color_offset) / num_plots
            dark_col = list(util.makeDarkerColor(my_color))
            str_dark_col = util.toColorString(dark_col)
            baseline_plots.append({
                'color': str_dark_col,
                'lineWidth': 2,
                'yaxis': {
                    'from': mean,
                    'to': mean
                },
                'name': q_baseline[0].llvm_project_revision
            })
            baseline_name = "Baseline {} on {}".format(baseline_title,
                                                       q_baseline[0].name)
            legend.append((BaselineLegendItem(baseline_name, baseline.id),
                           test.name, field.name, dark_col))

    # Draw mean trend if requested.
    if mean_parameter:
        machine, field = mean_parameter
        test_name = 'Geometric Mean'

        col = (0, 0, 0)
        legend.append((machine, test_name, field.name, col, None))

        q = ts.query(sqlalchemy.sql.func.min(field.column),
                ts.Order.llvm_project_revision,
                sqlalchemy.sql.func.min(ts.Run.start_time)).\
            join(ts.Run).join(ts.Order).join(ts.Test).\
            filter(ts.Run.machine_id == machine.id).\
            filter(field.column != None).\
            group_by(ts.Order.llvm_project_revision, ts.Test)

        # Calculate geomean of each revision.
        data = util.multidict(
            ((rev, date), val) for val, rev, date in q).items()
        data = [(rev, [(lnt.server.reporting.analysis.calc_geomean(vals), date)
                       ]) for ((rev, date), vals) in data]

        # Sort data points according to revision number.
        data.sort(key=lambda sample: convert_revision(sample[0]))

        graph_datum.append((test_name, data, col, field, None))

    for name, data, col, field, url in graph_datum:
        # Compute the graph points.
        errorbar_data = []
        points_data = []
        pts = []
        moving_median_data = []
        moving_average_data = []

        if normalize_by_median:
            normalize_by = 1.0 / stats.median(
                [min([d[0] for d in values]) for _, values in data])
        else:
            normalize_by = 1.0

        for pos, (point_label, datapoints) in enumerate(data):
            # Get the samples.
            data = [data_date[0] for data_date in datapoints]
            # And the date on which they were taken.
            dates = [data_date[1] for data_date in datapoints]
            # Runs in which these samples were collected.
            runs = [
                data_pts[2] for data_pts in datapoints if len(data_pts) == 3
            ]

            # When we can, map x-axis to revisions, but when that is too hard
            # use the position of the sample instead.
            rev_x = convert_revision(point_label)
            x = rev_x[0] if len(rev_x) == 1 else pos

            values = [v * normalize_by for v in data]
            aggregation_fn = min
            if field.bigger_is_better:
                aggregation_fn = max
            agg_value, agg_index = \
                aggregation_fn((value, index)
                               for (index, value) in enumerate(values))

            # Generate metadata.
            metadata = {"label": point_label}
            metadata["date"] = str(dates[agg_index])
            if runs:
                metadata["runID"] = str(runs[agg_index])

            if len(graph_datum) > 1:
                # If there is more than one plot in the graph, also label the
                # test name.
                metadata["test_name"] = name

            pts.append((x, agg_value, metadata))

            # Add the individual points, if requested.
            # For each point add a text label for the mouse over.
            if not hide_all_points:
                for i, v in enumerate(values):
                    point_metadata = dict(metadata)
                    point_metadata["date"] = str(dates[i])
                    points_data.append((x, v, point_metadata))

            # Add the standard deviation error bar, if requested.
            if show_stddev:
                mean = stats.mean(values)
                sigma = stats.standard_deviation(values)
                errorbar_data.append((x, mean, sigma))

            # Add the MAD error bar, if requested.
            if show_mad:
                med = stats.median(values)
                mad = stats.median_absolute_deviation(values, med)
                errorbar_data.append((x, med, mad))

        # Compute the moving average and/or moving median of our data if
        # requested.
        if moving_average or moving_median:
            fun = None

            def compute_moving_average(x, window, average_list, median_list):
                average_list.append((x, lnt.util.stats.mean(window)))

            def compute_moving_median(x, window, average_list, median_list):
                median_list.append((x, lnt.util.stats.median(window)))

            def compute_moving_average_and_median(x, window, average_list,
                                                  median_list):
                average_list.append((x, lnt.util.stats.mean(window)))
                median_list.append((x, lnt.util.stats.median(window)))

            if moving_average and moving_median:
                fun = compute_moving_average_and_median
            elif moving_average:
                fun = compute_moving_average
            else:
                fun = compute_moving_median

            len_pts = len(pts)
            for i in range(len_pts):
                start_index = max(0, i - moving_window_size)
                end_index = min(len_pts, i + moving_window_size)

                window_pts = [x[1] for x in pts[start_index:end_index]]
                fun(pts[i][0], window_pts, moving_average_data,
                    moving_median_data)

        # On the overview, we always show the line plot.
        overview_plots.append({"data": pts, "color": util.toColorString(col)})

        # Add the minimum line plot, if requested.
        if show_lineplot:
            plot = {"data": pts, "color": util.toColorString(col)}
            if url:
                plot["url"] = url
            graph_plots.append(plot)
        # Add regression line, if requested.
        if show_linear_regression:
            xs = [t for t, v, _ in pts]
            ys = [v for t, v, _ in pts]

            # We compute the regression line in terms of a normalized X scale.
            x_min, x_max = min(xs), max(xs)
            try:
                norm_xs = [(x - x_min) / (x_max - x_min) for x in xs]
            except ZeroDivisionError:
                norm_xs = xs

            try:
                info = ext_stats.linregress(norm_xs, ys)
            except ZeroDivisionError:
                info = None
            except ValueError:
                info = None

            if info is not None:
                slope, intercept, _, _, _ = info

                reglin_col = [c * .7 for c in col]
                reglin_pts = [(x_min, 0.0 * slope + intercept),
                              (x_max, 1.0 * slope + intercept)]
                graph_plots.insert(
                    0, {
                        "data": reglin_pts,
                        "color": util.toColorString(reglin_col),
                        "lines": {
                            "lineWidth": 2
                        },
                        "shadowSize": 4
                    })

        # Add the points plot, if used.
        if points_data:
            pts_col = (0, 0, 0)
            plot = {
                "data": points_data,
                "color": util.toColorString(pts_col),
                "lines": {
                    "show": False
                },
                "points": {
                    "show": True,
                    "radius": .25,
                    "fill": True
                }
            }
            if url:
                plot['url'] = url
            graph_plots.append(plot)

        # Add the error bar plot, if used.
        if errorbar_data:
            bar_col = [c * .7 for c in col]
            graph_plots.append({
                "data": errorbar_data,
                "lines": {
                    "show": False
                },
                "color": util.toColorString(bar_col),
                "points": {
                    "errorbars": "y",
                    "yerr": {
                        "show": True,
                        "lowerCap": "-",
                        "upperCap": "-",
                        "lineWidth": 1
                    }
                }
            })

        # Add the moving average plot, if used.
        if moving_average_data:
            col = [0.32, 0.6, 0.0]
            graph_plots.append({
                "data": moving_average_data,
                "color": util.toColorString(col)
            })

        # Add the moving median plot, if used.
        if moving_median_data:
            col = [0.75, 0.0, 1.0]
            graph_plots.append({
                "data": moving_median_data,
                "color": util.toColorString(col)
            })

    if bool(request.args.get('json')):
        json_obj = dict()
        json_obj['data'] = graph_plots
        # Flatten ORM machine objects to their string names.
        simple_type_legend = []
        for machine, test, unit, color, url in legend:
            # Flatten name, make color a dict.
            new_entry = {
                'name': machine.name,
                'test': test,
                'unit': unit,
                'color': util.toColorString(color),
                'url': url
            }
            simple_type_legend.append(new_entry)
        json_obj['legend'] = simple_type_legend
        json_obj['revision_range'] = revision_range
        json_obj['current_options'] = options
        json_obj['test_suite_name'] = ts.name
        json_obj['baselines'] = baseline_plots
        return flask.jsonify(**json_obj)

    return render_template("v4_graph.html",
                           ts=ts,
                           options=options,
                           revision_range=revision_range,
                           graph_plots=graph_plots,
                           overview_plots=overview_plots,
                           legend=legend,
                           baseline_plots=baseline_plots)
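
With ?json=1, v4_graph flattens each legend tuple into a plain dict and
returns a payload shaped roughly like the following; all names and values
below are invented for illustration:

# Illustrative shape of the v4_graph ?json=1 response (values made up):
# {
#     "data": [...],                      # graph_plots
#     "legend": [{"name": "some-machine.example.org",
#                 "test": "foo",
#                 "unit": "exec_time",
#                 "color": "#55aa33",
#                 "url": "1/5/2"}],
#     "revision_range": {"start": [152290], "end": [152292]},
#     "current_options": {...},           # the parsed view options
#     "test_suite_name": "nts",
#     "baselines": [...]                  # baseline_plots
# }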