Example #1
    def __init__(self,
                 aggregation_fn,
                 cur_failed,
                 prev_failed,
                 samples,
                 prev_samples,
                 cur_hash,
                 prev_hash,
                 cur_profile=None,
                 prev_profile=None,
                 confidence_lv=0.05,
                 bigger_is_better=False):
        self.aggregation_fn = aggregation_fn

        # Special case: if we're using the minimum to aggregate, swap it for
        # max if bigger_is_better.
        if aggregation_fn == stats.safe_min and bigger_is_better:
            aggregation_fn = stats.safe_max

        self.cur_hash = cur_hash
        self.prev_hash = prev_hash
        self.cur_profile = cur_profile
        self.prev_profile = prev_profile

        if samples:
            self.current = aggregation_fn(samples)
        else:
            self.current = None

        self.previous = None

        # Compute the comparison status for the test value.
        self.delta = 0
        self.pct_delta = 0.0
        if self.current and prev_samples:
            self.delta, value = absmin_diff(self.current, prev_samples)
            if value != 0:
                self.pct_delta = self.delta / value
            self.previous = value

        # If we have multiple values for this run, use that to estimate the
        # distribution.
        #
        # We can get integer sample types here - for example if the field is
        # .exec.status. Make sure we don't assert by avoiding the stats
        # functions in this case.
        if samples and len(samples) > 1 and isinstance(samples[0], float):
            self.stddev = stats.standard_deviation(samples)
            self.MAD = stats.median_absolute_deviation(samples)
        else:
            self.stddev = None
            self.MAD = None

        self.failed = cur_failed
        self.prev_failed = prev_failed
        self.samples = samples
        self.prev_samples = prev_samples

        self.confidence_lv = confidence_lv
        self.bigger_is_better = bigger_is_better
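
A minimal, self-contained sketch of the comparison logic above. The absmin_diff stand-in and the sample values are assumptions for illustration only; the real helper and the stats module live elsewhere in LNT.

# Sketch only: simplified stand-in for LNT's absmin_diff, not the real helper.
def absmin_diff(current, prev_samples):
    # Pick the previous sample closest to `current` and return
    # (current - closest, closest).
    diffs = [(abs(current - p), current - p, p) for p in prev_samples]
    _, delta, reference = min(diffs)
    return delta, reference

samples = [10.2, 10.4, 10.1]       # hypothetical current samples
prev_samples = [10.9, 11.0, 10.8]  # hypothetical previous samples

current = min(samples)             # aggregation_fn = safe_min (smaller is better)
delta, previous = absmin_diff(current, prev_samples)
pct_delta = delta / previous if previous != 0 else 0.0
print("delta=%.3f pct_delta=%.3f" % (delta, pct_delta))  # delta=-0.700 pct_delta=-0.065
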
Example #2
    def __init__(self, aggregation_fn,
                 cur_failed, prev_failed, samples, prev_samples,
                 cur_hash, prev_hash, cur_profile=None, prev_profile=None,
                 confidence_lv=0.05, bigger_is_better=False):
        self.aggregation_fn = aggregation_fn

        # Special case: if we're using the minimum to aggregate, swap it for
        # max if bigger_is_better.
        if aggregation_fn == stats.safe_min and bigger_is_better:
            aggregation_fn = stats.safe_max

        self.cur_hash = cur_hash
        self.prev_hash = prev_hash
        self.cur_profile = cur_profile
        self.prev_profile = prev_profile

        if samples:
            self.current = aggregation_fn(samples)
        else:
            self.current = None

        self.previous = None

        # Compute the comparison status for the test value.
        self.delta = 0
        self.pct_delta = 0.0
        if self.current and prev_samples:
            self.delta, value = absmin_diff(self.current, prev_samples)
            if value != 0:
                self.pct_delta = self.delta / value
            self.previous = value

        # If we have multiple values for this run, use that to estimate the
        # distribution.
        #
        # We can get integer sample types here - for example if the field is
        # .exec.status. Make sure we don't assert by avoiding the stats
        # functions in this case.
        if samples and len(samples) > 1 and isinstance(samples[0], float):
            self.stddev = stats.standard_deviation(samples)
            self.MAD = stats.median_absolute_deviation(samples)
        else:
            self.stddev = None
            self.MAD = None

        self.stddev_mean = None  # Only calculate this if needed.
        self.failed = cur_failed
        self.prev_failed = prev_failed
        self.samples = samples
        self.prev_samples = prev_samples

        self.confidence_lv = confidence_lv
        self.bigger_is_better = bigger_is_better
Example #3
def v4_graph():
    from lnt.server.ui import util
    from lnt.testing import PASS
    from lnt.util import stats
    from lnt.external.stats import stats as ext_stats

    ts = request.get_testsuite()

    # Parse the view options.
    options = {}
    options['hide_lineplot'] = bool(request.args.get('hide_lineplot'))
    show_lineplot = not options['hide_lineplot']
    options['show_mad'] = show_mad = bool(request.args.get('show_mad'))
    options['show_stddev'] = show_stddev = bool(request.args.get('show_stddev'))
    options['hide_all_points'] = hide_all_points = bool(
        request.args.get('hide_all_points'))
    options['show_linear_regression'] = show_linear_regression = bool(
        request.args.get('show_linear_regression'))
    options['show_failures'] = show_failures = bool(
        request.args.get('show_failures'))
    options['normalize_by_median'] = normalize_by_median = bool(
        request.args.get('normalize_by_median'))
    options['show_moving_average'] = moving_average = bool(
        request.args.get('show_moving_average'))
    options['show_moving_median'] = moving_median = bool(
        request.args.get('show_moving_median'))
    options['moving_window_size'] = moving_window_size = int(
        request.args.get('moving_window_size', 10))
    options['hide_highlight'] = bool(
        request.args.get('hide_highlight'))
    show_highlight = not options['hide_highlight']

    def convert_revision(dotted):
        """Turn a version number like 489.2.10 into something
        that is ordered and sortable.
        For now 489.2.10 will be returned as a tuple of ints.
        """
        dotted = integral_rex.findall(dotted)
        return tuple([int(d) for d in dotted])
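    # Example (assuming integral_rex matches runs of digits, e.g.
    # re.compile(r'\d+')): convert_revision("489.2.10") -> (489, 2, 10),
    # which orders correctly as a tuple of ints.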

    # Load the graph parameters.
    graph_parameters = []
    for name,value in request.args.items():
        # Plots to graph are passed as:
        #
        #  plot.<unused>=<machine id>.<test id>.<field index>
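        #  e.g. plot.0=1.17.2 selects machine 1, test 17, sample field
        #  index 2 (hypothetical IDs).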
        if not name.startswith(str('plot.')):
            continue

        # Ignore the extra part of the key, it is unused.
        machine_id_str,test_id_str,field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            test_id = int(test_id_str)
            field_index = int(field_index_str)
        except ValueError:
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
            test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
            field = ts.sample_fields[field_index]
        except NoResultFound:
            return abort(404)
        graph_parameters.append((machine, test, field))

    # Order the plots by machine name, test name and then field.
    graph_parameters.sort(key = lambda (m,t,f): (m.name, t.name, f.name))

    # Extract requested mean trend.
    mean_parameter = None
    for name,value in request.args.items():
        # Mean to graph is passed as:
        #
        #  mean=<machine id>.<field index>
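        #  e.g. mean=1.2 requests the geometric-mean trend for sample field
        #  index 2 on machine 1 (hypothetical IDs).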
        if name != 'mean':
            continue

        machine_id_str, field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            field_index = int(field_index_str)
        except ValueError:
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
        except NoResultFound:
            return abort(404)
        field = ts.sample_fields[field_index]

        mean_parameter = (machine, field)

    # Sanity check the arguments.
    if not graph_parameters and not mean_parameter:
        return render_template("error.html", message="Nothing to graph.")

    # Extract requested baselines, and their titles.
    baseline_parameters = []
    for name,value in request.args.items():
        # Baselines to graph are passed as:
        #
        #  baseline.title=<run id>
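        #  e.g. baseline.stable=1234 draws a horizontal baseline titled
        #  "stable" at the mean sample value of run 1234 (hypothetical ID).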
        if not name.startswith(str('baseline.')):
            continue

        baseline_title = name[len('baseline.'):]

        run_id_str = value
        try:
            run_id = int(run_id_str)
        except ValueError:
            return abort(400)

        try:
            run = ts.query(ts.Run).join(ts.Machine).filter(ts.Run.id == run_id).one()
        except NoResultFound:
            err_msg = "The run {} was not found in the database.".format(run_id)
            return render_template("error.html",
                                   message=err_msg)

        baseline_parameters.append((run, baseline_title))

    # Create region of interest for run data region if we are performing a
    # comparison.
    revision_range = None
    highlight_run_id = request.args.get('highlight_run')
    if show_highlight and highlight_run_id and highlight_run_id.isdigit():
        highlight_run = ts.query(ts.Run).filter_by(
            id=int(highlight_run_id)).first()
        if highlight_run is None:
            abort(404)

        # Find the neighboring runs, by order.
        prev_runs = list(ts.get_previous_runs_on_machine(highlight_run, N = 1))
        if prev_runs:
            start_rev = prev_runs[0].order.llvm_project_revision
            end_rev = highlight_run.order.llvm_project_revision
            revision_range = {
                "start": convert_revision(start_rev),
                "end": convert_revision(end_rev) }

    # Build the graph data.
    legend = []
    graph_plots = []
    graph_datum = []
    overview_plots = []
    baseline_plots = []
    num_plots = len(graph_parameters)
    for i,(machine,test,field) in enumerate(graph_parameters):
        # Determine the base plot color.
        col = list(util.makeDarkColor(float(i) / num_plots))
        legend.append((machine, test.name, field.name, tuple(col)))

        # Load all the field values for this test on the same machine.
        #
        # FIXME: Don't join to Order here, aggregate this across all the tests
        # we want to load. Actually, we should just make this a single query.
        #
        # FIXME: Don't hard code field name.
        q = ts.query(field.column, ts.Order.llvm_project_revision, ts.Run.start_time).\
            join(ts.Run).join(ts.Order).\
            filter(ts.Run.machine_id == machine.id).\
            filter(ts.Sample.test == test).\
            filter(field.column != None)

        # Unless all samples requested, filter out failing tests.
        if not show_failures:
            if field.status_field:
                q = q.filter((field.status_field.column == PASS) |
                             (field.status_field.column == None))

        # Aggregate by revision.
        data = util.multidict((rev, (val, date)) for val,rev,date in q).items()
        data.sort(key=lambda sample: convert_revision(sample[0]))

        graph_datum.append((test.name, data, col, field))

        # Get baselines for this line
        num_baselines = len(baseline_parameters)
        for baseline_id, (baseline, baseline_title) in enumerate(baseline_parameters):
            q_baseline = ts.query(field.column, ts.Order.llvm_project_revision, ts.Run.start_time, ts.Machine.name).\
                         join(ts.Run).join(ts.Order).join(ts.Machine).\
                         filter(ts.Run.id == baseline.id).\
                         filter(ts.Sample.test == test).\
                         filter(field.column != None)
            # If there are multiple samples, use their mean as the baseline.
            samples = []
            for sample in q_baseline:
                samples.append(sample[0])
            # Skip this baseline if there is no data.
            if not samples:
                continue
            mean = sum(samples)/len(samples)
            # Darken the baseline color to distinguish it from non-baselines,
            # and offset it so it sits closer to its own sample line than to
            # the neighbouring plot's.
            color_offset = float(baseline_id) / num_baselines / 2
            my_color = (i + color_offset) / num_plots
            dark_col = list(util.makeDarkerColor(my_color))
            str_dark_col = util.toColorString(dark_col)
            baseline_plots.append({'color': str_dark_col,
                                   'lineWidth': 2,
                                   'yaxis': {'from': mean, 'to': mean},
                                   'name': q_baseline[0].llvm_project_revision})
            baseline_name = "Baseline {} on {}".format(baseline_title, q_baseline[0].name)
            legend.append((BaselineLegendItem(baseline_name, baseline.id), test.name, field.name, dark_col))

    # Draw mean trend if requested.
    if mean_parameter:
        machine, field = mean_parameter
        test_name = 'Geometric Mean'

        col = (0,0,0)
        legend.append((machine, test_name, field.name, col))

        q = ts.query(sqlalchemy.sql.func.min(field.column),
                ts.Order.llvm_project_revision,
                sqlalchemy.sql.func.min(ts.Run.start_time)).\
            join(ts.Run).join(ts.Order).join(ts.Test).\
            filter(ts.Run.machine_id == machine.id).\
            filter(field.column != None).\
            group_by(ts.Order.llvm_project_revision, ts.Test)

        # Calculate geomean of each revision.
        data = util.multidict(((rev, date), val) for val,rev,date in q).items()
        data = [(rev, [(lnt.server.reporting.analysis.calc_geomean(vals), date)])
                for ((rev, date), vals) in data]
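        # Each entry is now (rev, [(geomean_of_values, date)]), matching the
        # (label, [(value, date), ...]) shape built for the ordinary plots
        # above.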

        # Sort data points according to revision number.
        data.sort(key=lambda sample: convert_revision(sample[0]))

        graph_datum.append((test_name, data, col, field))

    for name, data, col, field in graph_datum:
        # Compute the graph points.
        errorbar_data = []
        points_data = []
        pts = []
        moving_median_data = []
        moving_average_data = []

        if normalize_by_median:
            normalize_by = 1.0/stats.median([min([d[0] for d in values])
                                           for _,values in data])
        else:
            normalize_by = 1.0

        for pos, (point_label, datapoints) in enumerate(data):
            # Get the samples.
            data = [data_date[0] for data_date in datapoints]
            # And the date on which they were taken.
            dates = [data_date[1] for data_date in datapoints]

            # When we can, map x-axis to revisions, but when that is too hard
            # use the position of the sample instead.
            rev_x = convert_revision(point_label)
            x = rev_x[0] if len(rev_x)==1 else pos

            values = [v*normalize_by for v in data]
            aggregation_fn = min
            if field.bigger_is_better:
                aggregation_fn = max
            agg_value, agg_index = \
                aggregation_fn((value, index)
                               for (index, value) in enumerate(values))
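            # Aggregating (value, index) pairs yields both the aggregated value
            # and the position it came from, so the matching date can be looked
            # up for the metadata below.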

            # Generate metadata.
            metadata = {"label":point_label}
            metadata["date"] = str(dates[agg_index])
            if len(graph_datum) > 1:
                # If there are more than one plot in the graph, also label the
                # test name.
                metadata["test_name"] = name

            pts.append((x, agg_value, metadata))

            # Add the individual points, if requested.
            # For each point add a text label for the mouse over.
            if not hide_all_points:
                for i,v in enumerate(values):
                    point_metadata = dict(metadata)
                    point_metadata["date"] = str(dates[i])
                    points_data.append((x, v, point_metadata))
            
            # Add the standard deviation error bar, if requested.
            if show_stddev:
                mean = stats.mean(values)
                sigma = stats.standard_deviation(values)
                errorbar_data.append((x, mean, sigma))

            # Add the MAD error bar, if requested.
            if show_mad:
                med = stats.median(values)
                mad = stats.median_absolute_deviation(values, med)
                errorbar_data.append((x, med, mad))

        # Compute the moving average and/or moving median of our data if requested.
        if moving_average or moving_median:
            fun = None

            def compute_moving_average(x, window, average_list, median_list):
                average_list.append((x, lnt.util.stats.mean(window)))
            def compute_moving_median(x, window, average_list, median_list):
                median_list.append((x, lnt.util.stats.median(window)))
            def compute_moving_average_and_median(x, window, average_list, median_list):
                average_list.append((x, lnt.util.stats.mean(window)))
                median_list.append((x, lnt.util.stats.median(window)))

            if moving_average and moving_median:
                fun = compute_moving_average_and_median
            elif moving_average:
                fun = compute_moving_average
            else:
                fun = compute_moving_median

            len_pts = len(pts)
            for i in range(len_pts):
                start_index = max(0, i - moving_window_size)
                end_index = min(len_pts, i + moving_window_size)
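                # The window spans [i - moving_window_size, i + moving_window_size),
                # clipped at both ends, so points near the edges are averaged
                # over fewer samples.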

                window_pts = [x[1] for x in pts[start_index:end_index]]
                fun(pts[i][0], window_pts, moving_average_data, moving_median_data)

        # On the overview, we always show the line plot.
        overview_plots.append({
                "data" : pts,
                "color" : util.toColorString(col) })

        # Add the minimum line plot, if requested.
        if show_lineplot:
            graph_plots.append({
                    "data" : pts,
                    "color" : util.toColorString(col) })

        # Add regression line, if requested.
        if show_linear_regression:
            xs = [t for t,v,_ in pts]
            ys = [v for t,v,_ in pts]

            # We compute the regression line in terms of a normalized X scale.
            x_min, x_max = min(xs), max(xs)
            try:
                norm_xs = [(x - x_min) / (x_max - x_min)
                           for x in xs]
            except ZeroDivisionError:
                norm_xs = xs

            try:
                info = ext_stats.linregress(norm_xs, ys)
            except ZeroDivisionError:
                info = None
            except ValueError:
                info = None

            if info is not None:
                slope, intercept,_,_,_ = info

                reglin_col = [c * .7 for c in col]
                reglin_pts = [(x_min, 0.0 * slope + intercept),
                              (x_max, 1.0 * slope + intercept)]
                graph_plots.insert(0, {
                        "data" : reglin_pts,
                        "color" : util.toColorString(reglin_col),
                        "lines" : {
                            "lineWidth" : 2 },
                        "shadowSize" : 4 })

        # Add the points plot, if used.
        if points_data:
            pts_col = (0,0,0)
            graph_plots.append({
                    "data" : points_data,
                    "color" : util.toColorString(pts_col),
                    "lines" : {
                        "show" : False },
                    "points" : {
                        "show" : True,
                        "radius" : .25,
                        "fill" : True } })

        # Add the error bar plot, if used.
        if errorbar_data:
            bar_col = [c*.7 for c in col]
            graph_plots.append({
                    "data" : errorbar_data,
                    "lines" : { "show" : False },
                    "color" : util.toColorString(bar_col),
                    "points" : {
                        "errorbars" : "y",
                        "yerr" : { "show" : True,
                                   "lowerCap" : "-",
                                   "upperCap" : "-",
                                   "lineWidth" : 1 } } })

        # Add the moving average plot, if used.
        if moving_average_data:
            col = [0.32, 0.6, 0.0]
            graph_plots.append({
                    "data" : moving_average_data,
                    "color" : util.toColorString(col) })


        # Add the moving median plot, if used.
        if moving_median_data:
            col = [0.75, 0.0, 1.0]
            graph_plots.append({
                    "data" : moving_median_data,
                    "color" : util.toColorString(col) })

    if bool(request.args.get('json')):
        json_obj = dict()
        json_obj['data'] = graph_plots
        # Flatten ORM machine objects to their string names.
        simple_type_legend = []
        for machine, test, unit, color in legend:
            # Flatten name, make color a dict.
            new_entry = {'name': machine.name,
                         'test': test,
                         'unit': unit,
                         'color': util.toColorString(color),}
            simple_type_legend.append(new_entry)
        json_obj['legend'] = simple_type_legend
        json_obj['revision_range'] = revision_range
        json_obj['current_options'] = options
        json_obj['test_suite_name'] = ts.name
        json_obj['baselines'] = baseline_plots
        return flask.jsonify(**json_obj)

    return render_template("v4_graph.html", ts=ts, options=options,
                           revision_range=revision_range,
                           graph_plots=graph_plots,
                           overview_plots=overview_plots, legend=legend,
                           baseline_plots=baseline_plots)
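
A hedged client-side sketch of fetching this view as JSON. The query parameter names (plot.N, baseline.<title>, json) come from the request parsing above; the host, database path, and IDs are hypothetical.

# Sketch only: hypothetical server URL and IDs; urllib2 matches the Python 2
# style of the surrounding code (use urllib.request on Python 3).
import json
import urllib2

url = ("http://lnt.example.org/db_default/v4/nts/graph"
       "?plot.0=1.17.2&baseline.stable=1234&json=1")
payload = json.load(urllib2.urlopen(url))
# Expect keys: 'data', 'legend', 'revision_range', 'current_options',
# 'test_suite_name', 'baselines'.
print(sorted(payload.keys()))
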
Example #4
class CompileTest(builtintest.BuiltinTest):
    def describe(self):
        return 'Single file compile-time performance testing'

    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" + usage_info) % locals())
        parser.add_option("-s",
                          "--sandbox",
                          dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str,
                          default=None,
                          metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--cc",
                         dest="cc",
                         type='str',
                         help="Path to the compiler under test",
                         action="store",
                         default=None)
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--ld",
            dest="ld",
            help="Path to the c linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option(
            "",
            "--ldxx",
            dest="ldxx",
            help="Path to the cxx linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--machine-param",
                         dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-param",
                         dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-order",
                         dest="run_order",
                         metavar="STR",
                         help="String to use to identify and order this run",
                         action="store",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--test-subdir",
            dest="test_subdir",
            help="Subdirectory of test external dir to look for tests in.",
            type=str,
            default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("",
                         "--no-memory-profiling",
                         dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--multisample",
                         dest="run_count",
                         metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store",
                         type=int,
                         default=3)
        group.add_option("",
                         "--min-sample-time",
                         dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N",
                         action="store",
                         type=float,
                         default=.5)
        group.add_option("",
                         "--save-temps",
                         dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true",
                         default=False)
        group.add_option(
            "",
            "--show-tests",
            dest="show_tests",
            help="Only list the availables tests that will be run",
            action="store_true",
            default=False)
        group.add_option("",
                         "--test",
                         dest="tests",
                         metavar="NAME",
                         help="Individual test to run",
                         action="append",
                         default=[])
        group.add_option("",
                         "--test-filter",
                         dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP",
                         action="append",
                         default=[])
        group.add_option("",
                         "--flags-to-test",
                         dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST",
                         action="append",
                         default=[])
        group.add_option("",
                         "--jobs-to-test",
                         dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM",
                         action="append",
                         default=[],
                         type=int)
        group.add_option("",
                         "--config-to-test",
                         dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME",
                         action="append",
                         default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--no-machdep-info",
                         dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--machine-name",
                         dest="machine_name",
                         type='str',
                         help="Machine name to use in submission [%default]",
                         action="store",
                         default=platform.uname()[1])
        group.add_option(
            "",
            "--submit",
            dest="submit_url",
            metavar="URLORPATH",
            help=("autosubmit the test result to the given server "
                  "(or local instance) [%default]"),
            type=str,
            default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option(
            "",
            "--output",
            dest="output",
            metavar="PATH",
            help="write raw report data to PATH (or stdout if '-')",
            action="store",
            default=None)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx, ))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" %
                         (opts.cc, ))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cc: %r" %
                         (opts.cc, ))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >> sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            report_name = "test-%s" % (timestamp().replace(' ', '_').replace(
                ':', '-'))
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" %
                             (g_output_dir, ))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(file_log_handler(os.path.join(output_dir,
                                                         'test.log')))
            log.addHandler(stderr_log_handler())
            return log

        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v',
                                              '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value
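        # e.g. --machine-param "hw.model=MacPro" stores
        # machine_info['hw.model'] = 'MacPro' (hypothetical value).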

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            note("inferred run order to be: %r" % (variables['run_order'], ))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            note(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = [('-O0',), ('-O0', '-g'), ('-Os', '-g'), ('-O3',)]
        else:
            flags_to_test = [
                string.split(' ') for string in opts.flags_to_test
            ]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(
            get_tests(opts.test_suite_externals, opts.test_subdir,
                      flags_to_test, jobs_to_test, configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >> sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >> sys.stderr, '  %s' % (name, )
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                parser.error(("invalid test names %s, use --show-tests to "
                              "see available tests") %
                             (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [
                re.compile(pattern) for pattern in opts.test_filters
            ]

            # Form the list of tests.
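            # A test is selected if it was named via --test, or if any
            # --test-filter regexp matches it (the inner list is non-empty,
            # and therefore truthy, when at least one filter matches).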
            tests_to_run = [
                test for test in all_tests if
                (test[0] in requested_tests or
                 [True for filter in test_filters if filter.search(test[0])])
            ]
        if not tests_to_run:
            parser.error(
                "no tests requested (invalid --test or --test-filter options)!"
            )

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples), )
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples), )
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' %
                           (num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name + '.status',
                                                [lnt.testing.FAIL]))
                if samples:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name, samples))
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts)

        return server_report
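
A hedged sketch of the report packaging done at the end of run_test, using only the lnt.testing constructors already called above; the machine name, machine parameters, sample values, and test name are made up.

# Sketch only: dummy data mirroring the Machine/Run/Report/TestSamples calls above.
from datetime import datetime

import lnt.testing

start_time = datetime.utcnow()
samples = [0.51, 0.49, 0.50]   # hypothetical compile times, in seconds
testsamples = [lnt.testing.TestSamples('compile.hello.o.compile', samples)]
end_time = datetime.utcnow()

machine = lnt.testing.Machine('my-machine', {'hw.model': 'MacPro'})
run = lnt.testing.Run(start_time, end_time,
                      info={'tag': 'compile', 'run_order': '1234'})
report = lnt.testing.Report(machine, run, testsamples)
# run_test() would then write this report out as report.json via print_report()
# and optionally submit it to a server.
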
Example #5
    def run_test(self, opts):

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            self._fatal('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                logger.info("inferred C++ compiler under test as: %r" %
                            (opts.cxx, ))

        if opts.cxx is None:
            self._fatal('--cxx is required (and could not be inferred)')

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            self._fatal("unable to determine absolute path for --cc: %r" %
                        (opts.cc, ))
        if not os.path.exists(cxx_abs):
            self._fatal("unable to determine absolute path for --cc: %r" %
                        (opts.cc, ))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >> sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                self._fatal("sandbox output directory %r already exists!" %
                            (g_output_dir, ))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(
                file_log_handler(os.path.join(output_dir, 'test.log')))
            log.addHandler(stderr_log_handler())
            return log

        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v',
                                              '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            logger.info("inferred run order to be: %r" %
                        (variables['run_order'], ))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            logger.info(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [
                string.split(' ') for string in opts.flags_to_test
            ]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(
            get_tests(opts.test_suite_externals, opts.test_subdir,
                      flags_to_test, jobs_to_test, configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >> sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >> sys.stderr, '  %s' % (name, )
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                self._fatal(("invalid test names %s, use --show-tests to "
                             "see available tests") %
                            (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [
                re.compile(pattern) for pattern in opts.test_filters
            ]

            # Form the list of tests.
            tests_to_run = [
                test for test in all_tests if
                (test[0] in requested_tests or
                 [True for filter in test_filters if filter.search(test[0])])
            ]
        if not tests_to_run:
            self._fatal("no tests requested "
                        "(invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        no_errors = True
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples), )
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples), )
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' %
                           (num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name + '.status',
                                                [lnt.testing.FAIL]))
                    no_errors = False
                if samples:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name, samples))
        run_info['no_errors'] = no_errors
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts, ts_name='compile')

        return server_report
Example #6
    def run_test(self, opts):

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            self._fatal('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                logger.info("inferred C++ compiler under test as: %r" %
                            (opts.cxx,))

        if opts.cxx is None:
            self._fatal('--cxx is required (and could not be inferred)')

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            self._fatal("unable to determine absolute path for --cc: %r" % (
                opts.cc,))
        if not os.path.exists(cxx_abs):
            self._fatal("unable to determine absolute path for --cc: %r" % (
                opts.cc,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                self._fatal("sandbox output directory %r already exists!" % (
                    g_output_dir,))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h
            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(file_log_handler(os.path.join(output_dir,
                                                         'test.log')))
            log.addHandler(stderr_log_handler())
            return log
        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version',
                           ('/usr/bin/as', '-v', '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            logger.info("inferred run order to be: %r" %
                        (variables['run_order'],))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            logger.info(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [string.split(' ')
                             for string in opts.flags_to_test]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(get_tests(opts.test_suite_externals, opts.test_subdir,
                                   flags_to_test, jobs_to_test,
                                   configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >>sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >>sys.stderr, '  %s' % (name,)
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                self._fatal(("invalid test names %s, use --show-tests to "
                             "see available tests") %
                            (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [re.compile(pattern)
                            for pattern in opts.test_filters]

            # Form the list of tests.
            tests_to_run = [test
                            for test in all_tests
                            if (test[0] in requested_tests or
                                [True
                                 for filter in test_filters
                                 if filter.search(test[0])])]
        if not tests_to_run:
            self._fatal("no tests requested "
                        "(invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        no_errors = True
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples),)
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples),)
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' % (
                    num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(lnt.testing.TestSamples(
                        test_name + '.status', [lnt.testing.FAIL]))
                    no_errors = False
                if samples:
                    testsamples.append(lnt.testing.TestSamples(
                        test_name, samples))
        run_info['no_errors'] = no_errors
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts, ts_name='compile')

        return server_report
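
The test-selection logic above (explicit --test names OR'd with --test-filter
regexes) boils down to a small helper. The following is an illustrative
standalone sketch, not LNT code, and it only mirrors the branch where at least
one name or filter was given:

import re

def select_tests(all_tests, requested_names, filter_patterns):
    """Keep (name, test_fn) pairs whose name was requested explicitly or
    matches any of the filter regexes."""
    requested = set(requested_names)
    filters = [re.compile(p) for p in filter_patterns]
    return [t for t in all_tests
            if t[0] in requested or any(f.search(t[0]) for f in filters)]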
Example #7
0
    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" +
             usage_info) % locals())
        parser.add_option("-s", "--sandbox", dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str, default=None, metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("", "--no-timestamp", dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false", default=True)
        group.add_option("", "--cc", dest="cc", type='str',
                         help="Path to the compiler under test",
                         action="store", default=None)
        group.add_option("", "--cxx", dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str, default=None)
        group.add_option("", "--ld", dest="ld",
                         help="Path to the c linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--ldxx", dest="ldxx",
                         help="Path to the cxx linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--test-externals", dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str, default=None, metavar="PATH")
        group.add_option("", "--machine-param", dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-param", dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-order", dest="run_order", metavar="STR",
                         help="String to use to identify and order this run",
                         action="store", type=str, default=None)
        group.add_option("", "--test-subdir", dest="test_subdir",
                         help="Subdirectory of test external dir to look for tests in.",
                         type=str, default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("", "--no-memory-profiling", dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false", default=True)
        group.add_option("", "--multisample", dest="run_count", metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store", type=int, default=3)
        group.add_option("", "--min-sample-time", dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N", action="store", type=float, default=.5)
        group.add_option("", "--save-temps", dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true", default=False)
        group.add_option("", "--show-tests", dest="show_tests",
                         help="Only list the availables tests that will be run",
                         action="store_true", default=False)
        group.add_option("", "--test", dest="tests", metavar="NAME",
                         help="Individual test to run",
                         action="append", default=[])
        group.add_option("", "--test-filter", dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP", action="append", default=[])
        group.add_option("", "--flags-to-test", dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST", action="append", default=[])
        group.add_option("", "--jobs-to-test", dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM", action="append", default=[], type=int)
        group.add_option("", "--config-to-test", dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME", action="append", default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("", "--no-machdep-info", dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false", default=True)
        group.add_option("", "--machine-name", dest="machine_name", type='str',
                         help="Machine name to use in submission [%default]",
                         action="store", default=platform.uname()[1])
        group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH",
                         help=("autosubmit the test result to the given server "
                               "(or local instance) [%default]"),
                         type=str, default=None)
        group.add_option("", "--commit", dest="commit",
                         help=("whether the autosubmit result should be committed "
                               "[%default]"),
                         type=int, default=True)
        group.add_option("", "--output", dest="output", metavar="PATH",
                         help="write raw report data to PATH (or stdout if '-')",
                         action="store", default=None)
        group.add_option("-v", "--verbose", dest="verbose",
                         help="show verbose test results",
                         action="store_true", default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx,))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" % (
                         opts.cc,))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cc: %r" % (
                         opts.cc,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
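            # Make the timestamp filesystem-friendly; assuming timestamp()
            # yields e.g. '2016-04-07 10:31:22', this produces a directory
            # named 'test-2016-04-07_10-31-22'.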
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" % (
                             g_output_dir,))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
                                      "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
                                      "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h
            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(file_log_handler(os.path.join(output_dir,
                                                         'test.log')))
            log.addHandler(stderr_log_handler())
            return log
        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v', '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            note("inferred run order to be: %r" % (variables['run_order'],))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            note(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [string.split(' ')
                             for string in opts.flags_to_test]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(get_tests(opts.test_suite_externals, opts.test_subdir,
                                   flags_to_test, jobs_to_test,
                                   configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >>sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >>sys.stderr, '  %s' % (name,)
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                    parser.error(("invalid test names %s, use --show-tests to "
                                  "see available tests") %
                                 (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [re.compile(pattern)
                            for pattern in opts.test_filters]

            # Form the list of tests.
            tests_to_run = [test
                            for test in all_tests
                            if (test[0] in requested_tests or
                                [True
                                 for filter in test_filters
                                 if filter.search(test[0])])]
        if not tests_to_run:
            parser.error(
                "no tests requested (invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples),)
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples),)
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' % (
                    num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(lnt.testing.TestSamples(
                                       test_name + '.status',
                                       [lnt.testing.FAIL]))
                if samples:
                    testsamples.append(lnt.testing.TestSamples(
                                       test_name, samples))
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts)

        return server_report
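
The --machine-param/--run-param handling above reduces to a tiny helper; here
is an illustrative standalone sketch (the name parse_params is not from LNT):

def parse_params(entries):
    """Turn ['a=1', 'b'] into {'a': '1', 'b': ''} (split on the first '=')."""
    info = {}
    for entry in entries:
        name, _, value = entry.partition('=')
        info[name] = value
    return info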
Example #8
0
def v4_graph():
    from lnt.server.ui import util
    from lnt.testing import PASS
    from lnt.util import stats
    from lnt.external.stats import stats as ext_stats

    ts = request.get_testsuite()

    # Parse the view options.
    options = {}
    options['hide_lineplot'] = bool(request.args.get('hide_lineplot'))
    show_lineplot = not options['hide_lineplot']
    options['show_mad'] = show_mad = bool(request.args.get('show_mad'))
    options['show_stddev'] = show_stddev = bool(
        request.args.get('show_stddev'))
    options['hide_all_points'] = hide_all_points = bool(
        request.args.get('hide_all_points'))
    options['show_linear_regression'] = show_linear_regression = bool(
        request.args.get('show_linear_regression'))
    options['show_failures'] = show_failures = bool(
        request.args.get('show_failures'))
    options['normalize_by_median'] = normalize_by_median = bool(
        request.args.get('normalize_by_median'))
    options['show_moving_average'] = moving_average = bool(
        request.args.get('show_moving_average'))
    options['show_moving_median'] = moving_median = bool(
        request.args.get('show_moving_median'))
    options['moving_window_size'] = moving_window_size = int(
        request.args.get('moving_window_size', 10))
    options['hide_highlight'] = bool(request.args.get('hide_highlight'))
    show_highlight = not options['hide_highlight']

    def convert_revision(dotted):
        """Turn a version number like 489.2.10 into something
        that is ordered and sortable.
        For now 489.2.10 will be returned as a tuple of ints.
        """
        dotted = integral_rex.findall(dotted)
        return tuple([int(d) for d in dotted])

    # Load the graph parameters.
    graph_parameters = []
    for name, value in request.args.items():
        # Plots to graph are passed as::
        #
        #  plot.<unused>=<machine id>.<test id>.<field index>
        if not name.startswith(str('plot.')):
            continue

        # Ignore the extra part of the key, it is unused.
        machine_id_str, test_id_str, field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            test_id = int(test_id_str)
            field_index = int(field_index_str)
        except ValueError:
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
            test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
            field = ts.sample_fields[field_index]
        except NoResultFound:
            return abort(404)
        graph_parameters.append((machine, test, field, field_index))

    # Order the plots by machine name, test name and then field.
    graph_parameters.sort(key=lambda (m, t, f, _): (m.name, t.name, f.name, _))

    # Extract requested mean trend.
    mean_parameter = None
    for name, value in request.args.items():
        # Mean to graph is passed as:
        #
        #  mean=<machine id>.<field index>
        if name != 'mean':
            continue

        machine_id_str, field_index_str = value.split('.')
        try:
            machine_id = int(machine_id_str)
            field_index = int(field_index_str)
        except ValueError:
            return abort(400)

        if not (0 <= field_index < len(ts.sample_fields)):
            return abort(404)

        try:
            machine = \
                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
        except NoResultFound:
            return abort(404)
        field = ts.sample_fields[field_index]

        mean_parameter = (machine, field)

    # Sanity check the arguments.
    if not graph_parameters and not mean_parameter:
        return render_template("error.html", message="Nothing to graph.")

    # Extract requested baselines, and their titles.
    baseline_parameters = []
    for name, value in request.args.items():
        # Baselines to graph are passed as:
        #
        #  baseline.title=<run id>
        if not name.startswith(str('baseline.')):
            continue

        baseline_title = name[len('baseline.'):]

        run_id_str = value
        try:
            run_id = int(run_id_str)
        except ValueError:
            return abort(400)

        try:
            run = ts.query(ts.Run).join(
                ts.Machine).filter(ts.Run.id == run_id).one()
        except NoResultFound:
            err_msg = "The run {} was not found in the database.".format(
                run_id)
            return render_template("error.html", message=err_msg)

        baseline_parameters.append((run, baseline_title))

    # Create a region of interest for the run data if we are performing a
    # comparison.
    revision_range = None
    highlight_run_id = request.args.get('highlight_run')
    if show_highlight and highlight_run_id and highlight_run_id.isdigit():
        highlight_run = ts.query(
            ts.Run).filter_by(id=int(highlight_run_id)).first()
        if highlight_run is None:
            abort(404)

        # Find the neighboring runs, by order.
        prev_runs = list(ts.get_previous_runs_on_machine(highlight_run, N=1))
        if prev_runs:
            start_rev = prev_runs[0].order.llvm_project_revision
            end_rev = highlight_run.order.llvm_project_revision
            revision_range = {
                "start": convert_revision(start_rev),
                "end": convert_revision(end_rev)
            }

    # Build the graph data.
    legend = []
    graph_plots = []
    graph_datum = []
    overview_plots = []
    baseline_plots = []
    num_plots = len(graph_parameters)
    for i, (machine, test, field, field_index) in enumerate(graph_parameters):
        # Determine the base plot color.
        col = list(util.makeDarkColor(float(i) / num_plots))
        url = "/".join([str(machine.id), str(test.id), str(field_index)])
        legend.append((machine, test.name, field.name, tuple(col), url))

        # Load all the field values for this test on the same machine.
        #
        # FIXME: Don't join to Order here, aggregate this across all the tests
        # we want to load. Actually, we should just make this a single query.
        #
        # FIXME: Don't hard code field name.
        q = ts.query(field.column, ts.Order.llvm_project_revision,
                     ts.Run.start_time, ts.Run.id).\
            join(ts.Run).join(ts.Order).\
            filter(ts.Run.machine_id == machine.id).\
            filter(ts.Sample.test == test).\
            filter(field.column != None)

        # Unless all samples are requested, filter out failing tests.
        if not show_failures:
            if field.status_field:
                q = q.filter((field.status_field.column == PASS)
                             | (field.status_field.column == None))

        # Aggregate by revision.
        data = util.multidict((rev, (val, date, run_id))
                              for val, rev, date, run_id in q).items()
        data.sort(key=lambda sample: convert_revision(sample[0]))

        graph_datum.append((test.name, data, col, field, url))

        # Get baselines for this line
        num_baselines = len(baseline_parameters)
        for baseline_id, (baseline,
                          baseline_title) in enumerate(baseline_parameters):
            q_baseline = ts.query(field.column,
                                  ts.Order.llvm_project_revision,
                                  ts.Run.start_time, ts.Machine.name).\
                join(ts.Run).join(ts.Order).join(ts.Machine).\
                filter(ts.Run.id == baseline.id).\
                filter(ts.Sample.test == test).\
                filter(field.column != None)
            # In the event of many samples, use the mean of the samples as
            # the baseline.
            samples = []
            for sample in q_baseline:
                samples.append(sample[0])
            # Skip this baseline if there is no data.
            if not samples:
                continue
            mean = sum(samples) / len(samples)
            # Darken the baseline color to distinguish it from non-baselines.
            # Make a color closer to the sample than its neighbour.
            color_offset = float(baseline_id) / num_baselines / 2
            my_color = (i + color_offset) / num_plots
            dark_col = list(util.makeDarkerColor(my_color))
            str_dark_col = util.toColorString(dark_col)
            baseline_plots.append({
                'color': str_dark_col,
                'lineWidth': 2,
                'yaxis': {
                    'from': mean,
                    'to': mean
                },
                'name': q_baseline[0].llvm_project_revision
            })
            baseline_name = "Baseline {} on {}".format(baseline_title,
                                                       q_baseline[0].name)
            legend.append((BaselineLegendItem(baseline_name, baseline.id),
                           test.name, field.name, dark_col))

    # Draw mean trend if requested.
    if mean_parameter:
        machine, field = mean_parameter
        test_name = 'Geometric Mean'

        col = (0, 0, 0)
        legend.append((machine, test_name, field.name, col, None))

        q = ts.query(sqlalchemy.sql.func.min(field.column),
                ts.Order.llvm_project_revision,
                sqlalchemy.sql.func.min(ts.Run.start_time)).\
            join(ts.Run).join(ts.Order).join(ts.Test).\
            filter(ts.Run.machine_id == machine.id).\
            filter(field.column != None).\
            group_by(ts.Order.llvm_project_revision, ts.Test)

        # Calculate geomean of each revision.
        data = util.multidict(
            ((rev, date), val) for val, rev, date in q).items()
        data = [(rev, [(lnt.server.reporting.analysis.calc_geomean(vals), date)
                       ]) for ((rev, date), vals) in data]

        # Sort data points according to revision number.
        data.sort(key=lambda sample: convert_revision(sample[0]))

        graph_datum.append((test_name, data, col, field, None))

    for name, data, col, field, url in graph_datum:
        # Compute the graph points.
        errorbar_data = []
        points_data = []
        pts = []
        moving_median_data = []
        moving_average_data = []

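        # When normalizing, scale every value by the reciprocal of the median
        # of the per-revision minima, so the plotted values cluster around 1.0.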
        if normalize_by_median:
            normalize_by = 1.0 / stats.median(
                [min([d[0] for d in values]) for _, values in data])
        else:
            normalize_by = 1.0

        for pos, (point_label, datapoints) in enumerate(data):
            # Get the samples.
            samples = [data_date[0] for data_date in datapoints]
            # And the date on which they were taken.
            dates = [data_date[1] for data_date in datapoints]
            # Run where this point was collected.
            runs = [
                data_pts[2] for data_pts in datapoints if len(data_pts) == 3
            ]

            # When we can, map x-axis to revisions, but when that is too hard
            # use the position of the sample instead.
            rev_x = convert_revision(point_label)
            x = rev_x[0] if len(rev_x) == 1 else pos

            values = [v * normalize_by for v in samples]
            aggregation_fn = min
            if field.bigger_is_better:
                aggregation_fn = max
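            # Aggregate over (value, index) pairs so that we recover both the
            # aggregated value and the index of the sample that produced it
            # (used below to pick the matching date and run).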
            agg_value, agg_index = \
                aggregation_fn((value, index)
                               for (index, value) in enumerate(values))

            # Generate metadata.
            metadata = {"label": point_label}
            metadata["date"] = str(dates[agg_index])
            if runs:
                metadata["runID"] = str(runs[agg_index])

            if len(graph_datum) > 1:
                # If there is more than one plot in the graph, also label the
                # test name.
                metadata["test_name"] = name

            pts.append((x, agg_value, metadata))

            # Add the individual points, if requested.
            # For each point add a text label for the mouse over.
            if not hide_all_points:
                for i, v in enumerate(values):
                    point_metadata = dict(metadata)
                    point_metadata["date"] = str(dates[i])
                    points_data.append((x, v, point_metadata))

            # Add the standard deviation error bar, if requested.
            if show_stddev:
                mean = stats.mean(values)
                sigma = stats.standard_deviation(values)
                errorbar_data.append((x, mean, sigma))

            # Add the MAD error bar, if requested.
            if show_mad:
                med = stats.median(values)
                mad = stats.median_absolute_deviation(values, med)
                errorbar_data.append((x, med, mad))

        # Compute the moving average and/or moving median of our data, if
        # requested.
        if moving_average or moving_median:
            fun = None

            def compute_moving_average(x, window, average_list, median_list):
                average_list.append((x, lnt.util.stats.mean(window)))

            def compute_moving_median(x, window, average_list, median_list):
                median_list.append((x, lnt.util.stats.median(window)))

            def compute_moving_average_and_median(x, window, average_list,
                                                  median_list):
                average_list.append((x, lnt.util.stats.mean(window)))
                median_list.append((x, lnt.util.stats.median(window)))

            if moving_average and moving_median:
                fun = compute_moving_average_and_median
            elif moving_average:
                fun = compute_moving_average
            else:
                fun = compute_moving_median

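            # Slide a window of up to moving_window_size points on either side
            # of each point and apply the selected aggregation(s) to it.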
            len_pts = len(pts)
            for i in range(len_pts):
                start_index = max(0, i - moving_window_size)
                end_index = min(len_pts, i + moving_window_size)

                window_pts = [x[1] for x in pts[start_index:end_index]]
                fun(pts[i][0], window_pts, moving_average_data,
                    moving_median_data)

        # On the overview, we always show the line plot.
        overview_plots.append({"data": pts, "color": util.toColorString(col)})

        # Add the minimum line plot, if requested.
        if show_lineplot:
            plot = {"data": pts, "color": util.toColorString(col)}
            if url:
                plot["url"] = url
            graph_plots.append(plot)
        # Add regression line, if requested.
        if show_linear_regression:
            xs = [t for t, v, _ in pts]
            ys = [v for t, v, _ in pts]

            # We compute the regression line in terms of a normalized X scale.
            x_min, x_max = min(xs), max(xs)
            try:
                norm_xs = [(x - x_min) / (x_max - x_min) for x in xs]
            except ZeroDivisionError:
                norm_xs = xs

            try:
                info = ext_stats.linregress(norm_xs, ys)
            except ZeroDivisionError:
                info = None
            except ValueError:
                info = None

            if info is not None:
                slope, intercept, _, _, _ = info

                reglin_col = [c * .7 for c in col]
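                # The fit was computed on x normalized to [0, 1], so evaluate
                # it at 0 and 1 and plot those endpoints at x_min and x_max.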
                reglin_pts = [(x_min, 0.0 * slope + intercept),
                              (x_max, 1.0 * slope + intercept)]
                graph_plots.insert(
                    0, {
                        "data": reglin_pts,
                        "color": util.toColorString(reglin_col),
                        "lines": {
                            "lineWidth": 2
                        },
                        "shadowSize": 4
                    })

        # Add the points plot, if used.
        if points_data:
            pts_col = (0, 0, 0)
            plot = {
                "data": points_data,
                "color": util.toColorString(pts_col),
                "lines": {
                    "show": False
                },
                "points": {
                    "show": True,
                    "radius": .25,
                    "fill": True
                }
            }
            if url:
                plot['url'] = url
            graph_plots.append(plot)

        # Add the error bar plot, if used.
        if errorbar_data:
            bar_col = [c * .7 for c in col]
            graph_plots.append({
                "data": errorbar_data,
                "lines": {
                    "show": False
                },
                "color": util.toColorString(bar_col),
                "points": {
                    "errorbars": "y",
                    "yerr": {
                        "show": True,
                        "lowerCap": "-",
                        "upperCap": "-",
                        "lineWidth": 1
                    }
                }
            })

        # Add the moving average plot, if used.
        if moving_average_data:
            col = [0.32, 0.6, 0.0]
            graph_plots.append({
                "data": moving_average_data,
                "color": util.toColorString(col)
            })

        # Add the moving median plot, if used.
        if moving_median_data:
            col = [0.75, 0.0, 1.0]
            graph_plots.append({
                "data": moving_median_data,
                "color": util.toColorString(col)
            })

    if bool(request.args.get('json')):
        json_obj = dict()
        json_obj['data'] = graph_plots
        # Flatten ORM machine objects to their string names.
        simple_type_legend = []
        for machine, test, unit, color, url in legend:
            # Flatten name, make color a dict.
            new_entry = {
                'name': machine.name,
                'test': test,
                'unit': unit,
                'color': util.toColorString(color),
                'url': url
            }
            simple_type_legend.append(new_entry)
        json_obj['legend'] = simple_type_legend
        json_obj['revision_range'] = revision_range
        json_obj['current_options'] = options
        json_obj['test_suite_name'] = ts.name
        json_obj['baselines'] = baseline_plots
        return flask.jsonify(**json_obj)

    return render_template("v4_graph.html",
                           ts=ts,
                           options=options,
                           revision_range=revision_range,
                           graph_plots=graph_plots,
                           overview_plots=overview_plots,
                           legend=legend,
                           baseline_plots=baseline_plots)
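
The moving-window aggregation above, as an illustrative standalone sketch (it
uses plain (x, y) pairs and built-in arithmetic rather than the (x, y, metadata)
tuples and lnt.util.stats helpers used in the real code):

def moving_aggregate(points, window_size, aggregate):
    """Return one (x, aggregate(window)) pair per input (x, y) point."""
    out = []
    for i, (x, _) in enumerate(points):
        lo = max(0, i - window_size)
        hi = min(len(points), i + window_size)
        window = [y for _, y in points[lo:hi]]
        out.append((x, aggregate(window)))
    return out

# e.g. moving_aggregate(pts, 10, lambda ys: sum(ys) / float(len(ys))) mirrors
# the moving-average branch; substitute a median function for the median branch.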
Example #9
0
File: analysis.py Project: efcs/lnt
    def get_comparison_result(self, runs, compare_runs, test_id, field):
        # Get the field which indicates the requested field's status.
        status_field = field.status_field

        # Load the sample data for the current and previous runs and the
        # comparison window.
        run_samples = []
        prev_samples = []
        for run in runs:
            samples = self.sample_map.get((run.id, test_id))
            if samples is not None:
                run_samples.extend(samples)
        for run in compare_runs:
            samples = self.sample_map.get((run.id, test_id))
            if samples is not None:
                prev_samples.extend(samples)

        # Determine whether this (test,pset) passed or failed in the current and
        # previous runs.
        #
        # FIXME: Support XFAILs and non-determinism (mixed fail and pass)
        # better.
        run_failed = prev_failed = False
        if status_field:
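            # A single failing sample marks the whole (test, run) pair as
            # failed.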
            for sample in run_samples:
                run_failed |= sample[status_field.index] == FAIL
            for sample in prev_samples:
                prev_failed |= sample[status_field.index] == FAIL

        # Get the current and previous values.
        run_values = [s[field.index] for s in run_samples
                      if s[field.index] is not None]
        prev_values = [s[field.index] for s in prev_samples
                       if s[field.index] is not None]
        if run_values:
            run_value = self.aggregation_fn(run_values)
        else:
            run_value = None
        if prev_values:
            prev_value = self.aggregation_fn(prev_values)
        else:
            prev_value = None

        # If we have multiple values for this run, use that to estimate the
        # distribution.
        if run_values and len(run_values) > 1:
            stddev = stats.standard_deviation(run_values)
            MAD = stats.median_absolute_deviation(run_values)
            stddev_mean = stats.mean(run_values)
        else:
            stddev = None
            MAD = None
            stddev_mean = None

        # If we are missing current or comparison values we are done.
        if run_value is None or prev_value is None:
            return ComparisonResult(
                run_value, prev_value, delta=None,
                pct_delta=None, stddev=stddev, MAD=MAD,
                cur_failed=run_failed, prev_failed=prev_failed,
                samples=run_values, prev_samples=prev_values,
                confidence_lv=self.confidence_lv)

        # Compute the comparison status for the test value.
        delta = run_value - prev_value
        if prev_value != 0:
            pct_delta = delta / prev_value
        else:
            pct_delta = 0.0

        return ComparisonResult(run_value, prev_value, delta,
                                pct_delta, stddev, MAD,
                                run_failed, prev_failed, run_values,
                                prev_values, stddev_mean, self.confidence_lv)
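
The delta / relative-delta arithmetic at the end of get_comparison_result, as a
minimal standalone sketch (the helper name is illustrative, not LNT's):

def compute_delta(run_value, prev_value):
    """Mirror the comparison math above: absolute and relative change."""
    delta = run_value - prev_value
    pct_delta = delta / prev_value if prev_value != 0 else 0.0
    return delta, pct_delta

# e.g. compute_delta(12.0, 10.0) -> (2.0, 0.2), i.e. a 20% change; whether that
# is a regression or an improvement depends on the field's bigger_is_better.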