Example #1
    def _make(self, path):
        make_cmd = self.opts.make

        subdir = path
        target = 'all'
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            if self.opts.only_test[1]:
                target = self.opts.only_test[1]
            subdir = os.path.join(*components)

        logger.info('Building...')
        if not self.opts.succinct:
            args = ["VERBOSE=1", target]
        else:
            args = [target]
        try:
            self._check_call(
                [make_cmd, '-k', '-j',
                 str(self._build_threads())] + args,
                cwd=subdir)
        except subprocess.CalledProcessError:
            # make is expected to exit with code 1 if there was any build
            # failure. Build failures are not unexpected when testing an
            # experimental compiler.
            pass
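The _check_call helper used above is not part of this example; a plausible sketch of it, modelled on the _check_output variants shown in the next examples, might look like this (the logging format and the bare delegation to subprocess.check_call are assumptions):

    def _check_call(self, *args, **kwargs):
        # Hypothetical helper: log the command (and working directory), then
        # delegate to subprocess.check_call, mirroring _check_output below.
        logger.info('Execute: %s' % ' '.join(args[0]))
        if 'cwd' in kwargs:
            logger.info('          (In %s)' % kwargs['cwd'])
        return subprocess.check_call(*args, **kwargs)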
Example #2
 def _check_output(self, *args, **kwargs):
     logger.info('Execute: %s' % ' '.join(args[0]))
     if 'cwd' in kwargs:
         logger.info('          (In %s)' % kwargs['cwd'])
     output = subprocess.check_output(*args, **kwargs)
     sys.stdout.write(output)
     return output
Example #3
 def _check_output(self, *args, **kwargs):
     logger.info('Execute: %s' % ' '.join(args[0]))
     if 'cwd' in kwargs:
         logger.info('          (In %s)' % kwargs['cwd'])
     output = subprocess.check_output(*args, **kwargs)
     sys.stdout.write(output)
     return output
Example #4
    def _make(self, path):
        make_cmd = self.opts.make

        subdir = path
        target = 'all'
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            if self.opts.only_test[1]:
                target = self.opts.only_test[1]
            subdir = os.path.join(*components)

        logger.info('Building...')
        if not self.opts.succinct:
            args = ["VERBOSE=1", target]
        else:
            args = [target]
        try:
            self._check_call([make_cmd,
                              '-k', '-j', str(self._build_threads())] + args,
                             cwd=subdir)
        except subprocess.CalledProcessError:
            # make is expected to exit with code 1 if there was any build
            # failure. Build failures are not unexpected when testing an
            # experimental compiler.
            pass
Example #5
def upgrade_and_normalize_report(data, ts_name):
    # Get the report version. V2 has it at the top level; older versions
    # have it in Run.Info.
    format_version = _get_format_version(data)
    if format_version is None:
        data['format_version'] = '2'
        format_version = 2

    if format_version == 0:
        data = upgrade_0_to_1(data)
        format_version = 1
    if format_version == 1:
        data = upgrade_1_to_2(data, ts_name)
        format_version = 2

    if format_version != 2 or data['format_version'] != '2':
        raise ValueError("Unknown format version")
    if 'run' not in data:
        import pprint
        logger.info(pprint.pformat(data))
        raise ValueError("No 'run' section in submission")
    if 'machine' not in data:
        raise ValueError("No 'machine' section in submission")
    if 'tests' not in data:
        raise ValueError("No 'tests' section in submission")

    run = data['run']
    if 'start_time' not in run:
        time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
        run['start_time'] = time
        run['end_time'] = time
    elif 'end_time' not in run:
        run['end_time'] = run['start_time']

    return data
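A minimal usage sketch of the normalizer above, assuming a hand-built v2 submission; the machine contents and the 'nts' suite name are illustrative assumptions:

report = {
    'format_version': '2',
    'machine': {'name': 'example-machine'},  # illustrative contents
    'run': {},                               # start_time/end_time get filled in
    'tests': [],
}
normalized = upgrade_and_normalize_report(report, 'nts')
assert 'start_time' in normalized['run']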
Example #6
def check_testsuite_schema_changes(session, testsuite):
    """Check whether the given testsuite that was loaded from a json/yaml
    file changed compared to the previous schema stored in the database.
    The database is automatically migrated for trivial changes, or an
    exception is thrown if automatic migration is not possible."""
    name = testsuite.name
    schema = TestSuiteJSONSchema(name, testsuite.jsonschema)
    prev_schema = session.query(TestSuiteJSONSchema) \
        .filter(TestSuiteJSONSchema.testsuite_name == name).first()
    if prev_schema is not None:
        if prev_schema.jsonschema != schema.jsonschema:
            logger.info("Previous Schema:")
            logger.info(
                json.dumps(json.loads(prev_schema.jsonschema), indent=2))
            # First do a dry run to check whether the upgrade will succeed.
            _upgrade_to(session, prev_schema, schema, dry_run=True)
            # Now perform the actual upgrade. This shouldn't fail as the dry
            # run already worked fine.
            _upgrade_to(session, prev_schema, schema)

            prev_schema.jsonschema = schema.jsonschema
            session.add(prev_schema)
            session.commit()
    else:
        session.add(schema)
        session.commit()
Example #7
        def perform_delete(ts, machine):
            count = session.query(ts.Run) \
                .filter(ts.Run.machine_id == machine.id).count()
            at = 0
            while True:
                runs = session.query(ts.Run) \
                    .filter(ts.Run.machine_id == machine.id) \
                    .options(joinedload(ts.Run.samples)) \
                    .options(joinedload(ts.Run.fieldchanges)) \
                    .order_by(ts.Run.id).limit(10).all()
                if len(runs) == 0:
                    break
                at += len(runs)
                msg = "Deleting runs %s (%d/%d)" % \
                    (" ".join([str(run.id) for run in runs]), at, count)
                logger.info(msg)
                yield msg + '\n'
                for run in runs:
                    session.delete(run)
                session.commit()

            machine_name = "%s:%s" % (machine.name, machine.id)
            session.delete(machine)
            session.commit()
            msg = "Deleted machine %s" % machine_name
            logger.info(msg)
            yield msg + '\n'
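Since perform_delete is a generator that yields one progress line per deleted batch, a caller has to drain it for the deletes to run; a hedged sketch of such a consumer (the stream argument and helper name are assumptions):

        def stream_delete(ts, machine, out):
            # Drain the generator so the deletions actually execute, forwarding
            # each progress message, e.g. to a chunked HTTP response or a log.
            for line in perform_delete(ts, machine):
                out.write(line)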
Example #8
def _start_browser(url, debug=False):
    from lnt.util import logger

    def url_is_up(url):
        try:
            o = urllib.urlopen(url)
        except IOError:
            return False
        o.close()
        return True

    # Wait for server to start...
    if debug:
        logger.info('waiting for server to start...')
    for i in range(10000):
        if url_is_up(url):
            break
        if debug:
            sys.stderr.write('.')
            sys.stderr.flush()
        time.sleep(.01)
    else:
        logger.warning('unable to detect that server started')

    if debug:
        logger.info('opening webbrowser...')
    webbrowser.open(url)
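The for/else loop above is a compact poll-until-ready idiom; a generic, self-contained version of the same pattern (not part of LNT) could be written as:

import time

def wait_until(predicate, attempts=10000, delay=0.01):
    # Poll predicate() until it returns True; give up after the given number
    # of attempts, sleeping `delay` seconds between tries.
    for _ in range(attempts):
        if predicate():
            return True
        time.sleep(delay)
    return False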
Example #9
def upgrade_and_normalize_report(data, ts_name):
    # Get the report version. V2 has it at the top level; older versions
    # have it in Run.Info.
    format_version = _get_format_version(data)
    if format_version is None:
        data['format_version'] = '2'
        format_version = 2

    if format_version == 0:
        data = upgrade_0_to_1(data)
        format_version = 1
    if format_version == 1:
        data = upgrade_1_to_2(data, ts_name)
        format_version = 2

    if format_version != 2 or data['format_version'] != '2':
        raise ValueError("Unknown format version")
    if 'run' not in data:
        import pprint
        logger.info(pprint.pformat(data))
        raise ValueError("No 'run' section in submission")
    if 'machine' not in data:
        raise ValueError("No 'machine' section in submission")
    if 'tests' not in data:
        raise ValueError("No 'tests' section in submission")

    run = data['run']
    if 'start_time' not in run:
        time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
        run['start_time'] = time
        run['end_time'] = time
    elif 'end_time' not in run:
        run['end_time'] = run['start_time']

    return data
Example #10
def age_out_oldest_regressions(session, ts, num_to_keep=50):
    # type: (Session, TestSuiteDB, int) -> int
    """Find the oldest regressions that are still in the detected state,
    and age them out.  This is needed when regressions are not manually
    acknowledged, regression analysis can grow unbounded.

    :param session: db session
    :param ts: testsuite
    :param num_to_keep: the number of newest regressions to keep in the detected state.
    :returns: the number of regressions changed.
    """

    regression_orders = session.query(ts.Regression.id, ts.FieldChange.end_order_id) \
        .filter(ts.Regression.state == RegressionState.DETECTED) \
        .join(ts.RegressionIndicator, ts.Regression.id == ts.RegressionIndicator.regression_id) \
        .join(ts.FieldChange) \
        .all()

    regression_newest_change = {}
    for regression_id, order_id in regression_orders:
        current = regression_newest_change.get(regression_id)
        if current is None or current < order_id:
            regression_newest_change[regression_id] = order_id
    # Order regressions by FC end order.
    ordered = sorted(regression_newest_change.items(), key=lambda x: x[1])
    to_move = ordered[0:(-1 * num_to_keep)]

    for r, _ in to_move:
        regress = session.query(ts.Regression).filter_by(id=r).one()
        logger.info(
            "Ageing out regression {} to keep regression count under {}.".
            format(regress, num_to_keep))
        regress.state = RegressionState.IGNORED
    return len(to_move)
Example #11
        def perform_delete(ts, machine):
            count = session.query(ts.Run) \
                .filter(ts.Run.machine_id == machine.id).count()
            at = 0
            while True:
                runs = session.query(ts.Run) \
                    .filter(ts.Run.machine_id == machine.id) \
                    .options(joinedload(ts.Run.samples)) \
                    .options(joinedload(ts.Run.fieldchanges)) \
                    .order_by(ts.Run.id).limit(10).all()
                if len(runs) == 0:
                    break
                at += len(runs)
                msg = "Deleting runs %s (%d/%d)" % \
                    (" ".join([str(run.id) for run in runs]), at, count)
                logger.info(msg)
                yield msg + '\n'
                for run in runs:
                    session.delete(run)
                session.commit()

            machine_name = "%s:%s" % (machine.name, machine.id)
            session.delete(machine)
            session.commit()
            msg = "Deleted machine %s" % machine_name
            logger.info(msg)
            yield msg + '\n'
Example #12
def _start_browser(url, debug=False):
    from lnt.util import logger

    def url_is_up(url):
        try:
            o = urllib.urlopen(url)
        except IOError:
            return False
        o.close()
        return True

    # Wait for server to start...
    if debug:
        logger.info('waiting for server to start...')
    for i in range(10000):
        if url_is_up(url):
            break
        if debug:
            sys.stderr.write('.')
            sys.stderr.flush()
        time.sleep(.01)
    else:
        logger.warning('unable to detect that server started')

    if debug:
        logger.info('opening webbrowser...')
    webbrowser.open(url)
Example #13
def delete_fieldchange(session, ts, change):
    """Delete this field change.  Since it might be attahed to a regression
    via regression indicators, fix those up too.  If this orphans a regression
    delete it as well."""
    # Find the indicators.
    indicators = session.query(ts.RegressionIndicator). \
        filter(ts.RegressionIndicator.field_change_id == change.id). \
        all()
    # And all the related regressions.
    regression_ids = [r.regression_id for r in indicators]

    # Remove the indicators that point to this change.
    for ind in indicators:
        session.delete(ind)

    # Now we can remove the change itself.
    session.delete(change)

    # We might have just created a regression with no changes.
    # If so, delete it as well.
    deleted_ids = []
    for r in regression_ids:
        remaining = session.query(ts.RegressionIndicator). \
            filter(ts.RegressionIndicator.regression_id == r). \
            all()
        if len(remaining) == 0:
            r = session.query(ts.Regression).get(r)
            logger.info("Deleting regression because it has not changes:" +
                        repr(r))
            session.delete(r)
            deleted_ids.append(r)
    session.commit()
    return deleted_ids
Example #14
def age_out_oldest_regressions(session, ts, num_to_keep=50):
    # type: (Session, TestSuiteDB, int) -> int
    """Find the oldest regressions that are still in the detected state,
    and age them out.  This is needed when regressions are not manually
    acknowledged, regression analysis can grow unbounded.

    :param session: db session
    :param ts: testsuite
    :param num_to_keep: the number of newest regressions to keep in the detected state.
    :returns: the number of regressions changed.
    """

    regression_orders = session.query(ts.Regression.id, ts.FieldChange.end_order_id) \
        .filter(ts.Regression.state == RegressionState.DETECTED) \
        .join(ts.RegressionIndicator, ts.Regression.id == ts.RegressionIndicator.regression_id) \
        .join(ts.FieldChange) \
        .all()

    regression_newest_change = {}  # type: Dict[int, int]
    for regression_id, order_id in regression_orders:
        current = regression_newest_change.get(regression_id)
        if current is None or current < order_id:
            regression_newest_change[regression_id] = order_id
    # Order regressions by FC end order.
    ordered = sorted(regression_newest_change.items(), key=lambda x: x[1])
    to_move = ordered[0:(-1 * num_to_keep)]

    for r, _ in to_move:
        regress = session.query(ts.Regression).filter_by(id=r).one()
        logger.info("Ageing out regression {} to keep regression count under {}."
                    .format(regress, num_to_keep))
        regress.state = RegressionState.IGNORED
    return len(to_move)
Example #15
 def delete(run_id):
     session = request.session
     ts = request.get_testsuite()
     run = session.query(ts.Run).filter(ts.Run.id == run_id).first()
     if run is None:
         abort(404, msg="Did not find run " + str(run_id))
     session.delete(run)
     session.commit()
     logger.info("Deleted run %s" % (run_id, ))
Example #16
 def delete(run_id):
     session = request.session
     ts = request.get_testsuite()
     run = session.query(ts.Run).filter(ts.Run.id == run_id).first()
     if run is None:
         abort(404, msg="Did not find run " + str(run_id))
     session.delete(run)
     session.commit()
     logger.info("Deleted run %s" % (run_id,))
Example #17
def regression_evolution(session, ts, run_id):
    """Analyse regressions. If they have changes, process them.
    Look at each regression in state detect.  Move to ignore if it is fixed.
    Look at each regression in state stage. Move to verify if fixed.
    Look at regressions in detect, do they match our policy? If no, move to
    NTBF.
    """
    logger.info("Running regression evolution")

    # Clear the cache before we start.
    ts.machine_to_latest_order_cache = {}
    changed = 0
    evolve_states = [
        RegressionState.DETECTED, RegressionState.STAGED,
        RegressionState.ACTIVE
    ]
    regressions = session.query(ts.Regression) \
        .filter(ts.Regression.state.in_(evolve_states)) \
        .all()

    detects = [r for r in regressions if r.state == RegressionState.DETECTED]
    staged = [r for r in regressions if r.state == RegressionState.STAGED]
    active = [r for r in regressions if r.state == RegressionState.ACTIVE]

    # Remove the oldest detected regressions if needed.
    num_regression_to_keep = 50
    if len(detects) > num_regression_to_keep:
        changed += age_out_oldest_regressions(session, ts,
                                              num_regression_to_keep)

    for regression in detects:
        if impacts(session, ts, run_id, regression) and is_fixed(
                session, ts, regression):
            logger.info("Detected fixed regression" + str(regression))
            regression.state = RegressionState.IGNORED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1

    for regression in staged:
        if impacts(session, ts, run_id, regression) and is_fixed(
                session, ts, regression):
            logger.info("Staged fixed regression" + str(regression))
            regression.state = RegressionState.DETECTED_FIXED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1

    for regression in active:
        if impacts(session, ts, run_id, regression) and is_fixed(
                session, ts, regression):
            logger.info("Active fixed regression" + str(regression))
            regression.state = RegressionState.DETECTED_FIXED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1

    session.commit()
    logger.info("Changed the state of {} regressions".format(changed))
Example #18
 def _check_output(self, *args, **kwargs):
     logger.info('Execute: %s' % ' '.join(args[0]))
     if 'cwd' in kwargs:
         logger.info('          (In %s)' % kwargs['cwd'])
     output = subprocess.check_output(*args, **kwargs)
     if kwargs.get('universal_newlines', False):
         sys.stdout.write(output)
     else:
         sys.stdout.buffer.write(output)
     return output
Example #19
 def timed(*args, **kw):
     t_start = time.time()
     result = func(*args, **kw)
     t_end = time.time()
     delta = t_end - t_start
     msg = 'timer: %r %2.2f sec' % (func.__name__, delta)
     if delta > 10:
         logger.warning(msg)
     else:
         logger.info(msg)
     return result
Example #20
 def timed(*args, **kw):
     t_start = time.time()
     result = func(*args, **kw)
     t_end = time.time()
     delta = t_end - t_start
     msg = 'timer: %r %2.2f sec' % (func.__name__, delta)
     if delta > 10:
         logger.warning(msg)
     else:
         logger.info(msg)
     return result
Example #21
 def timed(*args, **kw):
     t_start = time.time()
     result = func(*args, **kw)
     t_end = time.time()
     short_args = repr(args)
     if len(short_args) > 80:
         short_args = short_args[0:80]
     delta = t_end - t_start
     msg = '%r (%s, %r) %2.2f sec' % (func.__name__, short_args, kw, delta)
     if delta > 10:
         logger.warning(msg)
     else:
         logger.info(msg)
     return result
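The three timed bodies above read like the inner function of a timing decorator; a self-contained sketch of that pattern, keeping the 10-second warning threshold from the examples (the logging setup here is an assumption):

import functools
import logging
import time

logger = logging.getLogger(__name__)

def timed(func):
    # Decorator: log how long func takes and warn when it exceeds 10 seconds.
    @functools.wraps(func)
    def wrapper(*args, **kw):
        t_start = time.time()
        result = func(*args, **kw)
        delta = time.time() - t_start
        msg = 'timer: %r %2.2f sec' % (func.__name__, delta)
        if delta > 10:
            logger.warning(msg)
        else:
            logger.info(msg)
        return result
    return wrapper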
Example #22
def regression_evolution(session, ts, run_id):
    """Analyse regressions. If they have changes, process them.
    Look at each regression in state detect.  Move to ignore if it is fixed.
    Look at each regression in state stage. Move to verify if fixed.
    Look at regressions in detect, do they match our policy? If no, move to
    NTBF.
    """
    logger.info("Running regression evolution")

    # Clear the cache before we start.
    ts.machine_to_latest_order_cache = {}
    changed = 0
    evolve_states = [RegressionState.DETECTED, RegressionState.STAGED,
                     RegressionState.ACTIVE]
    regressions = session.query(ts.Regression) \
        .filter(ts.Regression.state.in_(evolve_states)) \
        .all()

    detects = [r for r in regressions if r.state == RegressionState.DETECTED]
    staged = [r for r in regressions if r.state == RegressionState.STAGED]
    active = [r for r in regressions if r.state == RegressionState.ACTIVE]

    # Remove the oldest detected regressions if needed.
    num_regression_to_keep = 50
    if len(detects) > num_regression_to_keep:
        changed += age_out_oldest_regressions(session, ts, num_regression_to_keep)

    for regression in detects:
        if impacts(session, ts, run_id, regression) and is_fixed(session, ts, regression):
            logger.info("Detected fixed regression" + str(regression))
            regression.state = RegressionState.IGNORED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1

    for regression in staged:
        if impacts(session, ts, run_id, regression) and is_fixed(session, ts, regression):
            logger.info("Staged fixed regression" + str(regression))
            regression.state = RegressionState.DETECTED_FIXED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1

    for regression in active:
        if impacts(session, ts, run_id, regression) and is_fixed(session, ts, regression):
            logger.info("Active fixed regression" + str(regression))
            regression.state = RegressionState.DETECTED_FIXED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1

    session.commit()
    logger.info("Changed the state of {} regressions".format(changed))
Example #23
    def _lit(self, path, test, profile):
        lit_cmd = self.opts.lit

        output_json_path = tempfile.NamedTemporaryFile(prefix='output',
                                                       suffix='.json',
                                                       dir=path,
                                                       delete=False)
        output_json_path.close()

        subdir = path
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            subdir = os.path.join(*components)

        extra_args = []
        if not test:
            extra_args = ['--no-execute']

        nr_threads = self._test_threads()
        if profile:
            if nr_threads != 1:
                logger.warning('Gathering profiles with perf requires -j 1 ' +
                               'as perf record cannot be run multiple times ' +
                               'simultaneously. Overriding -j %s to -j 1' %
                               nr_threads)
                nr_threads = 1
            extra_args += ['--param', 'profile=perf']
            if self.opts.perf_events:
                extra_args += ['--param',
                               'perf_profile_events=%s' %
                               self.opts.perf_events]

        logger.info('Testing...')
        try:
            self._check_call([lit_cmd,
                              '-v',
                              '-j', str(nr_threads),
                              subdir,
                              '-o', output_json_path.name] + extra_args)
        except subprocess.CalledProcessError:
            # LIT is expected to exit with code 1 if there were test
            # failures!
            pass
        try:
            return json.loads(open(output_json_path.name).read())
        except ValueError as e:
            fatal("Running test-suite did not create valid json report "
                  "in {}: {}".format(output_json_path.name, e.message))
Example #24
    def _lit(self, path, test, profile):
        lit_cmd = self.opts.lit

        output_json_path = tempfile.NamedTemporaryFile(prefix='output',
                                                       suffix='.json',
                                                       dir=path,
                                                       delete=False)
        output_json_path.close()

        subdir = path
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            subdir = os.path.join(*components)

        extra_args = []
        if not test:
            extra_args = ['--no-execute']

        nr_threads = self._test_threads()
        if profile:
            if nr_threads != 1:
                logger.warning('Gathering profiles with perf requires -j 1 ' +
                               'as perf record cannot be run multiple times ' +
                               'simultaneously. Overriding -j %s to -j 1' %
                               nr_threads)
                nr_threads = 1
            extra_args += ['--param', 'profile=perf']
            if self.opts.perf_events:
                extra_args += [
                    '--param',
                    'perf_profile_events=%s' % self.opts.perf_events
                ]

        logger.info('Testing...')
        try:
            self._check_call([
                lit_cmd, '-v', '-j',
                str(nr_threads), subdir, '-o', output_json_path.name
            ] + extra_args)
        except subprocess.CalledProcessError:
            # LIT is expected to exit with code 1 if there were test
            # failures!
            pass
        try:
            return json.loads(open(output_json_path.name).read())
        except ValueError as e:
            fatal("Running test-suite did not create valid json report "
                  "in {}: {}".format(output_json_path.name, e.message))
Example #25
def resolve_command_path(name):
    """Try to make the name/path given into an absolute path to an
    executable.

    """
    # If the given name exists (or is a path), make it absolute.
    if os.path.exists(name):
        return os.path.abspath(name)

    # Otherwise we most likely have a command name, try to look it up.
    path = which(name)
    if path is not None:
        logger.info("resolved command %r to path %r" % (name, path))
        return path

    # If that failed just return the original name.
    return name
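A hypothetical call site, resolving user-supplied tool names once before building command lines (the names here are assumptions):

make_cmd = resolve_command_path('make')
cmake_cmd = resolve_command_path('/usr/bin/cmake')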
Example #26
    def frompath(path):
        """
        frompath(path) -> Instance

        Load an LNT instance from the given instance specifier. The instance
        path can be one of:
          * The directory containing the instance.
          * The instance config file.
          * A tarball containing an instance.
        """

        # Accept paths to config files, or to directories containing 'lnt.cfg'.
        tmpdir = None
        if os.path.isdir(path):
            config_path = os.path.join(path, 'lnt.cfg')
        elif tarfile.is_tarfile(path):
            # Accept paths to tar/tgz etc. files, which we automatically unpack
            # into a temporary directory.
            tmpdir = tempfile.mkdtemp(suffix='lnt')

            logger.info("extracting input tarfile %r to %r" % (path, tmpdir))
            tf = tarfile.open(path)
            tf.extractall(tmpdir)

            # Find the LNT instance inside the tar file. Support tarballs that
            # either contain the instance directly, or contain a single
            # subdirectory which is the instance.
            if os.path.exists(os.path.join(tmpdir, "lnt.cfg")):
                config_path = os.path.join(tmpdir, "lnt.cfg")
            else:
                filenames = os.listdir(tmpdir)
                if len(filenames) != 1:
                    raise Exception("Unable to find LNT instance "
                                    "inside tarfile")
                config_path = os.path.join(tmpdir, filenames[0], "lnt.cfg")
        else:
            config_path = path

        if not config_path or not os.path.exists(config_path):
            raise Exception("Invalid config: %r" % config_path)

        config_data = {}
        exec open(config_path) in config_data
        config = lnt.server.config.Config.from_data(config_path, config_data)

        return Instance(config_path, config, tmpdir)
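Since frompath takes no self argument, it is presumably exposed as a static method on Instance; a hedged usage sketch (the path is made up):

# Load an instance from a directory, a config file, or a tarball.
instance = Instance.frompath('/srv/lnt/instance')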
Example #27
def identify_related_changes(session, ts, fc):
    """Can we find a home for this change in some existing regression? If a
    match is found add a regression indicator adding this change to that
    regression, otherwise create a new regression for this change.

    Regression matching looks for regressions that happen in overlapping order
    ranges. Then looks for changes that are similar.

    """
    active_indicators = session.query(ts.RegressionIndicator) \
        .join(ts.Regression) \
        .filter(or_(ts.Regression.state == RegressionState.DETECTED,
                ts.Regression.state == RegressionState.DETECTED_FIXED)) \
        .options(joinedload(ts.RegressionIndicator.field_change)) \
        .all()

    for change in active_indicators:
        regression_change = change.field_change

        if is_overlaping(regression_change, fc):
            confidence = 0.0

            confidence += percent_similar(regression_change.machine.name,
                                          fc.machine.name)
            confidence += percent_similar(regression_change.test.name,
                                          fc.test.name)

            if regression_change.field_id == fc.field_id:
                confidence += 1.0

            if confidence >= 2.0:
                # Matching
                MSG = "Found a match: {} with score {}."
                regression = session.query(ts.Regression) \
                    .get(change.regression_id)
                logger.info(MSG.format(str(regression), confidence))
                ri = ts.RegressionIndicator(regression, fc)
                session.add(ri)
                # Update the default title if needed.
                rebuild_title(session, ts, regression)
                session.commit()
                return True, regression
    logger.info("Could not find a partner, creating new Regression for change")
    new_reg = new_regression(session, ts, [fc.id])
    return False, new_reg
Example #28
def resolve_command_path(name):
    """Try to make the name/path given into an absolute path to an
    executable.

    """
    name = os.path.expanduser(name)

    # If the given name exists (or is a path), make it absolute.
    if os.path.exists(name):
        return os.path.abspath(name)

    # Otherwise we most likely have a command name, try to look it up.
    path = which(name)
    if path is not None:
        logger.info("resolved command %r to path %r" % (name, path))
        return path

    # If that failed just return the original name.
    return name
Example #29
def update(engine):
    any_changed = False

    # Load the available migrations.
    available_migrations = _load_migrations()

    Base.metadata.create_all(engine)

    session = sqlalchemy.orm.sessionmaker(engine)()
    version_list = session.query(SchemaVersion).all()
    session.close()

    versions = dict((v.name, v.version) for v in version_list)

    # Update the core schema.
    any_changed |= update_schema(engine, versions, available_migrations,
                                 '__core__')

    if any_changed:
        logger.info("database auto-upgraded")
Example #30
def update_schema(engine, versions, available_migrations, schema_name):
    schema_migrations = available_migrations[schema_name]

    # Get the current schema version.
    db_version = versions.get(schema_name, None)
    current_version = schema_migrations['current_version']

    # If there was no previous version, initialize the version.
    if db_version is None:
        logger.info("assigning initial version for schema %r", schema_name)
        _set_schema_version(engine, schema_name, 0)
        db_version = 0

    # If we are up-to-date, do nothing.
    if db_version == current_version:
        return False

    # Otherwise, update the database.
    if db_version > current_version:
        logger.error("invalid schema %r version %r (greater than current)",
                     schema_name, db_version)
        return False

    logger.info("updating schema %r from version %r to current version %r",
                schema_name, db_version, current_version)
    while db_version < current_version:
        # Lookup the upgrade function for this version.
        upgrade_script = schema_migrations[db_version]

        globals = {}
        with open(upgrade_script) as f:
            exec(compile(f.read(), upgrade_script, 'exec'), globals)
        upgrade_method = globals['upgrade']

        # Execute the upgrade.
        #
        # FIXME: Backup the database here.
        #
        # FIXME: Execute this inside a transaction?
        logger.info("applying upgrade for version %d to %d" %
                    (db_version, db_version + 1))
        upgrade_method(engine)

        # Update the schema version.
        db_version += 1
        _set_schema_version(engine, schema_name, db_version)

    return True
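update_schema exec()s each migration script and expects it to define a global upgrade(engine) callable; a minimal sketch of such a script, with an invented table change, might be:

# Hypothetical migration script (the table and column names are made up).
from sqlalchemy import text

def upgrade(engine):
    with engine.begin() as conn:
        conn.execute(text("ALTER TABLE example ADD COLUMN notes VARCHAR(256)"))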
Example #31
    def post(machine_spec):
        session = request.session
        ts = request.get_testsuite()
        machine = Machine._get_machine(machine_spec)
        machine_name = "%s:%s" % (machine.name, machine.id)

        action = request.values.get('action', None)
        if action is None:
            abort(400, msg="No 'action' specified")
        elif action == 'rename':
            name = request.values.get('name', None)
            if name is None:
                abort(400, msg="Expected 'name' for rename request")
            existing = session.query(ts.Machine) \
                .filter(ts.Machine.name == name) \
                .first()
            if existing is not None:
                abort(400, msg="Machine with name '%s' already exists" % name)
            machine.name = name
            session.commit()
            logger.info("Renamed machine %s to %s" % (machine_name, name))
        elif action == 'merge':
            into_id = request.values.get('into', None)
            if into_id is None:
                abort(400, msg="Expected 'into' for merge request")
            into = Machine._get_machine(into_id)
            into_name = "%s:%s" % (into.name, into.id)
            session.query(ts.Run) \
                .filter(ts.Run.machine_id == machine.id) \
                .update({ts.Run.machine_id: into.id},
                        synchronize_session=False)
            session.expire_all()  # be safe after synchronize_session==False
            # re-query Machine so we can delete it.
            machine = Machine._get_machine(machine_spec)
            session.delete(machine)
            session.commit()
            logger.info("Merged machine %s into %s" %
                        (machine_name, into_name))
            logger.info("Deleted machine %s" % machine_name)
        else:
            abort(400, msg="Unknown action '%s'" % action)
Example #32
    def post(machine_spec):
        session = request.session
        ts = request.get_testsuite()
        machine = Machine._get_machine(machine_spec)
        machine_name = "%s:%s" % (machine.name, machine.id)

        action = request.values.get('action', None)
        if action is None:
            abort(400, msg="No 'action' specified")
        elif action == 'rename':
            name = request.values.get('name', None)
            if name is None:
                abort(400, msg="Expected 'name' for rename request")
            existing = session.query(ts.Machine) \
                .filter(ts.Machine.name == name) \
                .first()
            if existing is not None:
                abort(400, msg="Machine with name '%s' already exists" % name)
            machine.name = name
            session.commit()
            logger.info("Renamed machine %s to %s" % (machine_name, name))
        elif action == 'merge':
            into_id = request.values.get('into', None)
            if into_id is None:
                abort(400, msg="Expected 'into' for merge request")
            into = Machine._get_machine(into_id)
            into_name = "%s:%s" % (into.name, into.id)
            session.query(ts.Run) \
                .filter(ts.Run.machine_id == machine.id) \
                .update({ts.Run.machine_id: into.id},
                        synchronize_session=False)
            session.expire_all()  # be safe after synchronize_session==False
            # re-query Machine so we can delete it.
            machine = Machine._get_machine(machine_spec)
            session.delete(machine)
            session.commit()
            logger.info("Merged machine %s into %s" %
                        (machine_name, into_name))
            logger.info("Deleted machine %s" % machine_name)
        else:
            abort(400, msg="Unknown action '%s'" % action)
Example #33
def import_and_report(config, db_name, db, session, file, format, ts_name,
                      show_sample_count=False, disable_email=False,
                      disable_report=False, select_machine=None,
                      merge_run=None):
    """
    import_and_report(config, db_name, db, session, file, format, ts_name,
                      [show_sample_count], [disable_email],
                      [disable_report], [select_machine], [merge_run])
                     -> ... object ...

    Import a test data file into an LNT server and generate a test report. On
    success, run is the newly imported run.

    The result object is a dictionary containing information on the imported
    run and its comparison to the previous run.
    """
    result = {
        'success': False,
        'error': None,
        'import_file': file,
    }
    if select_machine is None:
        select_machine = 'match'
    if merge_run is None:
        merge_run = 'reject'

    if select_machine not in ('match', 'update', 'split'):
        result['error'] = "select_machine must be 'match', 'update' or 'split'"
        return result

    ts = db.testsuite.get(ts_name, None)
    if ts is None:
        result['error'] = "Unknown test suite '%s'!" % ts_name
        return result
    numMachines = ts.getNumMachines(session)
    numRuns = ts.getNumRuns(session)
    numTests = ts.getNumTests(session)

    # If the database gets fragmented, count(*) in SQLite can get really
    # slow!?!
    if show_sample_count:
        numSamples = ts.getNumSamples(session)

    startTime = time.time()
    try:
        data = lnt.formats.read_any(file, format)
    except Exception:
        import traceback
        result['error'] = "could not parse input format"
        result['message'] = traceback.format_exc()
        return result

    result['load_time'] = time.time() - startTime

    # Auto-upgrade the data, if necessary.
    try:
        data = lnt.testing.upgrade_and_normalize_report(data, ts_name)
    except ValueError as e:
        import traceback
        result['error'] = "Invalid input format: %s" % e
        result['message'] = traceback.format_exc()
        return result

    # Find the database config, if we have a configuration object.
    if config:
        db_config = config.databases[db_name]
    else:
        db_config = None

    # Find the email address for this machine's results.
    toAddress = email_config = None
    if db_config and not disable_email:
        email_config = db_config.email_config
        if email_config.enabled:
            # Find the machine name.
            machineName = str(data.get('Machine', {}).get('Name'))
            toAddress = email_config.get_to_address(machineName)
            if toAddress is None:
                result['error'] = ("unable to match machine name "
                                   "for test results email address!")
                return result

    importStartTime = time.time()
    try:
        data_schema = data.get('schema')
        if data_schema is not None and data_schema != ts_name:
            result['error'] = ("Importing '%s' data into test suite '%s'" %
                               (data_schema, ts_name))
            return result

        run = ts.importDataFromDict(session, data, config=db_config,
                                    select_machine=select_machine,
                                    merge_run=merge_run)
    except KeyboardInterrupt:
        raise
    except Exception as e:
        import traceback
        result['error'] = "import failure: %s" % e.message
        result['message'] = traceback.format_exc()
        if isinstance(e, lnt.server.db.testsuitedb.MachineInfoChanged):
            result['message'] += \
                '\n\nNote: Use --select-machine=update to update ' \
                'the existing machine information.\n'
        return result

    # If the import succeeded, save the import path.
    run.imported_from = file

    result['import_time'] = time.time() - importStartTime

    reportStartTime = time.time()
    result['report_to_address'] = toAddress
    if config:
        report_url = "%s/db_%s/" % (config.zorgURL, db_name)
    else:
        report_url = "localhost"

    if not disable_report:
        #  This has the side effect of building the run report for
        #  this result.
        NTEmailReport.emailReport(result, session, run, report_url,
                                  email_config, toAddress, True)

    result['added_machines'] = ts.getNumMachines(session) - numMachines
    result['added_runs'] = ts.getNumRuns(session) - numRuns
    result['added_tests'] = ts.getNumTests(session) - numTests
    if show_sample_count:
        result['added_samples'] = ts.getNumSamples(session) - numSamples

    result['committed'] = True
    result['run_id'] = run.id
    session.commit()

    fieldchange.post_submit_tasks(session, ts, run.id)

    # Add a handy relative link to the submitted run.
    result['result_url'] = "db_{}/v4/{}/{}".format(db_name, ts_name, run.id)
    result['report_time'] = time.time() - importStartTime
    result['total_time'] = time.time() - startTime
    logger.info("Successfully created {}".format(result['result_url']))
    # If this database has a shadow import configured, import the run into that
    # database as well.
    if config and config.databases[db_name].shadow_import:
        # Load the shadow database to import into.
        db_config = config.databases[db_name]
        shadow_name = db_config.shadow_import
        with closing(config.get_database(shadow_name)) as shadow_db:
            if shadow_db is None:
                raise ValueError("invalid configuration, shadow import "
                                 "database %r does not exist" % shadow_name)

            # Perform the shadow import.
            shadow_session = shadow_db.make_session()
            shadow_result = import_and_report(config, shadow_name,
                                              shadow_db, shadow_session, file,
                                              format, ts_name,
                                              show_sample_count, disable_email,
                                              disable_report,
                                              select_machine=select_machine,
                                              merge_run=merge_run)

            # Append the shadow result to the result.
            result['shadow_result'] = shadow_result

    result['success'] = True
    return result
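A hedged call-site sketch following the result dictionary populated above; the file name and the 'default'/'nts' keys are assumptions:

result = import_and_report(config, 'default', db, session,
                           'report.json', '<auto>', 'nts')
if not result['success']:
    logger.error("import failed: %s" % result['error'])
else:
    logger.info("imported run %d" % result['run_id'])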
Example #34
def v4_make_regression(machine_id, test_id, field_index, run_id):
    """This function is called to make a new regression from a graph data point.

    It is not necessarily the case that there will be a real change there,
    so we must create a regression, bypassing the normal analysis.

    """
    session = request.session
    ts = request.get_testsuite()
    field = ts.sample_fields[field_index]
    new_regression_id = 0
    run = session.query(ts.Run).get(run_id)

    runs = session.query(ts.Run). \
        filter(ts.Run.order_id == run.order_id). \
        filter(ts.Run.machine_id == run.machine_id). \
        all()

    if len(runs) == 0:
        abort(404)

    previous_runs = ts.get_previous_runs_on_machine(session, run, 1)

    # Find our start/end order.
    if previous_runs != []:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    end_order = run.order

    # Load our run data for the creation of the new fieldchanges.
    runs_to_load = [r.id for r in (runs + previous_runs)]

    runinfo = lnt.server.reporting.analysis.RunInfo(session, ts, runs_to_load)

    result = runinfo.get_comparison_result(
        runs, previous_runs, test_id, field,
        ts.Sample.get_hash_of_binary_field())

    # Try and find a matching FC and update, else create one.
    try:
        f = session.query(ts.FieldChange) \
            .filter(ts.FieldChange.start_order == start_order) \
            .filter(ts.FieldChange.end_order == end_order) \
            .filter(ts.FieldChange.test_id == test_id) \
            .filter(ts.FieldChange.machine == run.machine) \
            .filter(ts.FieldChange.field_id == field.id) \
            .one()
    except sqlalchemy.orm.exc.NoResultFound:
        # Create one
        test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
        f = ts.FieldChange(start_order=start_order,
                           end_order=run.order,
                           machine=run.machine,
                           test=test,
                           field_id=field.id)
        session.add(f)

    # Always update FCs with new values.
    if f:
        f.old_value = result.previous
        f.new_value = result.current
        f.run = run
    session.commit()

    # Make new regressions.
    regression, _ = new_regression(session, ts, [f.id])
    regression.state = RegressionState.ACTIVE

    session.commit()
    logger.info("Manually created new regressions: {}".format(regression.id))
    flash("Created " + regression.title, FLASH_SUCCESS)

    return redirect(v4_url_for(".v4_regression_detail", id=regression.id))
Example #35
 def _cp_artifacts(self, src, dest, patts):
     """Copy artifacts out of the build """
     for patt in patts:
         for file in glob.glob(src + patt):
             shutil.copy(file, dest)
             logger.info(file + " --> " + dest)
Example #36
    def _configure(self, path, extra_cmake_defs=[], execute=True):
        cmake_cmd = self.opts.cmake

        defs = {}
        if self.opts.cc:
            defs['CMAKE_C_COMPILER'] = self.opts.cc
        if self.opts.cxx:
            defs['CMAKE_CXX_COMPILER'] = self.opts.cxx

        cmake_build_types = ('DEBUG', 'MINSIZEREL', 'RELEASE',
                             'RELWITHDEBINFO')
        if self.opts.cppflags or self.opts.cflags:
            all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
            defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)
            # Ensure that no flags get added based on build type when the user
            # explicitly specifies flags to use.
            for build_type in cmake_build_types:
                defs['CMAKE_C_FLAGS_' + build_type] = ""

        if self.opts.cppflags or self.opts.cxxflags:
            all_cxx_flags = ' '.join([self.opts.cppflags, self.opts.cxxflags])
            defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(all_cxx_flags)
            # Ensure that no flags get added based on build type when the user
            # explicitly specifies flags to use.
            for build_type in cmake_build_types:
                defs['CMAKE_CXX_FLAGS_' + build_type] = ""

        if self.opts.run_under:
            defs['TEST_SUITE_RUN_UNDER'] = \
                self._unix_quote_args(self.opts.run_under)
        if self.opts.benchmarking_only:
            defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
        if self.opts.only_compile:
            defs['TEST_SUITE_RUN_BENCHMARKS'] = 'Off'
        if self.opts.use_perf in ('time', 'all'):
            defs['TEST_SUITE_USE_PERF'] = 'ON'
        if self.opts.test_suite_externals:
            defs['TEST_SUITE_EXTERNALS_DIR'] = self.opts.test_suite_externals
        if self.opts.pgo and self.trained:
            defs['TEST_SUITE_PROFILE_USE'] = "On"
            defs['TEST_SUITE_PROFILE_GENERATE'] = "Off"
            if 'TEST_SUITE_RUN_TYPE' not in defs:
                defs['TEST_SUITE_RUN_TYPE'] = 'ref'

        for item in tuple(self.opts.cmake_defines) + tuple(extra_cmake_defs):
            k, v = item.split('=', 1)
            # make sure the overriding of the settings above also works
            # when the cmake-define-defined variable has a datatype
            # specified.
            key_no_datatype = k.split(':', 1)[0]
            if key_no_datatype in defs:
                del defs[key_no_datatype]
            defs[k] = v

        # We use 'cmake -LAH -N' later to find out the value of the
        # CMAKE_C_COMPILER and CMAKE_CXX_COMPILER variables.
        # 'cmake -LAH -N' will only return variables in the cache that have
        # a cmake type set. Therefore, explicitly set a 'FILEPATH' type on
        # these variables here, if they were untyped so far.
        if 'CMAKE_C_COMPILER' in defs:
            defs['CMAKE_C_COMPILER:FILEPATH'] = defs['CMAKE_C_COMPILER']
            del defs['CMAKE_C_COMPILER']
        if 'CMAKE_CXX_COMPILER' in defs:
            defs['CMAKE_CXX_COMPILER:FILEPATH'] = defs['CMAKE_CXX_COMPILER']
            del defs['CMAKE_CXX_COMPILER']

        lines = ['Configuring with {']
        for k, v in sorted(defs.items()):
            lines.append("  %s: '%s'" % (k, v))
        lines.append('}')

        if 'TEST_SUITE_REMOTE_HOST' in defs:
            self.remote_run = True

        # Prepare cmake cache if requested:
        cmake_flags = []
        for cache in self.opts.cmake_cache:
            if cache == "":
                continue
            # Shortcut for the common case.
            if not cache.endswith(".cmake") and "/" not in cache:
                cache = os.path.join(self._test_suite_dir(), "cmake/caches",
                                     cache + ".cmake")
            cache = os.path.abspath(cache)
            if not os.path.exists(cache):
                fatal("Could not find CMake cache file: " + cache)
            cmake_flags += ['-C', cache]

        for l in lines:
            logger.info(l)

        # Define compilers before specifying the cache files.
        early_defs = {}
        for key in [
                'CMAKE_C_COMPILER:FILEPATH', 'CMAKE_CXX_COMPILER:FILEPATH'
        ]:
            value = defs.pop(key, None)
            if value is not None:
                early_defs[key] = value

        cmake_cmd = ([cmake_cmd] +
                     ['-D%s=%s' % (k, v) for k, v in early_defs.items()] +
                     cmake_flags + [self._test_suite_dir()] +
                     ['-D%s=%s' % (k, v) for k, v in defs.items()])
        if execute:
            self._check_call(cmake_cmd, cwd=path)

        return cmake_cmd
Example #37
def action_view_comparison(report_a, report_b, hostname, port, dry_run,
                           testsuite):
    """view a report comparison using a temporary server"""
    from .common import init_logger
    from lnt.util import logger
    from lnt.util.ImportData import import_and_report
    import contextlib
    import lnt.server.db.migrate
    import lnt.server.instance
    import lnt.server.ui.app
    import logging
    import os
    import shutil
    import tempfile
    import thread

    init_logger(logging.ERROR)

    # Create a temporary directory to hold the instance.
    tmpdir = tempfile.mkdtemp(suffix='lnt')

    try:
        # Create a temporary instance.
        url = 'http://%s:%d' % (hostname, port)
        db_path = os.path.join(tmpdir, 'data.db')
        db_info = lnt.server.config.DBInfo(
            'sqlite:///%s' % (db_path,), None,
            lnt.server.config.EmailConfig(False, '', '', []), "0")
        # _(self, name, zorgURL, dbDir, tempDir,
        # profileDir, secretKey, databases, blacklist):
        config = lnt.server.config.Config('LNT', url, db_path, tmpdir,
                                          None, "Not secret key.",
                                          {'default': db_info}, None,
                                          None)
        instance = lnt.server.instance.Instance(None, config)

        # Create the database.
        lnt.server.db.migrate.update_path(db_path)

        # Import the two reports.
        with contextlib.closing(config.get_database('default')) as db:
            session = db.make_session()
            import_and_report(config, 'default', db, session, report_a,
                              '<auto>', testsuite, select_machine='match')
            import_and_report(config, 'default', db, session, report_b,
                              '<auto>', testsuite, select_machine='match')

            # Dispatch another thread to start the webbrowser.
            comparison_url = '%s/v4/nts/2?compare_to=1' % (url,)
            logger.info("opening comparison view: %s" % (comparison_url,))

            if not dry_run:
                thread.start_new_thread(_start_browser, (comparison_url, True))

            # Run the webserver.
            app = lnt.server.ui.app.App.create_with_instance(instance)
            app.debug = True

            if dry_run:
                # Don't catch our exceptions.
                app.testing = True
                # Create a test client.
                client = app.test_client()
                response = client.get(comparison_url)
                assert response.status_code == 200, "Page did not return 200."
            else:
                app.run(hostname, port, use_reloader=False)
    finally:
        shutil.rmtree(tmpdir)
Example #38
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report has stuff in it that will be useful
        for reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        mkdir_p(path)
        os.chdir(path)

        # Run with -save-temps
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        logger.info(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        logger.info(out)

        # Figure out our test's target.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, nestsed? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_deps = [
            self.opts.make, "VERBOSE=1", "timeit-target", "timeit-host",
            "fpcmp-host"
        ]
        logger.info(" ".join(make_deps))
        p = subprocess.Popen(make_deps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        logger.info(" ".join(make_save_temps))
        p = subprocess.Popen(make_save_temps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)
        with open(report_path + "/build.log", 'w') as f:
            f.write(std_out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"),
                    report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = [
            "/*.o", "/*.time", "/*.cmake", "/*.make", "/*.includecache",
            "/*.txt"
        ]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        # Now let's do -ftime-report.
        cmd_time_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-ftime-report']

        logger.info(' '.join(cmd_time_report))

        out = subprocess.check_output(cmd_time_report)
        logger.info(out)

        make_time_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_time_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/time-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/time-report.txt")

        # Now let's do -mllvm -stats.
        cmd_stats_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-mllvm -stats']

        logger.info(' '.join(cmd_stats_report))

        out = subprocess.check_output(cmd_stats_report)
        logger.info(out)

        make_stats_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_stats_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/stats-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/stats-report.txt")

        # Collect profile:
        if "Darwin" in platform.platform():
            # For testing and power users, let's allow overrides of how sudo
            # and iprofiler are called.
            sudo = os.getenv("SUDO_CMD", "sudo")
            if " " in sudo:
                sudo = sudo.split(" ")
            elif not sudo:
                sudo = []
            else:
                sudo = [sudo]
            iprofiler = os.getenv("IPROFILER_CMD",
                                  "iprofiler -timeprofiler -I 40u")

            cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
            print(' '.join(cmd_iprofiler))

            out = subprocess.check_output(cmd_iprofiler)

            os.chdir(local_path)
            make_iprofiler_temps = [self.opts.make, "VERBOSE=1", short_name]
            p = subprocess.Popen(make_iprofiler_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            logger.warning("Using sudo to collect execution trace.")
            make_save_temps = sudo + [self.opts.lit, short_name + ".test"]
            p = subprocess.Popen(make_save_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            sys.stdout.write(std_out)
            sys.stderr.write(std_err)
            logger.warning("Tests may fail because of iprofiler's output.")
            # The .dtps bundle will be owned by root; change ownership so
            # that we can read it.
            chmod = sudo + [
                "chown", "-R",
                getpass.getuser(), short_name + ".dtps"
            ]
            subprocess.call(chmod)
            profile = local_path + "/" + short_name + ".dtps"
            shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
            logger.info(profile + "-->" + report_path)
        else:
            logger.warning("Skipping execution profiling because " +
                           "this is not Darwin.")
        logger.info("Report produced in: " + report_path)

        return lnt.util.ImportData.no_submit()
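
# A hedged, standalone sketch of the capture-and-log pattern used repeatedly
# above: run a make target, fold stderr into stdout, and keep a transcript.
# The command and log path below are hypothetical placeholders.
import subprocess

def run_and_log(cmd, log_path):
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if not isinstance(out, str):
        out = out.decode('utf-8', 'replace')
    with open(log_path, 'w') as f:
        f.write(out)
    return proc.returncode

# run_and_log(["make", "VERBOSE=1", "my-benchmark"], "build.log")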
Exemple #39
0
def v4_make_regression(machine_id, test_id, field_index, run_id):
    """This function is called to make a new regression from a graph data point.

    It is not nessessarly the case that there will be a real change there,
    so we must create a regression, bypassing the normal analysis.

    """
    session = request.session
    ts = request.get_testsuite()
    field = ts.sample_fields[field_index]
    new_regression_id = 0
    run = session.query(ts.Run).get(run_id)

    runs = session.query(ts.Run). \
        filter(ts.Run.order_id == run.order_id). \
        filter(ts.Run.machine_id == run.machine_id). \
        all()

    if len(runs) == 0:
        abort(404)

    previous_runs = ts.get_previous_runs_on_machine(session, run, 1)

    # Find our start/end order.
    if previous_runs != []:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    end_order = run.order

    # Load our run data for the creation of the new fieldchanges.
    runs_to_load = [r.id for r in (runs + previous_runs)]

    runinfo = lnt.server.reporting.analysis.RunInfo(session, ts, runs_to_load)

    result = runinfo.get_comparison_result(
        runs, previous_runs, test_id, field,
        ts.Sample.get_hash_of_binary_field())

    # Try to find a matching FieldChange to update; otherwise create one.
    try:
        f = session.query(ts.FieldChange) \
            .filter(ts.FieldChange.start_order == start_order) \
            .filter(ts.FieldChange.end_order == end_order) \
            .filter(ts.FieldChange.test_id == test_id) \
            .filter(ts.FieldChange.machine == run.machine) \
            .filter(ts.FieldChange.field_id == field.id) \
            .one()
    except sqlalchemy.orm.exc.NoResultFound:
        # Create one
        test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
        f = ts.FieldChange(start_order=start_order,
                           end_order=run.order,
                           machine=run.machine,
                           test=test,
                           field_id=field.id)
        session.add(f)

    # Always update FCs with new values.
    if f:
        f.old_value = result.previous
        f.new_value = result.current
        f.run = run
    session.commit()

    # Make new regressions.
    regression, _ = new_regression(session, ts, [f.id])
    regression.state = RegressionState.ACTIVE

    session.commit()
    logger.info("Manually created new regressions: {}".format(regression.id))
    flash("Created " + regression.title, FLASH_SUCCESS)

    return redirect(v4_url_for(".v4_regression_detail", id=regression.id))
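
# A hedged sketch of the find-or-update-else-create pattern used for the
# FieldChange above, written against a generic SQLAlchemy model. The `model`
# argument and the keyword filters are hypothetical.
import sqlalchemy.orm.exc

def get_or_create(session, model, **filters):
    try:
        return session.query(model).filter_by(**filters).one()
    except sqlalchemy.orm.exc.NoResultFound:
        obj = model(**filters)
        session.add(obj)
        return obj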
Exemple #40
0
def import_and_report(config, db_name, db, session, file, format, ts_name,
                      show_sample_count=False, disable_email=False,
                      disable_report=False, select_machine=None,
                      merge_run=None):
    """
    import_and_report(config, db_name, db, session, file, format, ts_name,
                      [show_sample_count], [disable_email],
                      [disable_report], [select_machine], [merge_run])
                     -> ... object ...

    Import a test data file into an LNT server and generate a test report.
    On success, the result describes the newly imported run.

    The result object is a dictionary containing information on the imported
    run and its comparison to the previous run.
    """
    result = {
        'success': False,
        'error': None,
        'import_file': file,
    }
    if select_machine is None:
        select_machine = 'match'
    if merge_run is None:
        merge_run = 'reject'

    if select_machine not in ('match', 'update', 'split'):
        result['error'] = "select_machine must be 'match', 'update' or 'split'"
        return result

    ts = db.testsuite.get(ts_name, None)
    if ts is None:
        result['error'] = "Unknown test suite '%s'!" % ts_name
        return result
    numMachines = ts.getNumMachines(session)
    numRuns = ts.getNumRuns(session)
    numTests = ts.getNumTests(session)

    # If the database gets fragmented, count(*) in SQLite can get very slow.
    if show_sample_count:
        numSamples = ts.getNumSamples(session)

    startTime = time.time()
    try:
        data = lnt.formats.read_any(file, format)
    except Exception:
        import traceback
        result['error'] = "could not parse input format"
        result['message'] = traceback.format_exc()
        return result

    result['load_time'] = time.time() - startTime

    # Auto-upgrade the data, if necessary.
    try:
        data = lnt.testing.upgrade_and_normalize_report(data, ts_name)
    except ValueError as e:
        import traceback
        result['error'] = "Invalid input format: %s" % e
        result['message'] = traceback.format_exc()
        return result

    # Find the database config, if we have a configuration object.
    if config:
        db_config = config.databases[db_name]
    else:
        db_config = None

    # Find the email address for this machine's results.
    toAddress = email_config = None
    if db_config and not disable_email:
        email_config = db_config.email_config
        if email_config.enabled:
            # Find the machine name.
            machineName = str(data.get('Machine', {}).get('Name'))
            toAddress = email_config.get_to_address(machineName)
            if toAddress is None:
                result['error'] = ("unable to match machine name "
                                   "for test results email address!")
                return result

    importStartTime = time.time()
    try:
        data_schema = data.get('schema')
        if data_schema is not None and data_schema != ts_name:
            result['error'] = ("Importing '%s' data into test suite '%s'" %
                               (data_schema, ts_name))
            return result

        run = ts.importDataFromDict(session, data, config=db_config,
                                    select_machine=select_machine,
                                    merge_run=merge_run)
    except KeyboardInterrupt:
        raise
    except Exception as e:
        import traceback
        result['error'] = "import failure: %s" % e.message
        result['message'] = traceback.format_exc()
        if isinstance(e, lnt.server.db.testsuitedb.MachineInfoChanged):
            result['message'] += \
                '\n\nNote: Use --select-machine=update to update ' \
                'the existing machine information.\n'
        return result

    # If the import succeeded, save the import path.
    run.imported_from = file

    result['import_time'] = time.time() - importStartTime

    result['report_to_address'] = toAddress
    if config:
        report_url = "%s/db_%s/" % (config.zorgURL, db_name)
    else:
        report_url = "localhost"

    if not disable_report:
        #  This has the side effect of building the run report for
        #  this result.
        NTEmailReport.emailReport(result, session, run, report_url,
                                  email_config, toAddress, True)

    result['added_machines'] = ts.getNumMachines(session) - numMachines
    result['added_runs'] = ts.getNumRuns(session) - numRuns
    result['added_tests'] = ts.getNumTests(session) - numTests
    if show_sample_count:
        result['added_samples'] = ts.getNumSamples(session) - numSamples

    result['committed'] = True
    result['run_id'] = run.id
    session.commit()

    fieldchange.post_submit_tasks(session, ts, run.id)

    # Add a handy relative link to the submitted run.
    result['result_url'] = "db_{}/v4/{}/{}".format(db_name, ts_name, run.id)
    result['report_time'] = time.time() - importStartTime
    result['total_time'] = time.time() - startTime
    logger.info("Successfully created {}".format(result['result_url']))
    # If this database has a shadow import configured, import the run into that
    # database as well.
    if config and config.databases[db_name].shadow_import:
        # Load the shadow database to import into.
        db_config = config.databases[db_name]
        shadow_name = db_config.shadow_import
        with closing(config.get_database(shadow_name)) as shadow_db:
            if shadow_db is None:
                raise ValueError("invalid configuration, shadow import "
                                 "database %r does not exist" % shadow_name)

            # Perform the shadow import.
            shadow_session = shadow_db.make_session()
            shadow_result = import_and_report(config, shadow_name,
                                              shadow_db, shadow_session, file,
                                              format, ts_name,
                                              show_sample_count, disable_email,
                                              disable_report,
                                              select_machine=select_machine,
                                              merge_run=merge_run)

            # Append the shadow result to the result.
            result['shadow_result'] = shadow_result

    result['success'] = True
    return result
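
# A hedged usage sketch based on the signature documented above. The `config`,
# `db`, and `session` objects are assumed to come from an already-loaded LNT
# instance; the database name, report path, format token, and test-suite name
# are hypothetical placeholders.
#
# result = import_and_report(config, 'default', db, session,
#                            'report.json', '<auto>', 'nts',
#                            select_machine='match', merge_run='reject')
# if not result['success']:
#     logger.error(result['error'])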
Exemple #41
0
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report contains artifacts that are useful
        for reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        mkdir_p(path)
        os.chdir(path)

        # Run with -save-temps
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        logger.info(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        logger.info(out)

        # Figure out our test's target.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark; is it nested? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_deps = [self.opts.make, "VERBOSE=1", "timeit-target",
                     "timeit-host", "fpcmp-host"]
        logger.info(" ".join(make_deps))
        p = subprocess.Popen(make_deps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        logger.info(" ".join(make_save_temps))
        p = subprocess.Popen(make_save_temps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)
        with open(report_path + "/build.log", 'w') as f:
            f.write(std_out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"),
                    report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = ["/*.o", "/*.time", "/*.cmake", "/*.make",
                       "/*.includecache", "/*.txt"]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        # Now let's do -ftime-report.
        cmd_time_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-ftime-report']

        logger.info(' '.join(cmd_time_report))

        out = subprocess.check_output(cmd_time_report)
        logger.info(out)

        make_time_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_time_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/time-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/time-report.txt")

        # Now let's do -mllvm -stats.
        cmd_stats_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-mllvm -stats']

        logger.info(' '.join(cmd_stats_report))

        out = subprocess.check_output(cmd_stats_report)
        logger.info(out)

        make_stats_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_stats_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/stats-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/stats-report.txt")

        # Collect profile:
        if "Darwin" in platform.platform():
            # For testing and power users, let's allow overrides of how sudo
            # and iprofiler are called.
            sudo = os.getenv("SUDO_CMD", "sudo")
            if " " in sudo:
                sudo = sudo.split(" ")
            elif not sudo:
                sudo = []
            else:
                sudo = [sudo]
            iprofiler = os.getenv("IPROFILER_CMD",
                                  "iprofiler -timeprofiler -I 40u")

            cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
            print(' '.join(cmd_iprofiler))

            out = subprocess.check_output(cmd_iprofiler)

            os.chdir(local_path)
            make_iprofiler_temps = [self.opts.make, "VERBOSE=1", short_name]
            p = subprocess.Popen(make_iprofiler_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            logger.warning("Using sudo to collect execution trace.")
            make_save_temps = sudo + [self.opts.lit, short_name + ".test"]
            p = subprocess.Popen(make_save_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            sys.stdout.write(std_out)
            sys.stderr.write(std_err)
            logger.warning("Tests may fail because of iprofiler's output.")
            # The .dtps bundle will be owned by root; change ownership so
            # that we can read it.
            chmod = sudo + ["chown", "-R", getpass.getuser(),
                            short_name + ".dtps"]
            subprocess.call(chmod)
            profile = local_path + "/" + short_name + ".dtps"
            shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
            logger.info(profile + "-->" + report_path)
        else:
            logger.warning("Skipping execution profiling because " +
                           "this is not Darwin.")
        logger.info("Report produced in: " + report_path)

        return lnt.util.ImportData.no_submit()
Exemple #42
0
def action_send_daily_report(instance_path, address, database, testsuite, host,
                             from_address, today, subject_prefix, dry_run,
                             days, filter_machine_regex):
    """send a daily report email"""
    import contextlib
    import datetime
    import email.mime.multipart
    import email.mime.text
    import lnt.server.reporting.dailyreport
    import smtplib

    # Load the LNT instance.
    instance = lnt.server.instance.Instance.frompath(instance_path)
    config = instance.config

    # Get the database.
    with contextlib.closing(config.get_database(database)) as db:
        session = db.make_session()

        # Get the testsuite.
        ts = db.testsuite[testsuite]

        if today:
            date = datetime.datetime.utcnow()
        else:
            # Get a timestamp to use to derive the daily report to generate.
            latest = session.query(ts.Run).\
                order_by(ts.Run.start_time.desc()).limit(1).first()

            # If we found a run, use its start time (rounded up to the next
            # hour, so we make sure it gets included).
            if latest:
                date = latest.start_time + datetime.timedelta(hours=1)
            else:
                # Otherwise, just use now.
                date = datetime.datetime.utcnow()

        # Generate the daily report.
        logger.info("building report data...")
        report = lnt.server.reporting.dailyreport.DailyReport(
            ts, year=date.year, month=date.month, day=date.day,
            day_start_offset_hours=date.hour, for_mail=True,
            num_prior_days_to_include=days,
            filter_machine_regex=filter_machine_regex)
        report.build(session)

        logger.info("generating HTML report...")
        ts_url = "%s/db_%s/v4/%s" \
            % (config.zorgURL, database, testsuite)
        subject = "Daily Report: %04d-%02d-%02d" % (
            report.year, report.month, report.day)
        html_report = report.render(ts_url, only_html_body=False).encode('utf-8')

        if subject_prefix is not None:
            subject = "%s %s" % (subject_prefix, subject)

        # Form the multipart email message.
        msg = email.mime.multipart.MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = from_address
        msg['To'] = address
        msg.attach(email.mime.text.MIMEText(html_report, 'html', 'utf-8'))

        # Send the report.
        if not dry_run:
            s = smtplib.SMTP(host)
            s.sendmail(from_address, [address],
                       msg.as_string())
            s.quit()
        else:
            out = sys.stdout
            out.write("From: %s\n" % msg['From'])
            out.write("To: %s\n" % msg['To'])
            out.write("Subject: %s\n" % msg['Subject'])
            out.write("=== html report\n")
            out.write(html_report + "\n")
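
# A trimmed, standalone sketch of the email assembly and dry-run split above.
# The addresses, host, and body are placeholders; only the standard library
# is used.
import email.mime.multipart
import email.mime.text
import smtplib
import sys

def send_html_report(subject, html_body, from_addr, to_addr, host,
                     dry_run=True):
    msg = email.mime.multipart.MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = from_addr
    msg['To'] = to_addr
    msg.attach(email.mime.text.MIMEText(html_body, 'html', 'utf-8'))
    if dry_run:
        sys.stdout.write(msg.as_string() + "\n")
    else:
        s = smtplib.SMTP(host)
        s.sendmail(from_addr, [to_addr], msg.as_string())
        s.quit()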
Exemple #43
0
 def _cp_artifacts(self, src, dest, patts):
     """Copy artifacts out of the build """
     for patt in patts:
         for file in glob.glob(src + patt):
             shutil.copy(file, dest)
             logger.info(file + " --> " + dest)
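
# Standalone equivalent of the loop above, with hypothetical source,
# destination, and pattern values.
import glob
import shutil

for artifact in glob.glob("/path/to/CMakeFiles/bench.dir" + "/*.s"):
    shutil.copy(artifact, "/tmp/bench.report")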
Exemple #44
0
    def _parse_lit_output(self, path, data, cmake_vars, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash',
            'link_time': 'compile',
            'size.__text': 'code_size',
            'mem_bytes': 'mem',
            'link_mem_bytes': 'mem'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str,
            'link_time': float,
            'size.__text': float,
            'mem_bytes': float,
            'link_mem_bytes': float
        }

        # We don't currently use the test info.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []
        no_errors = True

        for test_data in data['tests']:
            code = test_data['code']
            raw_name = test_data['name']

            split_name = raw_name.split(' :: ', 1)
            if len(split_name) > 1:
                name = split_name[1]
            else:
                name = split_name[0]

            if name.endswith('.test'):
                name = name[:-5]
            name = 'nts.' + name

            # If --single-result is given, exit based on
            # --single-result-predicate
            is_pass = self._is_pass_code(code)
            if self.opts.single_result and \
               raw_name == self.opts.single_result + '.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in sorted(test_data['metrics'].items()):
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue

                    if k not in LIT_METRIC_TO_LNT or \
                       LIT_METRIC_TO_LNT[k] in ignore:
                        continue
                    server_name = name + '.' + LIT_METRIC_TO_LNT[k]

                    if k == 'link_time' or k == 'link_mem_bytes':
                        # Move link time into a second benchmark's
                        # compile-time.
                        server_name = name + '-link.' + LIT_METRIC_TO_LNT[k]

                    test_samples.append(
                        lnt.testing.TestSamples(server_name,
                                                [v],
                                                test_info,
                                                LIT_METRIC_CONV_FN[k]))

            if code == 'NOEXE':
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL],
                                            test_info))
                no_errors = False

            elif not is_pass:
                lnt_code = self._get_lnt_code(test_data['code'])
                test_samples.append(
                    lnt.testing.TestSamples(name + '.exec.status',
                                            [lnt_code], test_info))
                no_errors = False

        # Now import the profiles in parallel.
        if profiles_to_import:
            logger.info('Importing %d profiles with %d threads...' %
                        (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend([sample
                                     for sample in samples
                                     if sample is not None])
            except multiprocessing.TimeoutError:
                logger.warning('Profiles had not completed importing after ' +
                               '%s seconds.' % TIMEOUT)
                logger.info('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {
            'tag': 'nts',
            'no_errors': no_errors,
        }
        run_info.update(self._get_cc_info(cmake_vars))
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order

        machine_info = {
        }

        machine = lnt.testing.Machine(self.opts.label, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
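
# A minimal sketch of the name mapping performed above: a lit result such as
# "test-suite :: SingleSource/Benchmarks/Foo.test" reporting "exec_time"
# becomes an LNT sample named "nts.SingleSource/Benchmarks/Foo.exec". The
# metric table here is only a subset of LIT_METRIC_TO_LNT.
LIT_TO_LNT_SUBSET = {'exec_time': 'exec', 'compile_time': 'compile'}

def lnt_sample_name(raw_name, lit_metric):
    name = raw_name.split(' :: ', 1)[-1]
    if name.endswith('.test'):
        name = name[:-5]
    return 'nts.' + name + '.' + LIT_TO_LNT_SUBSET[lit_metric]

# lnt_sample_name('test-suite :: SingleSource/Benchmarks/Foo.test', 'exec_time')
# -> 'nts.SingleSource/Benchmarks/Foo.exec'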
Exemple #45
0
 def _check_call(self, *args, **kwargs):
     logger.info('Execute: %s' % ' '.join(args[0]))
     if 'cwd' in kwargs:
         logger.info('          (In %s)' % kwargs['cwd'])
     return subprocess.check_call(*args, **kwargs)
Exemple #46
0
def action_send_daily_report(instance_path, address, database, testsuite, host,
                             from_address, today, subject_prefix, dry_run,
                             days, filter_machine_regex):
    """send a daily report email"""
    import contextlib
    import datetime
    import email.mime.multipart
    import email.mime.text
    import lnt.server.reporting.dailyreport
    import smtplib

    # Load the LNT instance.
    instance = lnt.server.instance.Instance.frompath(instance_path)
    config = instance.config

    # Get the database.
    with contextlib.closing(config.get_database(database)) as db:
        session = db.make_session()

        # Get the testsuite.
        ts = db.testsuite[testsuite]

        if today:
            date = datetime.datetime.utcnow()
        else:
            # Get a timestamp to use to derive the daily report to generate.
            latest = session.query(ts.Run).\
                order_by(ts.Run.start_time.desc()).limit(1).first()

            # If we found a run, use its start time (rounded up to the next
            # hour, so we make sure it gets included).
            if latest:
                date = latest.start_time + datetime.timedelta(hours=1)
            else:
                # Otherwise, just use now.
                date = datetime.datetime.utcnow()

        # Generate the daily report.
        logger.info("building report data...")
        report = lnt.server.reporting.dailyreport.DailyReport(
            ts,
            year=date.year,
            month=date.month,
            day=date.day,
            day_start_offset_hours=date.hour,
            for_mail=True,
            num_prior_days_to_include=days,
            filter_machine_regex=filter_machine_regex)
        report.build(session)

        logger.info("generating HTML report...")
        ts_url = "%s/db_%s/v4/%s" \
            % (config.zorgURL, database, testsuite)
        subject = "Daily Report: %04d-%02d-%02d" % (report.year, report.month,
                                                    report.day)
        html_report = report.render(ts_url, only_html_body=False)

        if subject_prefix is not None:
            subject = "%s %s" % (subject_prefix, subject)

        # Form the multipart email message.
        msg = email.mime.multipart.MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = from_address
        msg['To'] = address
        msg.attach(email.mime.text.MIMEText(html_report, "html"))

        # Send the report.
        if not dry_run:
            s = smtplib.SMTP(host)
            s.sendmail(from_address, [address], msg.as_string())
            s.quit()
        else:
            out = sys.stdout
            out.write("From: %s\n" % msg['From'])
            out.write("To: %s\n" % msg['To'])
            out.write("Subject: %s\n" % msg['Subject'])
            out.write("=== html report\n")
            out.write(html_report + "\n")
Exemple #47
0
    def run_test(self, opts):

        if self.opts.cc is not None:
            self.opts.cc = resolve_command_path(self.opts.cc)

            if not lnt.testing.util.compilers.is_valid(self.opts.cc):
                self._fatal('--cc does not point to a valid executable.')

            # If there was no --cxx given, attempt to infer it from the --cc.
            if self.opts.cxx is None:
                self.opts.cxx = \
                    lnt.testing.util.compilers.infer_cxx_compiler(self.opts.cc)
                if self.opts.cxx is not None:
                    logger.info("Inferred C++ compiler under test as: %r"
                                % (self.opts.cxx,))
                else:
                    self._fatal("unable to infer --cxx - set it manually.")
            else:
                self.opts.cxx = resolve_command_path(self.opts.cxx)

            if not os.path.exists(self.opts.cxx):
                self._fatal("invalid --cxx argument %r, does not exist"
                            % (self.opts.cxx))

        if opts.test_suite_root is None:
            self._fatal('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            self._fatal("invalid --test-suite argument, does not exist: %r" % (
                opts.test_suite_root))
        opts.test_suite_root = os.path.abspath(opts.test_suite_root)

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                self._fatal(
                    "invalid --test-externals argument, does not exist: %r" % (
                        opts.test_suite_externals,))
            opts.test_suite_externals = os.path.abspath(
                opts.test_suite_externals)

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            self._fatal("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            self._fatal("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            self._fatal("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                self._fatal("Run under wrapper not found (looked for %s)" %
                            opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result

        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(self.opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)

            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                self._fatal("--only-test argument not understood (must be a " +
                            " test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            self._fatal("--single-result must be given a single test name, "
                        "not a directory name")

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        if opts.diagnose:
            if not opts.only_test:
                self._fatal("--diagnose requires --only-test")

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        cmakecache = os.path.join(self._base_path, 'CMakeCache.txt')
        self.configured = not self.opts.run_configure and \
            os.path.exists(cmakecache)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()

        # configure, so we can extract toolchain information from the cmake
        # output.
        self._configure_if_needed()

        # Verify that we can actually find a compiler before continuing
        cmake_vars = self._extract_cmake_vars_from_cache()
        if "CMAKE_C_COMPILER" not in cmake_vars or \
                not os.path.exists(cmake_vars["CMAKE_C_COMPILER"]):
            self._fatal(
                "Couldn't find C compiler (%s). Maybe you should specify --cc?"
                % cmake_vars.get("CMAKE_C_COMPILER"))

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            logger.info("Increasing number of execution samples to %d" %
                        opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info(cmake_vars)
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            opts.label += "__%s__%s" %\
                (cc_nick, cc_info['cc_target'].split('-')[0])
        logger.info('Using nickname: %r' % opts.label)

        # When we can't detect the clang version we use 0 instead. That is a
        # bad failure mode because all of the data ends up under order 0, so
        # the user must supply a run order explicitly.
        if opts.run_order is None:
            cc_info = self._get_cc_info(cmake_vars)
            if cc_info['inferred_run_order'] == 0:
                fatal("Cannot detect compiler version. Specify --run-order"
                      " to manually define it.")

        # Now do the actual run.
        reports = []
        json_reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            # only gather perf profiles on a single run.
            p = i == 0 and self.opts.use_perf in ('profile', 'all')
            run_report, json_data = self.run(cmake_vars, compile=c, test=e,
                                             profile=p)
            reports.append(run_report)
            json_reports.append(json_data)

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        if opts.output:
            with open(opts.output, 'w') as fd:
                fd.write(report.render())

        xml_report_path = os.path.join(self._base_path,
                                       'test-results.xunit.xml')

        str_template = _lit_json_to_xunit_xml(json_reports)
        with open(xml_report_path, 'w') as fd:
            fd.write(str_template)

        csv_report_path = os.path.join(self._base_path,
                                       'test-results.csv')
        str_template = _lit_json_to_csv(json_reports)
        with open(csv_report_path, 'w') as fd:
            fd.write(str_template)

        return self.submit(report_path, self.opts, 'nts')
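
# A minimal sketch of the multisample scheduling above: run
# max(exec_multisample, compile_multisample) iterations, gather compile data
# on the first `compile_multisample` runs, execution data on the first
# `exec_multisample` runs, and a perf profile only on the first run.
def sample_plan(exec_multisample, compile_multisample, profiling=False):
    plan = []
    for i in range(max(exec_multisample, compile_multisample)):
        plan.append({'compile': i < compile_multisample,
                     'test': i < exec_multisample,
                     'profile': profiling and i == 0})
    return plan

# sample_plan(3, 1, profiling=True) yields three entries; only the first has
# 'compile' and 'profile' set to True.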
Exemple #48
0
def update_testsuite(engine, session, db_key_name):
    class Run(object):
        pass

    class Order(object):
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    meta = MetaData(bind=engine)

    # Autoload the Run and Order tables.
    order_table = Table('%s_Order' % db_key_name, meta, autoload=True)
    run_table = Table('%s_Run' % db_key_name, meta, autoload=True)

    sqlalchemy.orm.mapper(Order, order_table)
    sqlalchemy.orm.mapper(Run, run_table)

    # Scan each run that has no report version and possibly recompute the
    # run order.
    logger.info("updating runs")
    all_runs = session.query(Run).\
        filter(sqlalchemy.not_(Run.Parameters.like(
                '%["__report_version__"%'))).all()
    for i, run in enumerate(all_runs):
        if i % 1000 == 999:
            logger.info("update run %d of %d" % (i + 1, len(all_runs)))

        # Extract the parameters.
        run_info = dict(json.loads(run.Parameters))

        # Sanity check this was an inferred run order.
        orig_order, = session.query(Order.llvm_project_revision).\
            filter(Order.ID == run.OrderID).first()
        inferred_run_order = run_info.get('inferred_run_order')

        if orig_order is None or (orig_order != inferred_run_order and
                                  inferred_run_order is not None):
            continue

        # Trim the whitespace on the run order.
        run_order = orig_order.strip()

        # If this was a production Clang build, try to recompute the src tag.
        if 'clang' in run_info.get('cc_name', '') and \
                run_info.get('cc_build') == 'PROD' and \
                run_info.get('cc_src_tag') and \
                run_order == run_info['cc_src_tag'].strip():
            # Extract the version line.
            version_ln = None
            for ln in run_info.get('cc_version', '').split('\n'):
                if ' version ' in ln:
                    version_ln = ln
                    break

            # Extract the build string.
            if version_ln:
                m = re.match(r'(.*) version ([^ ]*) (\([^(]*\))(.*)',
                             version_ln)
                if m:
                    cc_name, cc_version_num, cc_build_string, cc_extra = \
                        m.groups()
                    m = re.search('clang-([0-9.]*)', cc_build_string)
                    if m:
                        run_order = m.group(1)

        # Update the run info.
        run_info['inferred_run_order'] = run_order
        run_info['__report_version__'] = '1'
        run.Parameters = json.dumps(sorted(run_info.items()))

        if run_order != orig_order:
            # Lookup the new run order.
            result = session.query(Order.ID).\
                filter(Order.llvm_project_revision == run_order).first()

            # If the result exists...
            if result is not None:
                order_id, = result
            else:
                # It doesn't exist, so we need to create a new run order. We
                # will rebuild all the links at the end.
                order = Order(llvm_project_revision=run_order)
                session.add(order)
                session.flush()
                order_id = order.ID

            run.OrderID = order_id

    # Drop any now-unused orders.
    logger.info("deleting unused orders")
    session.query(Order) \
        .filter(sqlalchemy.not_(sqlalchemy.sql.exists()
                .where(Run.OrderID == Order.ID))) \
        .delete(synchronize_session=False)

    # Rebuild all the previous/next links for the run orders.
    logger.info("rebuilding run order links")

    def parse_run_order(order):
        version = order.llvm_project_revision.strip()
        items = version.split('.')
        for i, item in enumerate(items):
            if item.isdigit():
                items[i] = int(item)
        return tuple(items)

    orders = session.query(Order).all()
    orders.sort(key=parse_run_order)
    for i, order in enumerate(orders):
        if i == 0:
            order.PreviousOrder = None
        else:
            order.PreviousOrder = orders[i-1].ID
        if i + 1 == len(orders):
            order.NextOrder = None
        else:
            order.NextOrder = orders[i+1].ID

    session.flush()
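
# Standalone sketch of the run-order sort key used above: split the project
# revision on '.' and compare numeric components as integers so that '10.1'
# sorts after '9.2'.
def order_key(revision):
    return tuple(int(part) if part.isdigit() else part
                 for part in revision.strip().split('.'))

# sorted(['10.1', '9.2', '10.0'], key=order_key) -> ['9.2', '10.0', '10.1']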
Exemple #49
0
    def run_test(self, opts):

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            self._fatal('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                logger.info("inferred C++ compiler under test as: %r" %
                            (opts.cxx, ))

        if opts.cxx is None:
            self._fatal('--cxx is required (and could not be inferred)')

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            self._fatal("unable to determine absolute path for --cc: %r" %
                        (opts.cc, ))
        if not os.path.exists(cxx_abs):
            self._fatal("unable to determine absolute path for --cc: %r" %
                        (opts.cc, ))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >> sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                self._fatal("sandbox output directory %r already exists!" %
                            (g_output_dir, ))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(
                file_log_handler(os.path.join(output_dir, 'test.log')))
            log.addHandler(stderr_log_handler())
            return log

        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v',
                                              '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            logger.info("inferred run order to be: %r" %
                        (variables['run_order'], ))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            logger.info(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [
                string.split(' ') for string in opts.flags_to_test
            ]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(
            get_tests(opts.test_suite_externals, opts.test_subdir,
                      flags_to_test, jobs_to_test, configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >> sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >> sys.stderr, '  %s' % (name, )
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                self._fatal(("invalid test names %s, use --show-tests to "
                             "see available tests") %
                            (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [
                re.compile(pattern) for pattern in opts.test_filters
            ]

            # Form the list of tests.
            tests_to_run = [
                test for test in all_tests if
                (test[0] in requested_tests or
                 [True for filter in test_filters if filter.search(test[0])])
            ]
        if not tests_to_run:
            self._fatal("no tests requested "
                        "(invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        no_errors = True
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples), )
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples), )
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' %
                           (num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name + '.status',
                                                [lnt.testing.FAIL]))
                    no_errors = False
                if samples:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name, samples))
        run_info['no_errors'] = no_errors
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts, ts_name='compile')

        return server_report
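
# Standalone sketch of the per-test summary logged above: the median and
# median absolute deviation (MAD) of a sample list, computed here without the
# project's stats helpers.
def median(samples):
    ordered = sorted(samples)
    n = len(ordered)
    mid = n // 2
    if n % 2:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2.0

def median_absolute_deviation(samples):
    m = median(samples)
    return median([abs(x - m) for x in samples])

# median([1.0, 2.0, 10.0]) -> 2.0
# median_absolute_deviation([1.0, 2.0, 10.0]) -> 1.0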
Exemple #50
0
    def run_test(self, opts):

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            self._fatal('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                logger.info("inferred C++ compiler under test as: %r" %
                            (opts.cxx,))

        if opts.cxx is None:
            self._fatal('--cxx is required (and could not be inferred)')

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            self._fatal("unable to determine absolute path for --cc: %r" % (
                opts.cc,))
        if not os.path.exists(cxx_abs):
            self._fatal("unable to determine absolute path for --cc: %r" % (
                opts.cc,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                self._fatal("sandbox output directory %r already exists!" % (
                    g_output_dir,))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h
            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(file_log_handler(os.path.join(output_dir,
                                                         'test.log')))
            log.addHandler(stderr_log_handler())
            return log
        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version',
                           ('/usr/bin/as', '-v', '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            logger.info("inferred run order to be: %r" %
                        (variables['run_order'],))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            logger.info(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [flag_string.split(' ')
                             for flag_string in opts.flags_to_test]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(get_tests(opts.test_suite_externals, opts.test_subdir,
                                   flags_to_test, jobs_to_test,
                                   configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >>sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >>sys.stderr, '  %s' % (name,)
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                self._fatal(("invalid test names %s, use --show-tests to "
                             "see available tests") %
                            (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [re.compile(pattern)
                            for pattern in opts.test_filters]

            # Form the list of tests.
            tests_to_run = [test
                            for test in all_tests
                            if (test[0] in requested_tests or
                                any(pattern.search(test[0])
                                    for pattern in test_filters))]
        if not tests_to_run:
            self._fatal("no tests requested "
                        "(invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        no_errors = True
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples),)
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples),)
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' % (
                    num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(lnt.testing.TestSamples(
                        test_name + '.status', [lnt.testing.FAIL]))
                    no_errors = False
                if samples:
                    testsamples.append(lnt.testing.TestSamples(
                        test_name, samples))
        run_info['no_errors'] = no_errors
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts, ts_name='compile')

        return server_report
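
A minimal sketch of how the reporting objects used above fit together, for reference. The constructors (lnt.testing.Machine, Run, TestSamples, Report) are the same ones the run method calls; the sample names and values are invented, and Report.render() is assumed here to serialize the report to JSON.

import datetime
import lnt.testing

# Invented samples; in the run above these come from each test_fn().
start = datetime.datetime.utcnow()
samples = [
    lnt.testing.TestSamples('compile.sketch-test.wall', [1.23, 1.25]),
    lnt.testing.TestSamples('compile.sketch-test.status',
                            [lnt.testing.FAIL]),
]
machine = lnt.testing.Machine('sketch-machine', {'hardware': 'unknown'})
run = lnt.testing.Run(start, start, info={'tag': 'compile',
                                          'run_order': '1'})
report = lnt.testing.Report(machine, run, samples)
with open('report.json', 'w') as f:
    f.write(report.render())
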
Exemple #51
0
def action_view_comparison(report_a, report_b, hostname, port, dry_run,
                           testsuite):
    """view a report comparison using a temporary server"""
    from .common import init_logger
    from lnt.util import logger
    from lnt.util.ImportData import import_and_report
    import contextlib
    import lnt.server.db.migrate
    import lnt.server.instance
    import lnt.server.ui.app
    import logging
    import os
    import shutil
    import sys
    import tempfile
    import thread
    import time
    import urllib
    import webbrowser

    init_logger(logging.ERROR)

    # Create a temporary directory to hold the instance.
    tmpdir = tempfile.mkdtemp(suffix='lnt')

    try:
        # Create a temporary instance.
        url = 'http://%s:%d' % (hostname, port)
        db_path = os.path.join(tmpdir, 'data.db')
        db_info = lnt.server.config.DBInfo(
            'sqlite:///%s' % (db_path, ), None,
            lnt.server.config.EmailConfig(False, '', '', []), "0")
        # _(self, name, zorgURL, dbDir, tempDir,
        # profileDir, secretKey, databases, blacklist):
        config = lnt.server.config.Config('LNT', url, db_path, tmpdir, None,
                                          "Not secret key.",
                                          {'default': db_info}, None, None)
        instance = lnt.server.instance.Instance(None, config)

        # Create the database.
        lnt.server.db.migrate.update_path(db_path)

        # Import the two reports.
        with contextlib.closing(config.get_database('default')) as db:
            session = db.make_session()
            r = import_and_report(config,
                                  'default',
                                  db,
                                  session,
                                  report_a,
                                  '<auto>',
                                  testsuite,
                                  select_machine='match')
            import_and_report(config,
                              'default',
                              db,
                              session,
                              report_b,
                              '<auto>',
                              testsuite,
                              select_machine='match')

            # Dispatch another thread to start the webbrowser.
            comparison_url = '%s/v4/nts/2?compare_to=1' % (url, )
            logger.info("opening comparison view: %s" % (comparison_url, ))

            if not dry_run:
                thread.start_new_thread(_start_browser, (comparison_url, True))

            # Run the webserver.
            app = lnt.server.ui.app.App.create_with_instance(instance)
            app.debug = True

            if dry_run:
                # Don't catch our exceptions; let them propagate.
                app.testing = True
                # Create a test client.
                client = app.test_client()
                response = client.get(comparison_url)
                assert response.status_code == 200, "Page did not return 200."
            else:
                app.run(hostname, port, use_reloader=False)
    finally:
        shutil.rmtree(tmpdir)
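
The _start_browser helper dispatched above is referenced but not shown. As an assumption (not the actual LNT helper), it presumably polls the temporary server until it answers and then opens the page; a sketch:

import time
import urllib
import webbrowser


def _start_browser(url, debug=False):
    # Sketch only: wait until the server at `url` starts answering, then
    # open a browser on it. Python 2 urllib, matching the imports above.
    while True:
        try:
            urllib.urlopen(url)
            break
        except IOError:
            time.sleep(0.1)
    webbrowser.open(url)
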
Exemple #52
0
    def _getOrCreateRun(self, session, run_data, machine, merge):
        """
        _getOrCreateRun(session, run_data, machine, merge) -> Run

        Add a new Run record from the given data (as recorded by the test
        interchange format).

        merge comes into play when there is already a run with the same order
        fields:
        - 'reject': Reject the submission (raise ValueError).
        - 'replace': Remove the existing submission(s), then add the new one.
        - 'append': Add the new submission alongside the existing ones.
        """

        # Extract the run parameters that define the order.
        run_parameters = run_data.copy()
        # Ignore incoming ids; we will create our own
        run_parameters.pop('id', None)

        # Added by REST API, we will replace as well.
        run_parameters.pop('order_by', None)
        run_parameters.pop('order_id', None)
        run_parameters.pop('machine_id', None)
        run_parameters.pop('imported_from', None)
        run_parameters.pop('simple_run_id', None)

        # Find the order record.
        order = self._getOrCreateOrder(session, run_parameters)
        new_id = None

        if merge != 'append':
            existing_runs = session.query(self.Run) \
                .filter(self.Run.machine_id == machine.id) \
                .filter(self.Run.order_id == order.id) \
                .all()
            if len(existing_runs) > 0:
                if merge == 'reject':
                    raise ValueError("Duplicate submission for '%s'" %
                                     order.name)
                elif merge == 'replace':
                    for previous_run in existing_runs:
                        logger.info("Duplicate submission for order %r: "
                                    "deleting previous run %r" %
                                    (order, previous_run))

                        # Keep the latest ID so the URL is still valid on replace
                        new_id = previous_run.id

                        session.delete(previous_run)
                else:
                    raise ValueError('Invalid Run mergeStrategy %r' % merge)

        # We'd like ISO 8601 timestamps, but will also accept the old format.
        try:
            start_time = aniso8601.parse_datetime(run_data['start_time'])
        except ValueError:
            start_time = datetime.datetime.strptime(run_data['start_time'],
                                                    "%Y-%m-%d %H:%M:%S")
        run_parameters.pop('start_time')

        try:
            end_time = aniso8601.parse_datetime(run_data['end_time'])
        except ValueError:
            end_time = datetime.datetime.strptime(run_data['end_time'],
                                                  "%Y-%m-%d %H:%M:%S")
        run_parameters.pop('end_time')

        run = self.Run(new_id, machine, order, start_time, end_time)

        # First, extract all of the specified run fields.
        for item in self.run_fields:
            value = run_parameters.pop(item.name, None)
            run.set_field(item, value)

        # Any remaining parameters are saved as a JSON-encoded dictionary.
        run.parameters = run_parameters
        session.add(run)
        return run
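
The timestamp handling above (prefer ISO 8601, fall back to the older "YYYY-MM-DD HH:MM:SS" form) reads naturally as a small helper; a sketch, with an illustrative name that is not part of LNT:

import datetime

import aniso8601


def _parse_submission_time(value):
    # Prefer ISO 8601 (e.g. '2019-07-01T12:00:00'); fall back to the older
    # space-separated format still produced by some submitters.
    try:
        return aniso8601.parse_datetime(value)
    except ValueError:
        return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
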
Exemple #53
0
    def _parse_lit_output(self, path, data, cmake_vars, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash',
            'link_time': 'compile',
            'size.__text': 'code_size',
            'mem_bytes': 'mem',
            'link_mem_bytes': 'mem'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str,
            'link_time': float,
            'size.__text': float,
            'mem_bytes': float,
            'link_mem_bytes': float
        }

        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []
        no_errors = True

        for test_data in data['tests']:
            code = test_data['code']
            raw_name = test_data['name']

            split_name = raw_name.split(' :: ', 1)
            if len(split_name) > 1:
                name = split_name[1]
            else:
                name = split_name[0]

            if name.endswith('.test'):
                name = name[:-5]
            name = 'nts.' + name

            # If --single-result is given, exit based on
            # --single-result-predicate
            is_pass = self._is_pass_code(code)
            if self.opts.single_result and \
               raw_name == self.opts.single_result + '.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in sorted(test_data['metrics'].items()):
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue

                    if k not in LIT_METRIC_TO_LNT or \
                       LIT_METRIC_TO_LNT[k] in ignore:
                        continue
                    server_name = name + '.' + LIT_METRIC_TO_LNT[k]

                    if k == 'link_time' or k == 'link_mem_bytes':
                        # Move link time into a second benchmark's
                        # compile-time.
                        server_name = name + '-link.' + LIT_METRIC_TO_LNT[k]

                    test_samples.append(
                        lnt.testing.TestSamples(server_name, [v], test_info,
                                                LIT_METRIC_CONV_FN[k]))

            if code == 'NOEXE':
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL], test_info))
                no_errors = False

            elif not is_pass:
                lnt_code = self._get_lnt_code(test_data['code'])
                test_samples.append(
                    lnt.testing.TestSamples(name + '.exec.status', [lnt_code],
                                            test_info))
                no_errors = False

        # Now import the profiles in parallel.
        if profiles_to_import:
            logger.info('Importing %d profiles with %d threads...' %
                        (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend(
                    [sample for sample in samples if sample is not None])
            except multiprocessing.TimeoutError:
                logger.warning('Profiles had not completed importing after '
                               '%s seconds.' % TIMEOUT)
                logger.info('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {
            'tag': 'nts',
            'no_errors': no_errors,
        }
        run_info.update(self._get_cc_info(cmake_vars))
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order

        machine_info = {}

        machine = lnt.testing.Machine(self.opts.label, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
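
For reference, _parse_lit_output only relies on a few keys of the lit JSON it is given: a top-level 'tests' list whose entries carry a 'name' of the form '<suite> :: <path>.test', a result 'code', and an optional 'metrics' mapping. A minimal hand-written example of that shape (the test names and numbers are invented):

# Shape implied by the parser above; values are illustrative only.
data = {
    'tests': [
        {
            'name': 'test-suite :: SingleSource/Benchmarks/sketch.test',
            'code': 'PASS',
            'metrics': {
                'compile_time': 0.42,
                'exec_time': 1.73,
                'hash': 'deadbeef',
            },
        },
        {
            # A binary that failed to build: no metrics, NOEXE code.
            'name': 'test-suite :: SingleSource/Benchmarks/broken.test',
            'code': 'NOEXE',
        },
    ],
}
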
Exemple #54
0
    def _getOrCreateRun(self, session, run_data, machine, merge):
        """
        _getOrCreateRun(session, run_data, machine, merge) -> Run

        Add a new Run record from the given data (as recorded by the test
        interchange format).

        merge comes into play when there is already a run with the same order
        fields:
        - 'reject': Reject the submission (raise ValueError).
        - 'replace': Remove the existing submission(s), then add the new one.
        - 'append': Add the new submission alongside the existing ones.
        """

        # Extract the run parameters that define the order.
        run_parameters = run_data.copy()
        # Ignore incoming ids; we will create our own
        run_parameters.pop('id', None)

        # Added by REST API, we will replace as well.
        run_parameters.pop('order_by', None)
        run_parameters.pop('order_id', None)
        run_parameters.pop('machine_id', None)
        run_parameters.pop('imported_from', None)
        run_parameters.pop('simple_run_id', None)

        # Find the order record.
        order = self._getOrCreateOrder(session, run_parameters)
        new_id = None

        if merge != 'append':
            existing_runs = session.query(self.Run) \
                .filter(self.Run.machine_id == machine.id) \
                .filter(self.Run.order_id == order.id) \
                .all()
            if len(existing_runs) > 0:
                if merge == 'reject':
                    raise ValueError("Duplicate submission for '%s'" %
                                     order.name)
                elif merge == 'replace':
                    for previous_run in existing_runs:
                        logger.info("Duplicate submission for order %r: "
                                    "deleting previous run %r" %
                                    (order, previous_run))

                        # Keep the latest ID so the URL is still valid on replace
                        new_id = previous_run.id

                        session.delete(previous_run)
                else:
                    raise ValueError('Invalid Run mergeStrategy %r' % merge)

        # We'd like ISO 8601 timestamps, but will also accept the old format.
        try:
            start_time = aniso8601.parse_datetime(run_data['start_time'])
        except ValueError:
            start_time = datetime.datetime.strptime(run_data['start_time'],
                                                    "%Y-%m-%d %H:%M:%S")
        run_parameters.pop('start_time')

        try:
            end_time = aniso8601.parse_datetime(run_data['end_time'])
        except ValueError:
            end_time = datetime.datetime.strptime(run_data['end_time'],
                                                  "%Y-%m-%d %H:%M:%S")
        run_parameters.pop('end_time')

        run = self.Run(new_id, machine, order, start_time, end_time)

        # First, extract all of the specified run fields.
        for item in self.run_fields:
            value = run_parameters.pop(item.name, None)
            run.set_field(item, value)

        # Any remaining parameters are saved as a JSON-encoded dictionary.
        run.parameters = run_parameters
        session.add(run)
        return run
Exemple #55
0
    def _configure(self, path, extra_cmake_defs=[], execute=True):
        cmake_cmd = self.opts.cmake

        defs = {}
        if self.opts.cc:
            defs['CMAKE_C_COMPILER'] = self.opts.cc
        if self.opts.cxx:
            defs['CMAKE_CXX_COMPILER'] = self.opts.cxx

        cmake_build_types = ('DEBUG', 'MINSIZEREL', 'RELEASE',
                             'RELWITHDEBINFO')
        if self.opts.cppflags or self.opts.cflags:
            all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
            defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)
            # Ensure that no flags get added based on build type when the user
            # explicitly specifies flags to use.
            for build_type in cmake_build_types:
                defs['CMAKE_C_FLAGS_'+build_type] = ""

        if self.opts.cppflags or self.opts.cxxflags:
            all_cxx_flags = ' '.join([self.opts.cppflags, self.opts.cxxflags])
            defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(all_cxx_flags)
            # Ensure that no flags get added based on build type when the user
            # explicitly specifies flags to use.
            for build_type in cmake_build_types:
                defs['CMAKE_CXX_FLAGS_'+build_type] = ""

        if self.opts.run_under:
            defs['TEST_SUITE_RUN_UNDER'] = \
                self._unix_quote_args(self.opts.run_under)
        if self.opts.benchmarking_only:
            defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
        if self.opts.only_compile:
            defs['TEST_SUITE_RUN_BENCHMARKS'] = 'Off'
        if self.opts.use_perf in ('time', 'all'):
            defs['TEST_SUITE_USE_PERF'] = 'ON'
        if self.opts.test_suite_externals:
            defs['TEST_SUITE_EXTERNALS_DIR'] = self.opts.test_suite_externals
        if self.opts.pgo and self.trained:
            defs['TEST_SUITE_PROFILE_USE'] = "On"
            defs['TEST_SUITE_PROFILE_GENERATE'] = "Off"
            if 'TEST_SUITE_RUN_TYPE' not in defs:
                defs['TEST_SUITE_RUN_TYPE'] = 'ref'

        for item in tuple(self.opts.cmake_defines) + tuple(extra_cmake_defs):
            k, v = item.split('=', 1)
            # make sure the overriding of the settings above also works
            # when the cmake-define-defined variable has a datatype
            # specified.
            key_no_datatype = k.split(':', 1)[0]
            if key_no_datatype in defs:
                del defs[key_no_datatype]
            defs[k] = v

        # We use 'cmake -LAH -N' later to find out the value of the
        # CMAKE_C_COMPILER and CMAKE_CXX_COMPILER variables.
        # 'cmake -LAH -N' will only return variables in the cache that have
        # a cmake type set. Therefore, explicitly set a 'FILEPATH' type on
        # these variables here, if they were untyped so far.
        if 'CMAKE_C_COMPILER' in defs:
            defs['CMAKE_C_COMPILER:FILEPATH'] = defs['CMAKE_C_COMPILER']
            del defs['CMAKE_C_COMPILER']
        if 'CMAKE_CXX_COMPILER' in defs:
            defs['CMAKE_CXX_COMPILER:FILEPATH'] = defs['CMAKE_CXX_COMPILER']
            del defs['CMAKE_CXX_COMPILER']

        lines = ['Configuring with {']
        for k, v in sorted(defs.items()):
            lines.append("  %s: '%s'" % (k, v))
        lines.append('}')

        if 'TEST_SUITE_REMOTE_HOST' in defs:
            self.remote_run = True

        # Prepare cmake cache if requested:
        cmake_flags = []
        for cache in self.opts.cmake_cache:
            if cache == "":
                continue
            # Shortcut for the common case.
            if not cache.endswith(".cmake") and "/" not in cache:
                cache = os.path.join(self._test_suite_dir(),
                                     "cmake/caches", cache + ".cmake")
            cache = os.path.abspath(cache)
            if not os.path.exists(cache):
                fatal("Could not find CMake cache file: " + cache)
            cmake_flags += ['-C', cache]

        for l in lines:
            logger.info(l)

        # Define compilers before specifying the cache files.
        early_defs = {}
        for key in ['CMAKE_C_COMPILER:FILEPATH',
                    'CMAKE_CXX_COMPILER:FILEPATH']:
            value = defs.pop(key, None)
            if value is not None:
                early_defs[key] = value

        cmake_cmd = ([cmake_cmd] +
                     ['-D%s=%s' % (k, v) for k, v in early_defs.items()] +
                     cmake_flags + [self._test_suite_dir()] +
                     ['-D%s=%s' % (k, v) for k, v in defs.items()])
        if execute:
            self._check_call(cmake_cmd, cwd=path)

        return cmake_cmd
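
The -D handling above lets a user-supplied define override a computed default even when the user adds an explicit CMake :TYPE suffix; a tiny sketch of that rule in isolation (the flag values are illustrative):

# Override rule used in _configure: a define with an explicit :TYPE
# replaces an untyped default of the same name.
defs = {'CMAKE_C_FLAGS': '-O3'}               # default computed earlier
item = 'CMAKE_C_FLAGS:STRING=-O2 -g'          # user-supplied cmake define
k, v = item.split('=', 1)
key_no_datatype = k.split(':', 1)[0]          # 'CMAKE_C_FLAGS'
if key_no_datatype in defs:
    del defs[key_no_datatype]
defs[k] = v
assert defs == {'CMAKE_C_FLAGS:STRING': '-O2 -g'}
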