Example #1
    def deserialize(f, nm='nm', objdump='objdump', propagateExceptions=False):
        f = f.name

        if os.path.getsize(f) == 0:
            # Empty file - exit early.
            return None

        try:
            data = cPerf.importPerf(f, nm, objdump)

            # Go through the data and convert counter values to percentages.
            for f in data['functions'].values():
                fc = f['counters']
                for l in f['data']:
                    for k, v in l[0].items():
                        l[0][k] = 100.0 * float(v) / fc[k]
                for k, v in fc.items():
                    fc[k] = 100.0 * v / data['counters'][k]

            return ProfileV1(data)

        except Exception:
            if propagateExceptions:
                raise
            logger.warning(traceback.format_exc())
            return None
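Below is a small, self-contained sketch of the percentage conversion done above, run on a made-up data layout (the real structure comes from cPerf.importPerf, which is not shown here).

data = {
    'counters': {'cycles': 1000.0},
    'functions': {
        'main': {
            'counters': {'cycles': 400.0},
            # Each entry in 'data' is a list whose first element is the
            # per-line counter dict; that is the only part touched here.
            'data': [
                [{'cycles': 100.0}],
                [{'cycles': 300.0}],
            ],
        },
    },
}

for fn in data['functions'].values():
    fc = fn['counters']
    # Line-level counters become percentages of the function total.
    for line in fn['data']:
        for k, v in line[0].items():
            line[0][k] = 100.0 * float(v) / fc[k]
    # Function-level counters become percentages of the whole profile.
    for k, v in fc.items():
        fc[k] = 100.0 * v / data['counters'][k]

print(data['functions']['main']['counters'])  # {'cycles': 40.0}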
Example #2
def get_source_version(path):
    """get_source_version(path) -> str or None

    Given the path to a revision controlled source tree, return a revision
    number, hash, etc. which identifies the source version.
    """

    if os.path.exists(os.path.join(path, ".svn")):
        return commands.capture(
            ['/bin/sh', '-c', 'cd "%s" && svnversion' % path]).strip()
    elif os.path.exists(os.path.join(path, ".git", "svn")):
        # git-svn is pitifully slow, extract the revision manually.
        res = commands.capture(
            ['/bin/sh', '-c', ('cd "%s" && '
                               'git log -1') % path]).strip()
        last_line = res.split("\n")[-1]
        m = _git_svn_id_re.match(last_line)
        if not m:
            logger.warning("unable to understand git svn log: %r" % res)
            return
        return m.group(1)
    elif os.path.exists(os.path.join(path, ".git")):
        return commands.capture([
            '/bin/sh', '-c', ('cd "%s" && '
                              'git log -1 --pretty=format:%%H') % path
        ]).strip()
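For reference, a rough stdlib-only equivalent of the plain-git branch (git_head_revision is a hypothetical name; commands.capture above is the project's own wrapper around subprocess):

import subprocess

def git_head_revision(path):
    # 'git -C <path>' replaces the 'cd "<path>" && ...' shell idiom used above.
    out = subprocess.check_output(
        ['git', '-C', path, 'log', '-1', '--pretty=format:%H'])
    return out.strip()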
Example #3
def _start_browser(url, debug=False):
    from lnt.util import logger

    def url_is_up(url):
        try:
            o = urllib.urlopen(url)
        except IOError:
            return False
        o.close()
        return True

    # Wait for server to start...
    if debug:
        logger.info('waiting for server to start...')
    for i in range(10000):
        if url_is_up(url):
            break
        if debug:
            sys.stderr.write('.')
            sys.stderr.flush()
        time.sleep(.01)
    else:
        logger.warning('unable to detect that server started')

    if debug:
        logger.info('opening webbrowser...')
    webbrowser.open(url)
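The loop above relies on Python's for/else: the else suite runs only when the loop finishes without hitting break, i.e. the server never came up. A stripped-down sketch of the same polling pattern:

import time

def wait_until(predicate, attempts=100, delay=0.01):
    for _ in range(attempts):
        if predicate():
            break
        time.sleep(delay)
    else:
        # Reached only when no attempt succeeded -- mirrors the warning above.
        return False
    return True

For example, wait_until(lambda: url_is_up(url)) reproduces the behaviour above with a configurable attempt count.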
Example #4
    def post():
        """Add a new run into the lnt database"""
        session = request.session
        db = request.get_db()
        data = request.data
        select_machine = request.values.get('select_machine', 'match')
        merge = request.values.get('merge', None)
        result = lnt.util.ImportData.import_from_string(
            current_app.old_config, g.db_name, db, session, g.testsuite_name,
            data, select_machine=select_machine, merge_run=merge)

        error = result['error']
        if error is not None:
            response = jsonify(result)
            response.status = '400'
            logger.warning("%s: Submission rejected: %s" %
                           (request.url, error))
            return response

        new_url = ('%sapi/db_%s/v4/%s/runs/%s' %
                   (request.url_root, g.db_name, g.testsuite_name,
                    result['run_id']))
        result['result_url'] = new_url
        response = jsonify(result)
        response.status = '301'
        response.headers.add('Location', new_url)
        return response
Example #5
    def post():
        """Add a new run into the lnt database"""
        session = request.session
        db = request.get_db()
        data = request.data
        select_machine = request.values.get('select_machine', 'match')
        merge = request.values.get('merge', None)
        result = lnt.util.ImportData.import_from_string(
            current_app.old_config,
            g.db_name,
            db,
            session,
            g.testsuite_name,
            data,
            select_machine=select_machine,
            merge_run=merge)

        error = result['error']
        if error is not None:
            response = jsonify(result)
            response.status = '400'
            logger.warning("%s: Submission rejected: %s" %
                           (request.url, error))
            return response

        new_url = (
            '%sapi/db_%s/v4/%s/runs/%s' %
            (request.url_root, g.db_name, g.testsuite_name, result['run_id']))
        result['result_url'] = new_url
        response = jsonify(result)
        response.status = '301'
        response.headers.add('Location', new_url)
        return response
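Both variants of post() follow the same Flask response pattern: a 400 with a JSON body on error, otherwise a 301 plus a Location header pointing at the new resource. A minimal, self-contained sketch with a hypothetical route and payload:

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/runs', methods=['POST'])
def create_run():
    payload = request.get_json(silent=True) or {}
    if 'data' not in payload:
        response = jsonify({'error': 'no data submitted'})
        response.status = '400'
        return response
    new_url = request.url_root + 'runs/1'  # placeholder resource id
    response = jsonify({'result_url': new_url})
    response.status = '301'
    response.headers.add('Location', new_url)
    return response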
Example #6
def load_rules():
    """
    Load available rules scripts from a directory.

    Rules are organized as:

    <current dir>/rules/
    <current dir>/rules/rule_.*.py
    ...
    """

    rule_script_rex = re.compile(r'^rule_(.*)\.py$')
    rule_scripts = {}

    rules_path = os.path.join(os.path.dirname(__file__), 'rules')
    for item in os.listdir(rules_path):
        # Ignore certain known non-scripts.
        if item in ('README.txt', '__init__.py') or item.endswith('.pyc'):
            continue

        # Ignore non-matching files.
        m = rule_script_rex.match(item)
        if m is None:
            logger.warning("ignoring item {} in rules directory: {}".format(
                item, rules_path))
            continue

        name = m.groups()[0]
        # Allow rules to be disabled by name
        if name.endswith("disabled"):
            continue

        rule_scripts[name] = os.path.join(rules_path, item)

    return rule_scripts
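For illustration, this is how the regex above classifies a few hypothetical directory entries (a *_disabled match is later skipped by name):

import re

rule_script_rex = re.compile(r'^rule_(.*)\.py$')
for item in ['rule_run_acceptance.py', 'rule_old_check_disabled.py',
             'README.txt', 'rule_run_acceptance.pyc']:
    m = rule_script_rex.match(item)
    print('%s -> %s' % (item, m.groups()[0] if m else None))
# rule_run_acceptance.py -> run_acceptance
# rule_old_check_disabled.py -> old_check_disabled  (then dropped by name)
# README.txt -> None
# rule_run_acceptance.pyc -> None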
Example #7
def _mk_index_on(engine, ts_name):
    fc_table = introspect_table(engine, "{}_FieldChangeV2".format(ts_name))

    fast_fc_lookup = Index('idx_fast_fieldchange_lookup', fc_table.c.StartOrderID)
    try:
        fast_fc_lookup.create(engine)
    except (sqlalchemy.exc.OperationalError, sqlalchemy.exc.ProgrammingError) as e:
        logger.warning("Skipping index creation on {}, because of {}".format(fc_table.name, e.message))
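introspect_table is an LNT helper, but the "create the index, tolerate it already existing" pattern is plain SQLAlchemy. A self-contained sketch against a throwaway in-memory SQLite database (using str(e) instead of the Python 2-only e.message):

import sqlalchemy
from sqlalchemy import Column, Integer, Index, MetaData, Table, create_engine

engine = create_engine('sqlite://')
metadata = MetaData()
fc_table = Table('FieldChangeV2', metadata, Column('StartOrderID', Integer))
metadata.create_all(engine)

idx = Index('idx_fast_fieldchange_lookup', fc_table.c.StartOrderID)
try:
    idx.create(engine)
    idx.create(engine)  # second attempt fails: the index already exists
except (sqlalchemy.exc.OperationalError, sqlalchemy.exc.ProgrammingError) as e:
    print("Skipping index creation on %s, because of %s" % (fc_table.name, e))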
Example #8
def _mk_index_on(engine, ts_name):
    fc_table = introspect_table(engine, "{}_RegressionIndicator".format(ts_name))

    fast_fc_lookup = Index('idx_fast_ri_lookup', fc_table.c.RegressionID)
    try:
        fast_fc_lookup.create(engine)
    except (sqlalchemy.exc.OperationalError, sqlalchemy.exc.ProgrammingError) as e:
        logger.warning("Skipping index creation on {}, because of {}".format(fc_table.name, e.message))
Example #9
File: app.py Project: quangvd3/lnt
 def close(self):
     t = self.elapsed_time()
     if t > 10:
         logger.warning("Request {} took {}s".format(self.url, t))
     db = getattr(self, 'db', None)
     if db is not None:
         db.close()
     return super(Request, self).close()
Example #10
 def timed(*args, **kw):
     t_start = time.time()
     result = func(*args, **kw)
     t_end = time.time()
     delta = t_end - t_start
     msg = 'timer: %r %2.2f sec' % (func.__name__, delta)
     if delta > 10:
         logger.warning(msg)
     else:
         logger.info(msg)
     return result
Example #11
def _mk_index_on(engine, ts_name):
    fc_table = introspect_table(engine, "{}_FieldChangeV2".format(ts_name))

    fast_fc_lookup = Index('idx_fast_fieldchange_lookup',
                           fc_table.c.StartOrderID)
    try:
        fast_fc_lookup.create(engine)
    except (sqlalchemy.exc.OperationalError,
            sqlalchemy.exc.ProgrammingError) as e:
        logger.warning("Skipping index creation on {}, because of {}".format(
            fc_table.name, e.message))
Example #12
 def timed(*args, **kw):
     t_start = time.time()
     result = func(*args, **kw)
     t_end = time.time()
     short_args = repr(args)
     if len(short_args) > 80:
         short_args = short_args[0:80]
     delta = t_end - t_start
     msg = '%r (%s, %r) %2.2f sec' % (func.__name__, short_args, kw, delta)
     if delta > 10:
         logger.warning(msg)
     else:
         logger.info(msg)
     return result
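The timed() snippets above are the inner wrappers of a timing decorator; the enclosing decorator, which supplies the closed-over func and logger, plausibly looks like this sketch (names other than timed are assumptions):

import logging
import time
from functools import wraps

logger = logging.getLogger(__name__)

def timed(func):
    @wraps(func)
    def wrapper(*args, **kw):
        t_start = time.time()
        result = func(*args, **kw)
        delta = time.time() - t_start
        msg = 'timer: %r %2.2f sec' % (func.__name__, delta)
        # Slow calls are surfaced as warnings, everything else as info.
        (logger.warning if delta > 10 else logger.info)(msg)
        return result
    return wrapper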
Example #13
    def _lit(self, path, test, profile):
        lit_cmd = self.opts.lit

        output_json_path = tempfile.NamedTemporaryFile(prefix='output',
                                                       suffix='.json',
                                                       dir=path,
                                                       delete=False)
        output_json_path.close()

        subdir = path
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            subdir = os.path.join(*components)

        extra_args = []
        if not test:
            extra_args = ['--no-execute']

        nr_threads = self._test_threads()
        if profile:
            if nr_threads != 1:
                logger.warning('Gathering profiles with perf requires -j 1 ' +
                               'as perf record cannot be run multiple times ' +
                               'simultaneously. Overriding -j %s to -j 1' %
                               nr_threads)
                nr_threads = 1
            extra_args += ['--param', 'profile=perf']
            if self.opts.perf_events:
                extra_args += [
                    '--param',
                    'perf_profile_events=%s' % self.opts.perf_events
                ]

        logger.info('Testing...')
        try:
            self._check_call([
                lit_cmd, '-v', '-j',
                str(nr_threads), subdir, '-o', output_json_path.name
            ] + extra_args)
        except subprocess.CalledProcessError:
            # LIT is expected to exit with code 1 if there were test
            # failures!
            pass
        try:
            return json.loads(open(output_json_path.name).read())
        except ValueError as e:
            fatal("Running test-suite did not create valid json report "
                  "in {}: {}".format(output_json_path.name, e.message))
Example #14
    def _lit(self, path, test, profile):
        lit_cmd = self.opts.lit

        output_json_path = tempfile.NamedTemporaryFile(prefix='output',
                                                       suffix='.json',
                                                       dir=path,
                                                       delete=False)
        output_json_path.close()

        subdir = path
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            subdir = os.path.join(*components)

        extra_args = []
        if not test:
            extra_args = ['--no-execute']

        nr_threads = self._test_threads()
        if profile:
            if nr_threads != 1:
                logger.warning('Gathering profiles with perf requires -j 1 ' +
                               'as perf record cannot be run multiple times ' +
                               'simultaneously. Overriding -j %s to -j 1' %
                               nr_threads)
                nr_threads = 1
            extra_args += ['--param', 'profile=perf']
            if self.opts.perf_events:
                extra_args += ['--param',
                               'perf_profile_events=%s' %
                               self.opts.perf_events]

        logger.info('Testing...')
        try:
            self._check_call([lit_cmd,
                              '-v',
                              '-j', str(nr_threads),
                              subdir,
                              '-o', output_json_path.name] + extra_args)
        except subprocess.CalledProcessError:
            # LIT is expected to exit with code 1 if there were test
            # failures!
            pass
        try:
            return json.loads(open(output_json_path.name).read())
        except ValueError as e:
            fatal("Running test-suite did not create valid json report "
                  "in {}: {}".format(output_json_path.name, e.message))
Example #15
def _importProfile(name_filename):
    """_importProfile imports a single profile. It must be at the top level
    (and not within TestSuiteTest) so that multiprocessing can import it
    correctly."""
    name, filename = name_filename

    if not os.path.exists(filename):
        logger.warning('Profile %s does not exist' % filename)
        return None

    pf = lnt.testing.profile.profile.Profile.fromFile(filename)
    if not pf:
        return None

    pf.upgrade()
    profilefile = pf.render()
    return lnt.testing.TestSamples(name + '.profile', [profilefile], {}, str)
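The top-level placement matters because multiprocessing pickles workers by module-qualified name; a function nested inside a class or closure cannot be re-imported by the child processes. A tiny self-contained demo of the same map_async/timeout pattern used by the caller:

import multiprocessing

def _square(n):  # must live at module scope to be picklable
    return n * n

if __name__ == '__main__':
    pool = multiprocessing.Pool()
    waiter = pool.map_async(_square, [1, 2, 3, 4])
    try:
        print(waiter.get(5))  # [1, 4, 9, 16], unless the workers take > 5s
    except multiprocessing.TimeoutError:
        print('workers did not finish in time')
    pool.close()
    pool.join()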
Example #16
def _importProfile(name_filename):
    """_importProfile imports a single profile. It must be at the top level
    (and not within TestSuiteTest) so that multiprocessing can import it
    correctly."""
    name, filename = name_filename

    if not os.path.exists(filename):
        logger.warning('Profile %s does not exist' % filename)
        return None

    pf = lnt.testing.profile.profile.Profile.fromFile(filename)
    if not pf:
        return None

    pf.upgrade()
    profilefile = pf.render()
    return lnt.testing.TestSamples(name + '.profile',
                                   [profilefile],
                                   {},
                                   str)
Example #17
def regenerate_fieldchanges_for_run(session, ts, run_id):
    """Regenerate the set of FieldChange objects for the given run.
    """
    # Allow for potentially a few different runs, previous_runs, next_runs
    # all with the same order_id which we will aggregate together to make
    # our comparison result.
    logger.info("Regenerate fieldchanges for %s run %s" % (ts, run_id))
    run = ts.getRun(session, run_id)
    runs = session.query(ts.Run). \
        filter(ts.Run.order_id == run.order_id). \
        filter(ts.Run.machine_id == run.machine_id). \
        all()

    previous_runs = ts.get_previous_runs_on_machine(session, run,
                                                    FIELD_CHANGE_LOOKBACK)
    next_runs = ts.get_next_runs_on_machine(session, run,
                                            FIELD_CHANGE_LOOKBACK)

    # Find our start/end order.
    if previous_runs != []:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    if next_runs != []:
        end_order = next_runs[-1].order
    else:
        end_order = run.order

    # Load our run data for the creation of the new fieldchanges.
    runs_to_load = [r.id for r in (runs + previous_runs)]

    # When the same rev is submitted many times, the database accesses here
    # can be huge, and it is almost always an error to have the same rev
    # be used in so many runs.
    run_size = len(runs_to_load)
    if run_size > 50:
        logger.warning("Generating field changes for {} runs. "
                       "That will be very slow.".format(run_size))
    runinfo = lnt.server.reporting.analysis.RunInfo(session, ts, runs_to_load)

    # Only store fieldchanges for "metric" samples like execution time;
    # not for fields with other data, e.g. hash of a binary
    for field in list(ts.Sample.get_metric_fields()):
        for test_id in runinfo.test_ids:
            f = None
            result = runinfo.get_comparison_result(
                runs, previous_runs, test_id, field,
                ts.Sample.get_hash_of_binary_field())
            # Try and find a matching FC and update, else create one.
            try:
                f = session.query(ts.FieldChange) \
                    .filter(ts.FieldChange.start_order == start_order) \
                    .filter(ts.FieldChange.end_order == end_order) \
                    .filter(ts.FieldChange.test_id == test_id) \
                    .filter(ts.FieldChange.machine == run.machine) \
                    .filter(ts.FieldChange.field_id == field.id) \
                    .one()
            except sqlalchemy.orm.exc.NoResultFound:
                f = None

            if not result.is_result_performance_change() and f:
                # With more data, it's not a regression. Kill it!
                logger.info("Removing field change: {}".format(f.id))
                deleted = delete_fieldchange(session, ts, f)
                continue

            if result.is_result_performance_change() and not f:
                test = session.query(ts.Test) \
                    .filter(ts.Test.id == test_id) \
                    .one()
                f = ts.FieldChange(start_order=start_order,
                                   end_order=run.order,
                                   machine=run.machine,
                                   test=test,
                                   field_id=field.id)
                session.add(f)
                try:
                    found, new_reg = identify_related_changes(session, ts, f)
                except ObjectDeletedError:
                    # This can happen from time to time.
                    # So, lets retry once.
                    found, new_reg = identify_related_changes(session, ts, f)

                if found:
                    logger.info("Found field change: {}".format(run.machine))

            # Always update FCs with new values.
            if f:
                f.old_value = result.previous
                f.new_value = result.current
                f.run = run
    session.commit()

    regressions = session.query(ts.Regression).all()[::-1]
    rules.post_submission_hooks(session, ts, run_id)
Example #18
    def fromfile(file, path):
        # I love valgrind, but this is really a horribly lame data format. Oh
        # well.

        it = iter(file)

        # Read the header.
        description_lines = []
        command = None
        events = None
        positions = initial_positions = ['line']
        for ln in it:
            # If there is no colon in the line, we have reached the end of the
            # header.
            if ':' not in ln:
                break

            key, value = ln.split(':', 1)
            if key == 'desc':
                description_lines.append(value.strip())
            elif key == 'cmd':
                if command is not None:
                    logger.warning("unexpected multiple 'cmd' keys in %r" %
                                   (path,))
                command = value.strip()
            elif key == 'events':
                if events is not None:
                    logger.warning("unexpected multiple 'events' keys in %r" %
                                   (path,))
                events = value.split()
            elif key == 'positions':
                if positions is not initial_positions:
                    logger.warning(
                        "unexpected multiple 'positions' keys in %r" %
                        (path,))
                positions = value.split()
            else:
                logger.warning("found unknown key %r in %r" % (key, path))

        # Validate that required fields were present.
        if events is None:
            raise CalltreeParseError("missing required 'events' key in header")

        # Construct an instance.
        data = CalltreeData(events, "\n".join(description_lines), command)

        # Read the file data.
        num_samples = len(positions) + len(events)
        current_file = None
        current_function = None
        summary_samples = None
        for ln in it:
            # Check if this is the closing summary line.
            if ln.startswith('summary'):
                key, value = ln.split(':', 1)
                summary_samples = map(int, value.split())
                break

            # Check if this is an update to the current file or function.
            if ln.startswith('fl='):
                current_file = ln[3:-1]
            elif ln.startswith('fn='):
                current_function = ln[3:-1]
            else:
                # Otherwise, this is a data record.
                samples = map(int, ln.split())
                if len(samples) != num_samples:
                    raise CalltreeParseError(
                        "invalid record line, unexpected sample count")
                data.records.append((current_file,
                                     current_function,
                                     samples))

        # Validate that there are no more remaining records.
        for ln in it:
            raise CalltreeParseError("unexpected line in footer: %r" % (ln,))

        # Validate that the summary line was present.
        if summary_samples is None:
            raise CalltreeParseError(
                "missing required 'summary' key in footer")

        data.summary = summary_samples

        return data
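The parser consumes callgrind-style text. A hand-written miniature input (not real tool output) shows the pieces it looks for: a colon-delimited header, a blank separator line, fl=/fn= markers, per-line sample records, and the closing summary:

calltree_text = """\
desc: Trivial example
cmd: ./a.out
events: Ir

fl=main.c
fn=main
1 100
2 250
summary: 350
"""
# fromfile() iterates over any sequence of lines; fed
# calltree_text.splitlines(True) it would report events == ['Ir'],
# records == [('main.c', 'main', [1, 100]), ('main.c', 'main', [2, 250])]
# and summary == [350].  With the default positions == ['line'], each record
# carries one position value plus one value per event.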
Example #19
    def get_comparison_result(self, runs, compare_runs, test_id, field,
                              hash_of_binary_field):
        # Get the field which indicates the requested field's status.
        status_field = field.status_field

        # Load the sample data for the current and previous runs and the
        # comparison window.
        run_samples = self.get_samples(runs, test_id)
        prev_samples = self.get_samples(compare_runs, test_id)

        cur_profile = prev_profile = None
        if runs:
            cur_profile = self.profile_map.get((runs[0].id, test_id), None)
        if compare_runs:
            prev_profile = self.profile_map.get((compare_runs[0].id, test_id),
                                                None)

        # Determine whether this (test,pset) passed or failed in the current
        # and previous runs.
        #
        # FIXME: Support XFAILs and non-determinism (mixed fail and pass)
        # better.
        run_failed = prev_failed = False
        if status_field:
            status_field_index = self.testsuite.get_field_index(status_field)
            for sample in run_samples:
                run_failed |= sample[status_field_index] == FAIL
            for sample in prev_samples:
                prev_failed |= sample[status_field_index] == FAIL

        field_index = self.testsuite.get_field_index(field)

        # Get the current and previous values.
        run_values = [s[field_index] for s in run_samples
                      if s[field_index] is not None]
        prev_values = [s[field_index] for s in prev_samples
                       if s[field_index] is not None]
        if hash_of_binary_field:
            hash_of_binary_field_index = \
                    self.testsuite.get_field_index(hash_of_binary_field)
            hash_values = [s[hash_of_binary_field_index] for s in run_samples
                           if s[hash_of_binary_field_index] is not None]
            prev_hash_values = [s[hash_of_binary_field_index]
                                for s in prev_samples
                                if s[field_index] is not None]

            # All current hash_values should be the same.
            # Warn in the log when the hash wasn't the same for all samples.
            cur_hash_set = set(hash_values)
            if len(cur_hash_set) > 1:
                logger.warning("Found different hashes for multiple samples "
                               "in the same run {0}: {1}\nTestID:{2}"
                               .format(runs, hash_values, test_id))

            cur_hash = hash_values[0] if len(hash_values) > 0 else None
            prev_hash = prev_hash_values[0] \
                if len(prev_hash_values) > 0 else None
        else:
            cur_hash = None
            prev_hash = None
        r = ComparisonResult(self.aggregation_fn,
                             run_failed, prev_failed, run_values,
                             prev_values, cur_hash, prev_hash,
                             cur_profile, prev_profile,
                             self.confidence_lv,
                             bigger_is_better=field.bigger_is_better)
        return r
Example #20
    def _parse_lit_output(self, path, data, cmake_vars, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash',
            'link_time': 'compile',
            'size.__text': 'code_size',
            'mem_bytes': 'mem',
            'link_mem_bytes': 'mem'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str,
            'link_time': float,
            'size.__text': float,
            'mem_bytes': float,
            'link_mem_bytes': float
        }

        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []
        no_errors = True

        for test_data in data['tests']:
            code = test_data['code']
            raw_name = test_data['name']

            split_name = raw_name.split(' :: ', 1)
            if len(split_name) > 1:
                name = split_name[1]
            else:
                name = split_name[0]

            if name.endswith('.test'):
                name = name[:-5]
            name = 'nts.' + name

            # If --single-result is given, exit based on
            # --single-result-predicate
            is_pass = self._is_pass_code(code)
            if self.opts.single_result and \
               raw_name == self.opts.single_result + '.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in sorted(test_data['metrics'].items()):
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue

                    if k not in LIT_METRIC_TO_LNT or \
                       LIT_METRIC_TO_LNT[k] in ignore:
                        continue
                    server_name = name + '.' + LIT_METRIC_TO_LNT[k]

                    if k == 'link_time' or k == 'link_mem_bytes':
                        # Move link time into a second benchmark's
                        # compile-time.
                        server_name = name + '-link.' + LIT_METRIC_TO_LNT[k]

                    test_samples.append(
                        lnt.testing.TestSamples(server_name, [v], test_info,
                                                LIT_METRIC_CONV_FN[k]))

            if code == 'NOEXE':
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL], test_info))
                no_errors = False

            elif not is_pass:
                lnt_code = self._get_lnt_code(test_data['code'])
                test_samples.append(
                    lnt.testing.TestSamples(name + '.exec.status', [lnt_code],
                                            test_info))
                no_errors = False

        # Now import the profiles in parallel.
        if profiles_to_import:
            logger.info('Importing %d profiles with %d threads...' %
                        (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend(
                    [sample for sample in samples if sample is not None])
            except multiprocessing.TimeoutError:
                logger.warning('Profiles had not completed importing after ' +
                               '%s seconds.' % TIMEOUT)
                logger.info('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {
            'tag': 'nts',
            'no_errors': no_errors,
        }
        run_info.update(self._get_cc_info(cmake_vars))
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order

        machine_info = {}

        machine = lnt.testing.Machine(self.opts.label, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
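For reference, the lit JSON this walks has roughly the following shape; the exact fields depend on the lit version and the values here are purely illustrative. The metric keys map through LIT_METRIC_TO_LNT as noted in the comments:

lit_report = {
    'tests': [
        {
            'name': 'test-suite :: SingleSource/Benchmarks/Misc/pi.test',
            'code': 'PASS',
            'metrics': {'compile_time': 0.12, 'exec_time': 1.73,
                        'link_time': 0.03},
        },
    ],
}
# After the ' :: ' split, the '.test' strip and the 'nts.' prefix, these
# metrics become samples named:
#   nts.SingleSource/Benchmarks/Misc/pi.compile       (compile_time)
#   nts.SingleSource/Benchmarks/Misc/pi.exec          (exec_time)
#   nts.SingleSource/Benchmarks/Misc/pi-link.compile  (link_time)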
Example #21
    def fromfile(file, path):
        # I love valgrind, but this is really a horribly lame data format. Oh
        # well.

        it = iter(file)

        # Read the header.
        description_lines = []
        command = None
        events = None
        positions = initial_positions = ['line']
        for ln in it:
            # If there is no colon in the line, we have reached the end of the
            # header.
            if ':' not in ln:
                break

            key, value = ln.split(':', 1)
            if key == 'desc':
                description_lines.append(value.strip())
            elif key == 'cmd':
                if command is not None:
                    logger.warning("unexpected multiple 'cmd' keys in %r" %
                                   (path, ))
                command = value.strip()
            elif key == 'events':
                if events is not None:
                    logger.warning("unexpected multiple 'events' keys in %r" %
                                   (path, ))
                events = value.split()
            elif key == 'positions':
                if positions is not initial_positions:
                    logger.warning(
                        "unexpected multiple 'positions' keys in %r" %
                        (path, ))
                positions = value.split()
            else:
                logger.warning("found unknown key %r in %r" % (key, path))

        # Validate that required fields were present.
        if events is None:
            raise CalltreeParseError("missing required 'events' key in header")

        # Construct an instance.
        data = CalltreeData(events, "\n".join(description_lines), command)

        # Read the file data.
        num_samples = len(positions) + len(events)
        current_file = None
        current_function = None
        summary_samples = None
        for ln in it:
            # Check if this is the closing summary line.
            if ln.startswith('summary'):
                key, value = ln.split(':', 1)
                summary_samples = map(int, value.split())
                break

            # Check if this is an update to the current file or function.
            if ln.startswith('fl='):
                current_file = ln[3:-1]
            elif ln.startswith('fn='):
                current_function = ln[3:-1]
            else:
                # Otherwise, this is a data record.
                samples = map(int, ln.split())
                if len(samples) != num_samples:
                    raise CalltreeParseError(
                        "invalid record line, unexpected sample count")
                data.records.append((current_file, current_function, samples))

        # Validate that there are no more remaining records.
        for ln in it:
            raise CalltreeParseError("unexpected line in footer: %r" % (ln, ))

        # Validate that the summary line was present.
        if summary_samples is None:
            raise CalltreeParseError(
                "missing required 'summary' key in footer")

        data.summary = summary_samples

        return data
Example #22
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report has stuff in it that will be useful
        for reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        mkdir_p(path)
        os.chdir(path)

        # Run with -save-temps
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        logger.info(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        logger.info(out)

        # Figure out our test's target.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, nested? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_deps = [
            self.opts.make, "VERBOSE=1", "timeit-target", "timeit-host",
            "fpcmp-host"
        ]
        logger.info(" ".join(make_deps))
        p = subprocess.Popen(make_deps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        logger.info(" ".join(make_save_temps))
        p = subprocess.Popen(make_save_temps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)
        with open(report_path + "/build.log", 'w') as f:
            f.write(std_out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"),
                    report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = [
            "/*.o", "/*.time", "/*.cmake", "/*.make", "/*.includecache",
            "/*.txt"
        ]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        # Now let's do -ftime-report.
        cmd_time_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-ftime-report']

        logger.info(' '.join(cmd_time_report))

        out = subprocess.check_output(cmd_time_report)
        logger.info(out)

        make_time_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_time_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/time-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/time-report.txt")

        # Now let's do -mllvm -stats.
        cmd_stats_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-mllvm -stats']

        logger.info(' '.join(cmd_stats_report))

        out = subprocess.check_output(cmd_stats_report)
        logger.info(out)

        make_stats_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_stats_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/stats-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/stats-report.txt")

        #  Collect Profile:
        if "Darwin" in platform.platform():
            # For testing and power users, let's allow overrides of how sudo
            # and iprofiler are called.
            sudo = os.getenv("SUDO_CMD", "sudo")
            if " " in sudo:
                # SUDO_CMD may be a full command line; keep it as a list.
                sudo = sudo.split(" ")
            elif not sudo:
                sudo = []
            else:
                sudo = [sudo]
            iprofiler = os.getenv("IPROFILER_CMD",
                                  "iprofiler -timeprofiler -I 40u")

            cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
            print(' '.join(cmd_iprofiler))

            out = subprocess.check_output(cmd_iprofiler)

            os.chdir(local_path)
            make_iprofiler_temps = [self.opts.make, "VERBOSE=1", short_name]
            p = subprocess.Popen(make_iprofiler_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            logger.warning("Using sudo to collect execution trace.")
            make_save_temps = sudo + [self.opts.lit, short_name + ".test"]
            p = subprocess.Popen(make_save_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            sys.stdout.write(std_out)
            sys.stderr.write(std_err)
            logger.warning("Tests may fail because of iprofiler's output.")
            # The dtps file will be saved as root, make it so
            # that we can read it.
            chmod = sudo + [
                "chown", "-R",
                getpass.getuser(), short_name + ".dtps"
            ]
            subprocess.call(chmod)
            profile = local_path + "/" + short_name + ".dtps"
            shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
            logger.info(profile + "-->" + report_path)
        else:
            logger.warning("Skipping execution profiling because " +
                           "this is not Darwin.")
        logger.info("Report produced in: " + report_path)

        return lnt.util.ImportData.no_submit()
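Most of the report steps above repeat one recipe: run make with captured output, keep the interesting stream, and write it next to the report. A condensed sketch of that recipe (run_and_log is a hypothetical helper, not part of LNT):

import subprocess

def run_and_log(cmd, log_path, keep='stdout'):
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    std_out, std_err = p.communicate()
    with open(log_path, 'wb') as f:
        f.write(std_out if keep == 'stdout' else std_err)
    return p.returncode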
Example #23
def _load_migrations():
    """
    Load available migration scripts from a directory.

    Migrations are organized as:

    <current dir>/migrations/
    <current dir>/migrations/upgrade_<N>_to_<N+1>.py
    ...
    """

    upgrade_script_rex = re.compile(
        r'^upgrade_(0|[1-9][0-9]*)_to_([1-9][0-9]*)\.py$')
    migrations = {}

    # Currently, we only load migrations for a '__core__' schema, and only from
    # the migrations directory. One idea if we need to eventually support
    # migrations for the per-testsuite tables is to add subdirectories keyed on
    # the testsuite.
    for schema_name in ('__core__', ):
        schema_migrations_path = os.path.join(os.path.dirname(__file__),
                                              'migrations')
        schema_migrations = {}
        for item in os.listdir(schema_migrations_path):
            # Ignore certain known non-scripts.
            if item in ('README.txt', '__init__.py', 'new_suite.py',
                        'util.py') or item.endswith('.pyc'):
                continue

            # Ignore non-matching files.
            m = upgrade_script_rex.match(item)
            if m is None:
                logger.warning(
                    "ignoring item %r in schema migration directory: %r", item,
                    schema_migrations_path)
                continue

            # Check the version numbers for validity.
            version, next_version = map(int, m.groups())
            if next_version != version + 1:
                logger.error(
                    "invalid script name %r in schema migration directory: %r",
                    item, schema_migrations_path)
                continue

            schema_migrations[version] = os.path.join(schema_migrations_path,
                                                      item)

        # Ignore directories with no migrations.
        if not schema_migrations:
            logger.warning("ignoring empty migrations directory: %r",
                           schema_migrations_path)
            continue

        # Check the provided versions for sanity.
        current_version = max(schema_migrations) + 1
        for i in range(current_version):
            if i not in schema_migrations:
                logger.error("schema %r is missing migration for version: %r",
                             schema_name, i)

        # Store the current version as another item in the per-schema migration
        # dictionary.
        schema_migrations['current_version'] = current_version

        # Store the schema migrations.
        migrations[schema_name] = schema_migrations

    return migrations
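The filename convention encodes a linear upgrade chain. For illustration, this is how the two validity checks above behave on a few hypothetical entries:

import re

upgrade_script_rex = re.compile(
    r'^upgrade_(0|[1-9][0-9]*)_to_([1-9][0-9]*)\.py$')
found = {}
for item in ['upgrade_0_to_1.py', 'upgrade_1_to_2.py',
             'upgrade_3_to_4.py', 'upgrade_2_to_4.py']:
    version, next_version = map(int, upgrade_script_rex.match(item).groups())
    if next_version != version + 1:
        print('invalid script name %r' % item)  # upgrade_2_to_4.py
        continue
    found[version] = item

current_version = max(found) + 1  # 4
for i in range(current_version):
    if i not in found:
        print('missing migration for version: %r' % i)  # 2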
Example #24
    def get_comparison_result(self, runs, compare_runs, test_id, field,
                              hash_of_binary_field):
        # Get the field which indicates the requested field's status.
        status_field = field.status_field

        # Load the sample data for the current and previous runs and the
        # comparison window.
        run_samples = self.get_samples(runs, test_id)
        prev_samples = self.get_samples(compare_runs, test_id)

        cur_profile = prev_profile = None
        if runs:
            cur_profile = self.profile_map.get((runs[0].id, test_id), None)
        if compare_runs:
            prev_profile = self.profile_map.get((compare_runs[0].id, test_id),
                                                None)

        # Determine whether this (test,pset) passed or failed in the current
        # and previous runs.
        #
        # FIXME: Support XFAILs and non-determinism (mixed fail and pass)
        # better.
        run_failed = prev_failed = False
        if status_field:
            status_field_index = self.testsuite.get_field_index(status_field)
            for sample in run_samples:
                run_failed |= sample[status_field_index] == FAIL
            for sample in prev_samples:
                prev_failed |= sample[status_field_index] == FAIL

        field_index = self.testsuite.get_field_index(field)

        # Get the current and previous values.
        run_values = [
            s[field_index] for s in run_samples if s[field_index] is not None
        ]
        prev_values = [
            s[field_index] for s in prev_samples if s[field_index] is not None
        ]
        if hash_of_binary_field:
            hash_of_binary_field_index = \
                    self.testsuite.get_field_index(hash_of_binary_field)
            hash_values = [
                s[hash_of_binary_field_index] for s in run_samples
                if s[hash_of_binary_field_index] is not None
            ]
            prev_hash_values = [
                s[hash_of_binary_field_index] for s in prev_samples
                if s[field_index] is not None
            ]

            # All current hash_values should be the same.
            # Warn in the log when the hash wasn't the same for all samples.
            cur_hash_set = set(hash_values)
            if len(cur_hash_set) > 1:
                logger.warning("Found different hashes for multiple samples "
                               "in the same run {0}: {1}\nTestID:{2}".format(
                                   runs, hash_values, test_id))

            cur_hash = hash_values[0] if len(hash_values) > 0 else None
            prev_hash = prev_hash_values[0] \
                if len(prev_hash_values) > 0 else None
        else:
            cur_hash = None
            prev_hash = None
        r = ComparisonResult(self.aggregation_fn,
                             run_failed,
                             prev_failed,
                             run_values,
                             prev_values,
                             cur_hash,
                             prev_hash,
                             cur_profile,
                             prev_profile,
                             self.confidence_lv,
                             bigger_is_better=field.bigger_is_better)
        return r
Example #25
def upgrade_1_to_2(data, ts_name):
    result = dict()

    # Pull version and database schema to toplevel
    result['format_version'] = '2'
    report_version = data['Run']['Info'].pop('__report_version__', '1')
    # We should not be in upgrade_1_to_2 for other versions
    assert (report_version == '1')
    tag = data['Run']['Info'].pop('tag', None)
    if tag is not None and tag != ts_name:
        raise ValueError("Importing '%s' data into '%s' testsuite" %
                         (tag, ts_name))

    upgrade = _upgrades.get(tag)
    if upgrade is None:
        logger.warning("No upgrade schema known for '%s'\n" % tag)
        upgrade = _default_upgrade

    # Flatten Machine.Info into machine
    Machine = data['Machine']
    result_machine = {'name': Machine['Name']}
    for key, value in Machine['Info'].items():
        newname = upgrade.machine_param_rename.get(key, key)
        if newname in result_machine:
            raise ValueError("Name clash for machine info '%s'" % newname)
        result_machine[newname] = value
    result['machine'] = result_machine

    # Flatten Result.Info into result
    Run = data['Run']
    result_run = {}
    start_time = Run.get('Start Time')
    if start_time is not None:
        result_run['start_time'] = start_time
    end_time = Run.get('End Time')
    if end_time is not None:
        result_run['end_time'] = end_time
    for key, value in Run['Info'].items():
        newname = upgrade.run_param_rename.get(key, key)
        if newname in result_run:
            raise ValueError("Name clash for run info '%s'" % newname)
        result_run[newname] = value
    result['run'] = result_run

    # Merge tests
    result_tests = list()
    result_tests_dict = dict()
    Tests = data['Tests']
    for test in Tests:
        test_Name = test['Name']

        # Old testnames always started with 'tag.', split that part.
        if len(test['Info']) != 0:
            # The Info record didn't work with the v4 database anyway...
            raise ValueError("Tests/%s: cannot convert non-empty Info record" %
                             test_Name)
        tag_dot = '%s.' % ts_name
        if not test_Name.startswith(tag_dot):
            raise ValueError("Tests/%s: test name does not start with '%s'" %
                             (test_Name, tag_dot))
        name_metric = test_Name[len(tag_dot):]

        found_metric = False
        for oldname, newname in upgrade.metric_rename.items():
            assert (oldname.startswith('.'))
            if name_metric.endswith(oldname):
                name = name_metric[:-len(oldname)]
                metric = newname
                found_metric = True
                break
        if not found_metric:
            # Fallback logic for unknown metrics: Assume they are '.xxxx'
            name, dot, metric = name_metric.rpartition('.')
            if dot != '.':
                raise ValueError("Tests/%s: name does not end in .metric" %
                                 test_Name)
            logger.warning("Found unknown metric '%s'" % metric)
            upgrade.metric_rename['.' + metric] = metric

        result_test = result_tests_dict.get(name)
        if result_test is None:
            result_test = {'name': name}
            result_tests_dict[name] = result_test
            result_tests.append(result_test)

        data = test['Data']
        if metric not in result_test:
            # Do not construct a list for the very common case of just a
            # single datum.
            if len(data) == 1:
                data = data[0]
            result_test[metric] = data
        elif len(data) > 0:
            # Transform the test data into a list
            if not isinstance(result_test[metric], list):
                result_test[metric] = [result_test[metric]]
            result_test[metric] += data

    result['tests'] = result_tests
    return result
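Illustrative only: a minimal v1 report shaped like the input this expects, and the flattened v2 dictionary it returns. The made-up '.mymetric' suffix deliberately falls through to the rpartition fallback (logging the "unknown metric" warning); real metric suffixes are renamed via the _upgrades table, which is not shown here.

v1 = {
    'Run': {'Info': {'__report_version__': '1', 'tag': 'nts'},
            'Start Time': '2020-01-01 00:00:00'},
    'Machine': {'Name': 'my-machine', 'Info': {}},
    'Tests': [{'Name': 'nts.SingleSource/pi.mymetric', 'Info': {},
               'Data': [1.73]}],
}

# upgrade_1_to_2(v1, 'nts') then returns roughly:
v2 = {
    'format_version': '2',
    'machine': {'name': 'my-machine'},
    'run': {'start_time': '2020-01-01 00:00:00'},
    'tests': [{'name': 'SingleSource/pi', 'mymetric': 1.73}],
}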
Example #26
def upgrade_1_to_2(data, ts_name):
    result = dict()

    # Pull version and database schema to toplevel
    result['format_version'] = '2'
    report_version = data['Run']['Info'].pop('__report_version__', '1')
    # We should not be in upgrade_1_to_2 for other versions
    assert(report_version == '1')
    tag = data['Run']['Info'].pop('tag', None)
    if tag is not None and tag != ts_name:
        raise ValueError("Importing '%s' data into '%s' testsuite" %
                         (tag, ts_name))

    upgrade = _upgrades.get(tag)
    if upgrade is None:
        logger.warning("No upgrade schema known for '%s'\n" % tag)
        upgrade = _default_upgrade

    # Flatten Machine.Info into machine
    Machine = data['Machine']
    result_machine = {'name': Machine['Name']}
    for key, value in Machine['Info'].items():
        newname = upgrade.machine_param_rename.get(key, key)
        if newname in result_machine:
            raise ValueError("Name clash for machine info '%s'" % newname)
        result_machine[newname] = value
    result['machine'] = result_machine

    # Flatten Result.Info into result
    Run = data['Run']
    result_run = {}
    start_time = Run.get('Start Time')
    if start_time is not None:
        result_run['start_time'] = start_time
    end_time = Run.get('End Time')
    if end_time is not None:
        result_run['end_time'] = end_time
    for key, value in Run['Info'].items():
        newname = upgrade.run_param_rename.get(key, key)
        if newname in result_run:
            raise ValueError("Name clash for run info '%s'" % newname)
        result_run[newname] = value
    result['run'] = result_run

    # Merge tests
    result_tests = list()
    result_tests_dict = dict()
    Tests = data['Tests']
    for test in Tests:
        test_Name = test['Name']

        # Old testnames always started with 'tag.', split that part.
        if len(test['Info']) != 0:
            # The Info record didn't work with the v4 database anyway...
            raise ValueError("Tests/%s: cannot convert non-empty Info record" %
                             test_Name)
        tag_dot = '%s.' % ts_name
        if not test_Name.startswith(tag_dot):
            raise ValueError("Tests/%s: test name does not start with '%s'" %
                             (test_Name, tag_dot))
        name_metric = test_Name[len(tag_dot):]

        found_metric = False
        for oldname, newname in upgrade.metric_rename.items():
            assert(oldname.startswith('.'))
            if name_metric.endswith(oldname):
                name = name_metric[:-len(oldname)]
                metric = newname
                found_metric = True
                break
        if not found_metric:
            # Fallback logic for unknown metrics: Assume they are '.xxxx'
            name, dot, metric = name_metric.rpartition('.')
            if dot != '.':
                raise ValueError("Tests/%s: name does not end in .metric" %
                                 test_Name)
            logger.warning("Found unknown metric '%s'" % metric)
            upgrade.metric_rename['.'+metric] = metric

        result_test = result_tests_dict.get(name)
        if result_test is None:
            result_test = {'name': name}
            result_tests_dict[name] = result_test
            result_tests.append(result_test)

        data = test['Data']
        if metric not in result_test:
            # Do not construct a list for the very common case of just a
            # single datum.
            if len(data) == 1:
                data = data[0]
            result_test[metric] = data
        elif len(data) > 0:
            # Transform the test data into a list
            if not isinstance(result_test[metric], list):
                result_test[metric] = [result_test[metric]]
            result_test[metric] += data

    result['tests'] = result_tests
    return result
Example #27
    def _parse_lit_output(self, path, data, cmake_vars, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash',
            'link_time': 'compile',
            'size.__text': 'code_size',
            'mem_bytes': 'mem',
            'link_mem_bytes': 'mem'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str,
            'link_time': float,
            'size.__text': float,
            'mem_bytes': float,
            'link_mem_bytes': float
        }

        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []
        no_errors = True

        for test_data in data['tests']:
            code = test_data['code']
            raw_name = test_data['name']

            split_name = raw_name.split(' :: ', 1)
            if len(split_name) > 1:
                name = split_name[1]
            else:
                name = split_name[0]

            if name.endswith('.test'):
                name = name[:-5]
            name = 'nts.' + name

            # If --single-result is given, exit based on
            # --single-result-predicate
            is_pass = self._is_pass_code(code)
            if self.opts.single_result and \
               raw_name == self.opts.single_result + '.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in sorted(test_data['metrics'].items()):
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue

                    if k not in LIT_METRIC_TO_LNT or \
                       LIT_METRIC_TO_LNT[k] in ignore:
                        continue
                    server_name = name + '.' + LIT_METRIC_TO_LNT[k]

                    if k == 'link_time' or k == 'link_mem_bytes':
                        # Move link time into a second benchmark's
                        # compile-time.
                        server_name = name + '-link.' + LIT_METRIC_TO_LNT[k]

                    test_samples.append(
                        lnt.testing.TestSamples(server_name,
                                                [v],
                                                test_info,
                                                LIT_METRIC_CONV_FN[k]))

            if code == 'NOEXE':
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL],
                                            test_info))
                no_errors = False

            elif not is_pass:
                lnt_code = self._get_lnt_code(test_data['code'])
                test_samples.append(
                    lnt.testing.TestSamples(name + '.exec.status',
                                            [lnt_code], test_info))
                no_errors = False

        # Now import the profiles in parallel.
        if profiles_to_import:
            logger.info('Importing %d profiles with %d threads...' %
                        (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend([sample
                                     for sample in samples
                                     if sample is not None])
            except multiprocessing.TimeoutError:
                logger.warning('Profiles had not completed importing after ' +
                               '%s seconds.' % TIMEOUT)
                logger.info('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {
            'tag': 'nts',
            'no_errors': no_errors,
        }
        run_info.update(self._get_cc_info(cmake_vars))
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order

        machine_info = {}

        machine = lnt.testing.Machine(self.opts.label, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
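
A rough sketch of how the mapping above turns lit metric names into LNT sample names; the dictionary below is a trimmed copy of LIT_METRIC_TO_LNT from the example, and the input shapes are assumptions:

# Trimmed mapping for illustration; the full table appears in the example
# above.
LIT_METRIC_TO_LNT = {'compile_time': 'compile', 'exec_time': 'exec',
                     'link_time': 'compile', 'link_mem_bytes': 'mem'}

def lnt_sample_name(test_name, lit_metric):
    """Return the LNT sample name for one lit metric of one test."""
    lnt_metric = LIT_METRIC_TO_LNT[lit_metric]
    if lit_metric in ('link_time', 'link_mem_bytes'):
        # Link metrics are reported against a synthetic '<name>-link' test.
        return test_name + '-link.' + lnt_metric
    return test_name + '.' + lnt_metric

# lnt_sample_name('nts.SingleSource/Foo', 'exec_time')
#     == 'nts.SingleSource/Foo.exec'
# lnt_sample_name('nts.SingleSource/Foo', 'link_time')
#     == 'nts.SingleSource/Foo-link.compile'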
Example No. 33
0
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report has stuff in it that will be useful
        for reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        mkdir_p(path)
        os.chdir(path)

        # Run with -save-temps
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        logger.info(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        logger.info(out)

        # Figure out our test's target.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark; is it nested? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_deps = [self.opts.make, "VERBOSE=1", "timeit-target",
                     "timeit-host", "fpcmp-host"]
        logger.info(" ".join(make_deps))
        p = subprocess.Popen(make_deps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        logger.info(" ".join(make_save_temps))
        p = subprocess.Popen(make_save_temps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)
        with open(report_path + "/build.log", 'w') as f:
            f.write(std_out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"),
                    report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = ["/*.o", "/*.time", "/*.cmake", "/*.make",
                       "/*.includecache", "/*.txt"]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        # Now let's do -ftime-report.
        cmd_time_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-ftime-report']

        logger.info(' '.join(cmd_time_report))

        out = subprocess.check_output(cmd_time_report)
        logger.info(out)

        make_time_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_time_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/time-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/time-report.txt")

        # Now let's do -mllvm -stats.
        cmd_stats_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-mllvm -stats']

        logger.info(' '.join(cmd_stats_report))

        out = subprocess.check_output(cmd_stats_report)
        logger.info(out)

        make_stats_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_stats_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/stats-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/stats-report.txt")

        # Collect profile:
        if "Darwin" in platform.platform():
            # For testing and power users, let's allow overrides of how sudo
            # and iprofiler are called.
            sudo = os.getenv("SUDO_CMD", "sudo")
            if not sudo:
                sudo = []
            elif " " in sudo:
                # Split a multi-word override into an argument list.
                sudo = sudo.split(" ")
            else:
                sudo = [sudo]
            iprofiler = os.getenv("IPROFILER_CMD",
                                  "iprofiler -timeprofiler -I 40u")

            cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
            logger.info(' '.join(cmd_iprofiler))

            out = subprocess.check_output(cmd_iprofiler)

            os.chdir(local_path)
            make_iprofiler_temps = [self.opts.make, "VERBOSE=1", short_name]
            p = subprocess.Popen(make_iprofiler_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            logger.warning("Using sudo to collect execution trace.")
            make_save_temps = sudo + [self.opts.lit, short_name + ".test"]
            p = subprocess.Popen(make_save_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            sys.stdout.write(std_out)
            sys.stderr.write(std_err)
            logger.warning("Tests may fail because of iprofiler's output.")
            # The .dtps bundle will be saved as root; change its ownership
            # so that we can read it.
            chown_cmd = sudo + ["chown", "-R", getpass.getuser(),
                                short_name + ".dtps"]
            subprocess.call(chown_cmd)
            profile = local_path + "/" + short_name + ".dtps"
            shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
            logger.info(profile + " --> " + report_path)
        else:
            logger.warning("Skipping execution profiling because " +
                           "this is not Darwin.")
        logger.info("Report produced in: " + report_path)

        return lnt.util.ImportData.no_submit()
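
The diagnose() example above relies on an artifact-copy helper defined elsewhere on the class. A hypothetical sketch of what such a helper might look like, using the glob patterns passed in the example (the name and behavior here are assumptions, not the actual LNT implementation):

import glob
import shutil

def _cp_artifacts(src_prefix, dest_dir, patterns):
    """Copy every file matching src_prefix + pattern into dest_dir."""
    for pattern in patterns:
        for path in glob.glob(src_prefix + pattern):
            shutil.copy(path, dest_dir)

# e.g. _cp_artifacts(local_path, report_path, ["/*.s", "/*.ii", "/*.i"])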