Example #1
def delete_fieldchange(ts, change):
    """Delete this field change.  Since it might be attahed to a regression
    via regression indicators, fix those up too.  If this orphans a regression
    delete it as well."""
    # Find the indicators.
    indicators = ts.query(ts.RegressionIndicator). \
        filter(ts.RegressionIndicator.field_change_id == change.id). \
        all()
    # And all the related regressions.
    regression_ids = [r.regression_id for r in indicators]

    # Remove the indicators that point to this change.
    for ind in indicators:
        ts.delete(ind)
    
    # Now we can remove the change itself.
    ts.delete(change)
    
    # We might have just created a regression with no changes.
    # If so, delete it as well.
    for r in regression_ids:
        remaining = ts.query(ts.RegressionIndicator). \
            filter(ts.RegressionIndicator.regression_id == r). \
            all()
        if len(remaining) == 0:
            r = ts.query(ts.Regression).get(r)
            note("Deleting regression because it has not changes:" + repr(r))
            ts.delete(r)
    ts.commit()
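
The pattern above (delete the dependent rows, then drop any parent left with no children) can be exercised outside LNT. Below is a minimal, hedged sketch against an in-memory SQLite database; the Regression and RegressionIndicator models are simplified stand-ins for LNT's real schema, and the sketch assumes a SQLAlchemy 1.x API as used by LNT at the time.

from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Regression(Base):
    __tablename__ = 'regression'
    id = Column(Integer, primary_key=True)


class RegressionIndicator(Base):
    __tablename__ = 'regression_indicator'
    id = Column(Integer, primary_key=True)
    regression_id = Column(Integer, ForeignKey('regression.id'))
    field_change_id = Column(Integer)


def delete_field_change(session, field_change_id):
    """Delete all indicators for a field change, then any regression that is
    left with no indicators at all (simplified stand-in, not LNT's code)."""
    indicators = session.query(RegressionIndicator) \
        .filter(RegressionIndicator.field_change_id == field_change_id) \
        .all()
    regression_ids = set(i.regression_id for i in indicators)
    for ind in indicators:
        session.delete(ind)
    for rid in regression_ids:
        remaining = session.query(RegressionIndicator) \
            .filter(RegressionIndicator.regression_id == rid) \
            .count()
        if remaining == 0:
            session.delete(session.query(Regression).get(rid))
    session.commit()


engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Regression(id=1))
session.add(RegressionIndicator(regression_id=1, field_change_id=7))
session.commit()
delete_field_change(session, 7)
assert session.query(Regression).count() == 0  # the orphan was removed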
Example #2
def async_wrapper(job, ts_args, func_args):
    """Setup test-suite in this subprocess and run something.
    
    Because of multipocessing, capture excptions and log messages,
    and return them.
    """
    global clean_db
    try:
        start_time = time.time()
        
        if not clean_db:
            lnt.server.db.v4db.V4DB.close_all_engines()
            clean_db = True
        
        note("Running async wrapper: {} ".format(job.__name__)+ str(os.getpid()))

        _v4db = current_app.old_config.get_database(ts_args['db'])
        # with contextlib.closing(_v4db) as db:
        ts = _v4db.testsuite[ts_args['tsname']]
        nothing = job(ts, **func_args)
        assert nothing is None
        end_time = time.time()
        delta = end_time - start_time
        msg = "Finished: {name} in {time:.2f}s ".format(
            name=job.__name__, time=delta)
        if delta < 100:
            note(msg)
        else:
            warning(msg)
    except Exception:
        # Put all exception text into an exception and raise that for our
        # parent process.
        error("Subprocess failed with:" + "".join(traceback.format_exception(*sys.exc_info())))
        sys.exit(1)
    sys.exit(0)
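
A minimal standard-library sketch of the same idea, independent of LNT: run a job in a child process, time it, flatten any exception into text for the parent, and report success or failure through the exit code. The job here is a made-up placeholder.

import multiprocessing
import sys
import time
import traceback


def child_wrapper(job, func_args):
    try:
        start = time.time()
        job(**func_args)
        print("Finished {0} in {1:.2f}s".format(job.__name__,
                                                time.time() - start))
    except Exception:
        # Flatten the traceback into text so the parent can read it from logs.
        sys.stderr.write("Subprocess failed with:\n" +
                         "".join(traceback.format_exception(*sys.exc_info())))
        sys.exit(1)
    sys.exit(0)


def demo_job(n):
    sum(range(n))  # placeholder work


if __name__ == '__main__':
    p = multiprocessing.Process(target=child_wrapper,
                                args=(demo_job, {'n': 100000}))
    p.start()
    p.join()
    print("child exit code: {0}".format(p.exitcode))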
Example #3
def delete_fieldchange(ts, change):
    """Delete this field change.  Since it might be attahed to a regression
    via regression indicators, fix those up too.  If this orphans a regression
    delete it as well."""
    # Find the indicators.
    indicators = ts.query(ts.RegressionIndicator). \
        filter(ts.RegressionIndicator.field_change_id == change.id). \
        all()
    # And all the related regressions.
    regression_ids = [r.regression_id for r in indicators]

    # Remove the indicators that point to this change.
    for ind in indicators:
        ts.delete(ind)

    # Now we can remove the change itself.
    ts.delete(change)

    # We might have just created a regression with no changes.
    # If so, delete it as well.
    for r in regression_ids:
        remaining = ts.query(ts.RegressionIndicator). \
            filter(ts.RegressionIndicator.regression_id == r). \
            all()
        if len(remaining) == 0:
            r = ts.query(ts.Regression).get(r)
            note("Deleting regression because it has not changes:" + repr(r))
            ts.delete(r)
    ts.commit()
Example #4
def start_browser(url, debug=False):
    def url_is_up(url):
        try:
            o = urllib.urlopen(url)
        except IOError:
            return False
        o.close()
        return True

    # Wait for server to start...
    if debug:
        note('waiting for server to start...')
    for i in range(10000):
        if url_is_up(url):
            break
        if debug:
            sys.stderr.write('.')
            sys.stderr.flush()
        time.sleep(.01)
    else:
        warning('unable to detect that server started')
                
    if debug:
        note('opening webbrowser...')
    webbrowser.open(url)
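
The polling loop above can be reduced to a small reusable helper. A sketch, with the import guarded so it works whether urlopen lives in urllib (Python 2, as in this codebase) or urllib.request (Python 3); the URL in the trailing comment is only an example.

import sys
import time
try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib import urlopen            # Python 2


def wait_for_server(url, attempts=100, delay=0.1):
    """Return True once `url` answers, False after `attempts` tries."""
    for _ in range(attempts):
        try:
            urlopen(url).close()
            return True
        except IOError:
            sys.stderr.write('.')
            sys.stderr.flush()
            time.sleep(delay)
    return False

# wait_for_server('http://localhost:8000')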
Example #5
    def _make(self, path):
        make_cmd = self.opts.make

        subdir = path
        target = 'all'
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            if self.opts.only_test[1]:
                target = self.opts.only_test[1]
            subdir = os.path.join(*components)

        note('Building...')
        if not self.opts.succinct:
            args = ["VERBOSE=1", target]
        else:
            args = [target]
        try:
            self._check_call([make_cmd,
                              '-k', '-j', str(self._build_threads())] + args,
                             cwd=subdir)
        except subprocess.CalledProcessError:
            # make is expected to exit with code 1 if there was any build
            # failure. Build failures are not unexpected when testing an
            # experimental compiler.
            pass
Example #6
def identify_related_changes(ts, regressions, fc):
    """Can we find a home for this change in some existing regression? """
    for regression in regressions:
        regression_indicators = get_ris(ts, regression)
        for change in regression_indicators:
            regression_change = change.field_change
            if is_overlaping(regression_change, fc):
                confidence = 0
                relation = ["Revision"]
                if regression_change.machine == fc.machine:
                    confidence += 1
                    relation.append("Machine")
                if regression_change.test == fc.test:
                    confidence += 1
                    relation.append("Test")
                if regression_change.field == fc.field:
                    confidence += 1
                    relation.append("Field")

                if confidence >= 2:
                    # Matching
                    note("Found a match:" + str(regression) + " On " +
                         ', '.join(relation))
                    ri = ts.RegressionIndicator(regression, fc)
                    ts.add(ri)
                    # Update the default title if needed.
                    rebuild_title(ts, regression)
                    return (True, regression)
    note("Could not find a partner, creating new Regression for change")
    new_reg = new_regression(ts, [fc.id])
    return (False, new_reg)
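
The matching heuristic above counts how many attributes two overlapping changes share and accepts the pairing once at least two agree. A standalone illustration, with a simplified FieldChange stand-in instead of the ORM object:

from collections import namedtuple

FieldChange = namedtuple('FieldChange', ['machine', 'test', 'field'])


def match_confidence(a, b):
    """Count shared attributes; 'Revision' is assumed to overlap already."""
    relation = ['Revision']
    confidence = 0
    for attr in ('machine', 'test', 'field'):
        if getattr(a, attr) == getattr(b, attr):
            confidence += 1
            relation.append(attr.capitalize())
    return confidence, relation


old = FieldChange('machine-a', 'bench.foo', 'exec_time')
new = FieldChange('machine-a', 'bench.foo', 'compile_time')
score, why = match_confidence(old, new)
assert score == 2 and why == ['Revision', 'Machine', 'Test']  # accepted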
Example #7
    def _lit(self, path, test):
        lit_cmd = self.opts.lit

        output_json_path = tempfile.NamedTemporaryFile(prefix='output',
                                                       suffix='.json',
                                                       dir=path,
                                                       delete=False)
        output_json_path.close()
        
        subdir = path
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            subdir = os.path.join(*components)

        extra_args = []
        if not test:
            extra_args = ['--no-execute']
        if self.opts.use_perf in ('profile', 'all'):
            extra_args += ['--param', 'profile=perf']
            
        note('Testing...')
        try:
            self._check_call([lit_cmd,
                              '-v',
                              '-j', str(self._test_threads()),
                              subdir,
                              '-o', output_json_path.name] + extra_args)
        except subprocess.CalledProcessError:
            # LIT is expected to exit with code 1 if there were test
            # failures!
            pass
        return json.loads(open(output_json_path.name).read())
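
The temporary-file handoff used above (create a named file, let the child process write JSON into it, tolerate a failing exit status, read it back) can be shown in isolation. The child command below is only a stand-in that writes an empty report.

import json
import os
import subprocess
import sys
import tempfile

tmp = tempfile.NamedTemporaryFile(prefix='output', suffix='.json',
                                  delete=False)
tmp.close()
writer = ('import json, sys; f = open(sys.argv[1], "w"); '
          'json.dump({"tests": []}, f); f.close()')
try:
    subprocess.check_call([sys.executable, '-c', writer, tmp.name])
except subprocess.CalledProcessError:
    pass  # a non-zero exit may only mean individual test failures
with open(tmp.name) as f:
    report = json.load(f)
os.unlink(tmp.name)
assert report == {"tests": []}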
Example #8
    def _check_output(self, *args, **kwargs):
        note('Execute: %s' % ' '.join(args[0]))
        if 'cwd' in kwargs:
            note('          (In %s)' % kwargs['cwd'])
        output = subprocess.check_output(*args, **kwargs)
        sys.stdout.write(output)
        return output
Example #9
    def _lit(self, path, test):
        lit_cmd = self.opts.lit

        output_json_path = tempfile.NamedTemporaryFile(prefix='output',
                                                       suffix='.json',
                                                       dir=path,
                                                       delete=False)
        output_json_path.close()

        subdir = path
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            subdir = os.path.join(*components)

        extra_args = []
        if not test:
            extra_args = ['--no-execute']
        if self.opts.use_perf in ('profile', 'all'):
            extra_args += ['--param', 'profile=perf']

        note('Testing...')
        try:
            self._check_call([
                lit_cmd, '-v', '-j',
                str(self._test_threads()), subdir, '-o', output_json_path.name
            ] + extra_args)
        except subprocess.CalledProcessError:
            # LIT is expected to exit with code 1 if there were test
            # failures!
            pass
        return json.loads(open(output_json_path.name).read())
Example #10
def start_browser(url, debug=False):
    def url_is_up(url):
        try:
            o = urllib.urlopen(url)
        except IOError:
            return False
        o.close()
        return True

    # Wait for server to start...
    if debug:
        note('waiting for server to start...')
    for i in range(10000):
        if url_is_up(url):
            break
        if debug:
            sys.stderr.write('.')
            sys.stderr.flush()
        time.sleep(.01)
    else:
        warning('unable to detect that server started')
                
    if debug:
        note('opening webbrowser...')
    webbrowser.open(url)
Example #11
def async_wrapper(job, ts_args, func_args):
    """Setup test-suite in this subprocess and run something.
    
    Because of multipocessing, capture excptions and log messages,
    and return them.
    """
    global clean_db
    try:
        start_time = time.time()
        
        if not clean_db:
            lnt.server.db.v4db.V4DB.close_all_engines()
            clean_db = True
        
        note("Running async wrapper: {} ".format(job.__name__)+ str(os.getpid()))

        _v4db = current_app.old_config.get_database(ts_args['db'])
        # with contextlib.closing(_v4db) as db:
        ts = _v4db.testsuite[ts_args['tsname']]
        nothing = job(ts, **func_args)
        assert nothing is None
        end_time = time.time()
        delta = end_time - start_time
        msg = "Finished: {name} in {time:.2f}s ".format(
            name=job.__name__, time=delta)
        if delta < 100:
            note(msg)
        else:
            warning(msg)
    except Exception:
        # Put all exception text into an exception and raise that for our
        # parent process.
        error("Subprocess failed with:" + "".join(traceback.format_exception(*sys.exc_info())))
        sys.exit(1)
    sys.exit(0)
Example #12
def identify_related_changes(ts, regressions, fc):
    """Can we find a home for this change in some existing regression? """
    for regression in regressions:
        regression_indicators = get_ris(ts, regression)
        for change in regression_indicators:
            regression_change = change.field_change
            if is_overlaping(regression_change, fc):
                confidence = 0
                relation = ["Revision"]
                if regression_change.machine == fc.machine:
                    confidence += 1
                    relation.append("Machine")
                if regression_change.test == fc.test:
                    confidence += 1
                    relation.append("Test")
                if regression_change.field == fc.field:
                    confidence += 1
                    relation.append("Field")

                if confidence >= 2:
                    # Matching
                    note("Found a match:" + str(regression)  + " On " +
                         ', '.join(relation))
                    ri = ts.RegressionIndicator(regression, fc)
                    ts.add(ri)
                    # Update the default title if needed.
                    rebuild_title(ts, regression)
                    return (True, regression)
    note("Could not find a partner, creating new Regression for change")
    new_reg = new_regression(ts, [fc.id])
    return (False, new_reg)
Example #13
    def _configure(self, path, execute=True):
        cmake_cmd = self.opts.cmake

        defs = {
            # FIXME: Support ARCH, SMALL/LARGE etc
            'CMAKE_C_COMPILER': self.opts.cc,
            'CMAKE_CXX_COMPILER': self.opts.cxx,
        }
        if self.opts.cppflags or self.opts.cflags:
            all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
            defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)

        if self.opts.cppflags or self.opts.cxxflags:
            all_cxx_flags = ' '.join([self.opts.cppflags, self.opts.cxxflags])
            defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(all_cxx_flags)

        if self.opts.run_under:
            defs['TEST_SUITE_RUN_UNDER'] = self._unix_quote_args(
                self.opts.run_under)
        if self.opts.benchmarking_only:
            defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
        if self.opts.use_perf in ('time', 'all'):
            defs['TEST_SUITE_USE_PERF'] = 'ON'
        if self.opts.cmake_defines:
            for item in self.opts.cmake_defines:
                k, v = item.split('=', 1)
                defs[k] = v

        lines = ['Configuring with {']
        for k, v in sorted(defs.items()):
            lines.append("  %s: '%s'" % (k, v))
        lines.append('}')

        # Prepare cmake cache if requested:
        cache = []
        if self.opts.cmake_cache:
            cache_path = os.path.join(self._test_suite_dir(), "cmake/caches/",
                                      self.opts.cmake_cache + ".cmake")
            if os.path.exists(cache_path):
                cache = ['-C', cache_path]
            else:
                fatal("Could not find CMake cache file: " +
                      self.opts.cmake_cache + " in " + cache_path)

        for l in lines:
            note(l)

        cmake_cmd = [cmake_cmd] + cache + [self._test_suite_dir()] + \
            ['-D%s=%s' % (k, v) for k, v in defs.items()]
        if execute:
            self._check_call(cmake_cmd, cwd=path)

        return cmake_cmd
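
Stripped of the option handling, the configure step boils down to assembling one cmake command line from a definitions dict, with an optional '-C <cache>' prefix. A small sketch with invented paths and values:

defs = {
    'CMAKE_C_COMPILER': '/usr/bin/clang',
    'CMAKE_CXX_COMPILER': '/usr/bin/clang++',
    'TEST_SUITE_BENCHMARKING_ONLY': 'ON',
}
cache = ['-C', '/path/to/test-suite/cmake/caches/O3.cmake']
cmake_cmd = ['cmake'] + cache + ['/path/to/test-suite'] + \
    ['-D%s=%s' % (k, v) for k, v in sorted(defs.items())]
print(' '.join(cmake_cmd))  # prints one 'cmake -C ... -D...=...' command line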
Example #14
def async_run_job(job, db_name, ts, func_args):
    """Send a job to the async wrapper in the subprocess."""
    # If the run is not in the database, we can't do anything more.
    note("Queuing background job to process fieldchanges " + str(os.getpid()))
    launch_workers()
    check_workers(True)

    args = {'tsname': ts.name, 'db': db_name}
    job = Process(target=async_wrapper, args=[job, args, func_args])

    job.start()
    JOBS.append(job)
Example #15
    def _configure(self, path, execute=True):
        cmake_cmd = self.opts.cmake

        defs = {
            # FIXME: Support ARCH, SMALL/LARGE etc
            'CMAKE_C_COMPILER': self.opts.cc,
            'CMAKE_CXX_COMPILER': self.opts.cxx,
        }
        if self.opts.cppflags or self.opts.cflags:
            all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
            defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)
        
        if self.opts.cppflags or self.opts.cxxflags:
            all_cxx_flags = ' '.join([self.opts.cppflags, self.opts.cxxflags])
            defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(all_cxx_flags)
        
        if self.opts.run_under:
            defs['TEST_SUITE_RUN_UNDER'] = self._unix_quote_args(self.opts.run_under)
        if self.opts.benchmarking_only:
            defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
        if self.opts.use_perf in ('time', 'all'):
            defs['TEST_SUITE_USE_PERF'] = 'ON'
        if self.opts.cmake_defines:
            for item in self.opts.cmake_defines:
                k, v = item.split('=', 1)
                defs[k] = v
            
        lines = ['Configuring with {']
        for k, v in sorted(defs.items()):
            lines.append("  %s: '%s'" % (k, v))
        lines.append('}')
        
        # Prepare cmake cache if requested:
        cache = []
        if self.opts.cmake_cache:
            cache_path = os.path.join(self._test_suite_dir(), 
                                      "cmake/caches/", self.opts.cmake_cache + ".cmake")
            if os.path.exists(cache_path):
                cache = ['-C', cache_path]
            else:
                fatal("Could not find CMake cache file: " + 
                      self.opts.cmake_cache + " in " + cache_path)

        for l in lines:
            note(l)
        
        cmake_cmd = [cmake_cmd] + cache + [self._test_suite_dir()] + \
            ['-D%s=%s' % (k, v) for k, v in defs.items()]
        if execute:
            self._check_call(cmake_cmd, cwd=path)
        
        return cmake_cmd
Example #16
def async_run_job(job, db_name, ts, func_args):
    """Send a job to the async wrapper in the subprocess."""
    # If the run is not in the database, we can't do anything more.
    note("Queuing background job to process fieldchanges " + str(os.getpid()))
    launch_workers()
    check_workers(True)

    args = {'tsname': ts.name,
            'db': db_name}
    job = Process(target=async_wrapper,
                  args=[job, args, func_args])

    job.start()
    JOBS.append(job)
Example #17
def launch_workers():
    """Make sure we have a worker pool ready to queue."""
    global WORKERS
    if not WORKERS:
        note("Starting workers")
        manager = Manager()
        try:
            current_app.config['mem_logger'].buffer = \
                manager.list(current_app.config['mem_logger'].buffer)
        except RuntimeError:
            #  It might be the case that we are not running in the app.
            #  In this case, don't bother memory logging; stdout should
            #  be sufficient for console mode.
            pass
Example #18
def launch_workers():
    """Make sure we have a worker pool ready to queue."""
    global WORKERS
    if not WORKERS:
        note("Starting workers")
        manager = Manager()
        try:
            current_app.config['mem_logger'].buffer = \
                manager.list(current_app.config['mem_logger'].buffer)
        except RuntimeError:
            #  It might be the case that we are not running in the app.
            #  In this case, don't bother memory logging; stdout should
            #  be sufficient for console mode.
            pass
Example #19
def populate_blacklist():
    global ignored
    ignored = []
    try:
        path = current_app.old_config.blacklist
    except RuntimeError:
        path = os.path.join(os.path.dirname(sys.argv[0]), "blacklist")

    if path and os.path.isfile(path):
        note("Loading blacklist file: {}".format(path))
        with open(path, 'r') as f:
            for l in f.readlines():
                ignored.append(re.compile(l.strip()))
    else:
        warning("Ignoring blacklist file: {}".format(path))
Example #20
def filter_by_benchmark_name(ts, field_change):
    """Is this a fieldchanges we care about?
    """
    if ignored is None:
        populate_blacklist()
    benchmark_name = field_change.test.name
    ts_name = ts.name
    full_name = ts_name + "." + benchmark_name

    for regex in ignored:
        if regex.match(full_name):
            note("Dropping field change {} because it matches {}".format(
                full_name, regex.pattern))
            return False
    return True
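
A self-contained sketch of the blacklist filter: compile one regex per line of a (hypothetical) blacklist file and drop any "<suite>.<benchmark>" name that matches. The patterns below are invented for illustration.

import re

blacklist_lines = [
    r'nts\.flaky\..*',
    r'compile\.ignore-me$',
]
ignored = [re.compile(line.strip()) for line in blacklist_lines]


def is_interesting(full_name):
    return not any(regex.match(full_name) for regex in ignored)


assert not is_interesting('nts.flaky.benchmark1')   # dropped
assert is_interesting('nts.stable.benchmark1')      # kept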
Example #21
def async_run_job(job, db_name, ts, func_args, db_config):
    """Send a job to the async wrapper in the subprocess."""
    # If the run is not in the database, we can't do anything more.
    note("Queuing background job to process fieldchanges " + str(os.getpid()))
    launch_workers()
    check_workers(True)

    args = {'tsname': ts.name, 'db': db_name, 'db_info': db_config}
    job = Process(target=async_wrapper, args=[job, args, func_args])

    # Set this to make sure when parent dies, children are killed.
    job.daemon = True

    job.start()
    JOBS.append(job)
Example #22
    def _generate_run_info(self, tag, result_type, run_order, parent_commit):
        env_vars = {
            'Build Number': 'BUILD_NUMBER',
            'Owner': 'GERRIT_CHANGE_OWNER_NAME',
            'Gerrit URL': 'GERRIT_CHANGE_URL',
            'Jenkins URL': 'BUILD_URL'
        }

        run_info = {
            key: os.getenv(env_var)
            for key, env_var in env_vars.iteritems() if os.getenv(env_var)
        }

        try:
            commit_message = os.getenv('GERRIT_CHANGE_COMMIT_MESSAGE')
            if commit_message:
                commit_message = base64.b64decode(commit_message)
        except Exception:
            warning('Unable to decode commit message "{}", skipping'.format(
                commit_message))
        else:
            run_info['Commit Message'] = commit_message

        git_sha = os.getenv('GERRIT_PATCHSET_REVISION')
        if not git_sha:
            fatal("unable to determine git SHA for result, exiting.")

        if run_order:
            run_info['run_order'] = str(run_order)
        else:
            note("run order not provided, will use server-side auto-generated "
                 "run order")

        run_info.update({
            'git_sha': git_sha,
            't': str(calendar.timegm(time.gmtime())),
            'tag': tag
        })

        if result_type == 'cv':
            if not parent_commit:
                parent_commit = self._get_parent_commit()

            run_info.update({'parent_commit': parent_commit})

        return run_info
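
The first half of that method is a common pattern: map human-readable labels to environment variable names and keep only the ones that are actually set. A minimal sketch using the same Jenkins/Gerrit variable names:

import os

env_vars = {
    'Build Number': 'BUILD_NUMBER',
    'Owner': 'GERRIT_CHANGE_OWNER_NAME',
    'Gerrit URL': 'GERRIT_CHANGE_URL',
    'Jenkins URL': 'BUILD_URL',
}
run_info = {key: os.getenv(var)
            for key, var in env_vars.items()
            if os.getenv(var)}
print(run_info)  # only the variables present in the environment appear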
Example #23
    def frompath(path):
        """
        frompath(path) -> Instance

        Load an LNT instance from the given instance specifier. The instance
        path can be one of:
          * The directory containing the instance.
          * The instance config file.
          * A tarball containing an instance.
        """

        # Accept paths to config files, or to directories containing 'lnt.cfg'.
        tmpdir = None
        if os.path.isdir(path):
            config_path = os.path.join(path, 'lnt.cfg')
        elif tarfile.is_tarfile(path):
            # Accept paths to tar/tgz etc. files, which we automatically unpack
            # into a temporary directory.
            tmpdir = tempfile.mkdtemp(suffix='lnt')

            note("extracting input tarfile %r to %r" % (path, tmpdir))
            tf = tarfile.open(path)
            tf.extractall(tmpdir)

            # Find the LNT instance inside the tar file. Support tarballs that
            # either contain the instance directly, or contain a single
            # subdirectory which is the instance.
            if os.path.exists(os.path.join(tmpdir, "lnt.cfg")):
                config_path = os.path.join(tmpdir, "lnt.cfg")
            else:
                filenames = os.listdir(tmpdir)
                if len(filenames) != 1:
                    fatal("unable to find LNT instance inside tarfile")
                config_path = os.path.join(tmpdir, filenames[0], "lnt.cfg")
        else:
            config_path = path

        if not config_path or not os.path.exists(config_path):
            fatal("invalid config: %r" % config_path)

        config_data = {}
        exec open(config_path) in config_data
        config = lnt.server.config.Config.fromData(config_path, config_data)

        return Instance(config_path, config, tmpdir)
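
The tarball branch can be factored into a small helper: unpack into a temporary directory and look for 'lnt.cfg' either at the top level or one directory down. A sketch, with the tarball path supplied by the caller:

import os
import tarfile
import tempfile


def find_config_in_tarball(path):
    tmpdir = tempfile.mkdtemp(suffix='lnt')
    with tarfile.open(path) as tf:
        tf.extractall(tmpdir)
    candidate = os.path.join(tmpdir, 'lnt.cfg')
    if os.path.exists(candidate):
        return candidate
    entries = os.listdir(tmpdir)
    if len(entries) == 1:
        return os.path.join(tmpdir, entries[0], 'lnt.cfg')
    raise RuntimeError('unable to find LNT instance inside tarfile')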
Example #24
    def frompath(path):
        """
        frompath(path) -> Instance

        Load an LNT instance from the given instance specifier. The instance
        path can be one of:
          * The directory containing the instance.
          * The instance config file.
          * A tarball containing an instance.
        """

        # Accept paths to config files, or to directories containing 'lnt.cfg'.
        tmpdir = None
        if os.path.isdir(path):
            config_path = os.path.join(path, 'lnt.cfg')
        elif tarfile.is_tarfile(path):
            # Accept paths to tar/tgz etc. files, which we automatically unpack
            # into a temporary directory.
            tmpdir = tempfile.mkdtemp(suffix='lnt')

            note("extracting input tarfile %r to %r" % (path, tmpdir))
            tf = tarfile.open(path)
            tf.extractall(tmpdir)

            # Find the LNT instance inside the tar file. Support tarballs that
            # either contain the instance directly, or contain a single
            # subdirectory which is the instance.
            if os.path.exists(os.path.join(tmpdir, "lnt.cfg")):
                config_path = os.path.join(tmpdir, "lnt.cfg")
            else:
                filenames = os.listdir(tmpdir)
                if len(filenames) != 1:
                    fatal("unable to find LNT instance inside tarfile")
                config_path = os.path.join(tmpdir, filenames[0], "lnt.cfg")
        else:
            config_path = path

        if not config_path or not os.path.exists(config_path):
            fatal("invalid config: %r" % config_path)

        config_data = {}
        exec open(config_path) in config_data
        config = lnt.server.config.Config.fromData(config_path, config_data)

        return Instance(config_path, config, tmpdir)
Example #25
def identify_related_changes(ts, fc):
    """Can we find a home for this change in some existing regression? If a
    match is found add a regression indicator adding this change to that
    regression, otherwise create a new regression for this change.

    Regression matching looks for regressions that happen in overlapping order
    ranges. Then looks for changes that are similar.

    """
    regressions = ts.query(ts.Regression.id) \
        .filter(or_(ts.Regression.state == RegressionState.DETECTED,
                ts.Regression.state == RegressionState.DETECTED_FIXED)) \
        .all()

    for regression_packed in regressions:
        regression_id = regression_packed[0]
        regression_indicators = get_ris(ts, regression_id)
        print "RIs:", regression_indicators
        for change in regression_indicators:
            regression_change = change.field_change
            if is_overlaping(regression_change, fc):
                confidence = 0.0

                confidence += percent_similar(regression_change.machine.name,
                                              fc.machine.name)
                confidence += percent_similar(regression_change.test.name,
                                              fc.test.name)

                if regression_change.field == fc.field:
                    confidence += 1.0

                if confidence >= 2.0:
                    # Matching
                    MSG = "Found a match: {} with score {}."
                    regression = ts.query(ts.Regression).get(regression_id)
                    note(MSG.format(str(regression), confidence))
                    ri = ts.RegressionIndicator(regression, fc)
                    ts.add(ri)
                    # Update the default title if needed.
                    rebuild_title(ts, regression)
                    ts.commit()
                    return True, regression
    note("Could not find a partner, creating new Regression for change")
    new_reg = new_regression(ts, [fc.id])
    return False, new_reg
Example #26
    def _make(self, path):
        make_cmd = self.opts.make

        subdir = path
        target = 'all'
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            if self.opts.only_test[1]:
                target = self.opts.only_test[1]
            subdir = os.path.join(*components)

        note('Building...')
        if not self.opts.succinct:
            args = ["VERBOSE=1", target]
        else:
            args = [target]
        self._check_call(
            [make_cmd, '-j', str(self._build_threads())] + args, cwd=subdir)
Example #27
    def _make(self, path):
        make_cmd = self.opts.make
        
        subdir = path
        target = 'all'
        if self.opts.only_test:
            components = [path] + [self.opts.only_test[0]]
            if self.opts.only_test[1]:
                target = self.opts.only_test[1]
            subdir = os.path.join(*components)

        note('Building...')
        if not self.opts.succinct:
            args = ["VERBOSE=1", target]
        else:
            args = [target]
        self._check_call([make_cmd,
                          '-j', str(self._build_threads())] + args,
                         cwd=subdir)
Example #28
    def _generate_run_info(self, tag, result_type, run_order, parent_commit):
        env_vars = {'Build Number': 'BUILD_NUMBER',
                    'Owner': 'GERRIT_CHANGE_OWNER_NAME',
                    'Gerrit URL': 'GERRIT_CHANGE_URL',
                    'Jenkins URL': 'BUILD_URL'}

        run_info = {key: os.getenv(env_var)
                    for key, env_var in env_vars.iteritems()
                    if os.getenv(env_var)}

        try:
            commit_message = os.getenv('GERRIT_CHANGE_COMMIT_MESSAGE')
            if commit_message:
                commit_message = base64.b64decode(commit_message)
        except Exception:
            warning('Unable to decode commit message "{}", skipping'.format(
                commit_message))
        else:
            run_info['Commit Message'] = commit_message

        git_sha = os.getenv('GERRIT_PATCHSET_REVISION')
        if not git_sha:
            fatal("unable to determine git SHA for result, exiting.")

        if run_order:
            run_info['run_order'] = str(run_order)
        else:
            note("run order not provided, will use server-side auto-generated "
                 "run order")

        run_info.update({'git_sha': git_sha,
                         't': str(calendar.timegm(time.gmtime())),
                         'tag': tag})

        if result_type == 'cv':
            if not parent_commit:
                parent_commit = self._get_parent_commit()

            run_info.update({'parent_commit': parent_commit})

        return run_info
Example #29
def regression_evolution(ts, run_id):
    """Analyse regressions. If they have changes, process them.
    Look at each regression in state detect.  Move to ignore if it is fixed.
    Look at each regression in state stage. Move to verify if fixed.
    Look at regressions in detect: do they match our policy? If not, move to NTBF.

    """
    note("Running regression evolution")
    changed = 0
    regressions = ts.query(ts.Regression).all()
    detects = [r for r in regressions if r.state == RegressionState.DETECTED]

    for regression in detects:
        if is_fixed(ts, regression):
            note("Detected fixed regression" + str(regression))
            regression.state = RegressionState.IGNORED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1
    ts.commit()

    staged = [r for r in regressions if r.state == RegressionState.STAGED]

    for regression in staged:
        if is_fixed(ts, regression):
            note("Staged fixed regression" + str(regression))
            regression.state = RegressionState.DETECTED_FIXED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1
    ts.commit()

    active = [r for r in regressions if r.state == RegressionState.ACTIVE]

    for regression in active:
        if is_fixed(ts, regression):
            note("Active fixed regression" + str(regression))
            regression.state = RegressionState.DETECTED_FIXED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1
    ts.commit()
    note("Changed the state of {} regressions".format(changed))
Example #30
def regression_evolution(ts, run_id):
    """Analyse regressions. If they have changes, process them.
    Look at each regression in state detect.  Move to ignore if it is fixed.
    Look at each regression in state stage. Move to verify if fixed.
    Look at regressions in detect: do they match our policy? If not, move to NTBF.

    """
    note("Running regression evolution")
    changed = 0
    regressions = ts.query(ts.Regression).all()
    detects = [r for r in regressions if r.state == RegressionState.DETECTED]
    
    for regression in detects:
        if is_fixed(ts, regression):
            note("Detected fixed regression" + str(regression))
            regression.state = RegressionState.IGNORED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1
    ts.commit()

    staged = [r for r in regressions if r.state == RegressionState.STAGED]
    
    for regression in staged:
        if is_fixed(ts, regression):
            note("Staged fixed regression" + str(regression))
            regression.state = RegressionState.DETECTED_FIXED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1
    ts.commit()
    
    active = [r for r in regressions if r.state == RegressionState.ACTIVE]
    
    for regression in active:
        if is_fixed(ts, regression):
            note("Active fixed regression" + str(regression))
            regression.state = RegressionState.DETECTED_FIXED
            regression.title = regression.title + " [Detected Fixed]"
            changed += 1
    ts.commit()
    note("Changed the state of {} regressions".format(changed))
Example #31
    def _get_parent_commit(self):
        required_variables = {
            'project': os.environ.get('GERRIT_PROJECT'),
            'branch': os.environ.get('GERRIT_BRANCH'),
            'change_id': os.environ.get('GERRIT_CHANGE_ID'),
            'commit': os.environ.get('GERRIT_PATCHSET_REVISION')
        }

        if all(required_variables.values()):
            url = ('http://review.couchbase.org/changes/{project}~{branch}~'
                   '{change_id}/revisions/{commit}/commit'.format(
                       **required_variables))
            note('getting parent commit from {}'.format(url))
            try:
                response = urllib2.urlopen(url).read()
            except Exception:
                fatal('failed to get parent commit from {}'.format(url))
                raise

            # For some reason Gerrit returns a malformed json response
            # with extra characters before the actual json begins
            # Skip ahead to avoid this causing json deserialisation to fail
            start_index = response.index('{')
            response = response[start_index:]

            try:
                json_response = json.loads(response)
            except Exception:
                fatal('failed to decode Gerrit json response: {}'.format(
                    response))
                raise

            parent_commit = json_response['parents'][0]['commit']
            return parent_commit

        else:
            fatal('unable to find required Gerrit environment variables, '
                  'exiting')
Example #32
    def _get_parent_commit(self):
        required_variables = {
            'project': os.environ.get('GERRIT_PROJECT'),
            'branch': os.environ.get('GERRIT_BRANCH'),
            'change_id': os.environ.get('GERRIT_CHANGE_ID'),
            'commit': os.environ.get('GERRIT_PATCHSET_REVISION')}

        if all(required_variables.values()):
            url = ('http://review.couchbase.org/changes/{project}~{branch}~'
                   '{change_id}/revisions/{commit}/commit'
                   .format(**required_variables))
            note('getting parent commit from {}'.format(url))
            try:
                response = urllib2.urlopen(url).read()
            except Exception:
                fatal('failed to get parent commit from {}'.format(url))
                raise

            # For some reason Gerrit returns a malformed json response
            # with extra characters before the actual json begins
            # Skip ahead to avoid this causing json deserialisation to fail
            start_index = response.index('{')
            response = response[start_index:]

            try:
                json_response = json.loads(response)
            except Exception:
                fatal('failed to decode Gerrit json response: {}'
                      .format(response))
                raise

            parent_commit = json_response['parents'][0]['commit']
            return parent_commit

        else:
            fatal('unable to find required Gerrit environment variables, '
                  'exiting')
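
Gerrit prefixes its REST responses with a magic ")]}'" line to defeat JSON hijacking, which is why both versions skip ahead to the first '{' before decoding. A tiny sketch with a canned response:

import json

raw = ')]}\'\n{"parents": [{"commit": "abc123"}]}'
payload = raw[raw.index('{'):]           # drop the XSSI-protection prefix
parent_commit = json.loads(payload)['parents'][0]['commit']
assert parent_commit == 'abc123'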
Example #33
    def _check_call(self, *args, **kwargs):
        if self.opts.verbose:
            note('Execute: %s' % ' '.join(args[0]))
            if 'cwd' in kwargs:
                note('          (In %s)' % kwargs['cwd'])
        return subprocess.check_call(*args, **kwargs)
Example #34
File: compile.py  Project: BillSeurer/lnt
    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" +
             usage_info) % locals())
        parser.add_option("-s", "--sandbox", dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str, default=None, metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("", "--no-timestamp", dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false", default=True)
        group.add_option("", "--cc", dest="cc", type='str',
                         help="Path to the compiler under test",
                         action="store", default=None)
        group.add_option("", "--cxx", dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str, default=None)
        group.add_option("", "--ld", dest="ld",
                         help="Path to the c linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--ldxx", dest="ldxx",
                         help="Path to the cxx linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--test-externals", dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str, default=None, metavar="PATH")
        group.add_option("", "--machine-param", dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-param", dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-order", dest="run_order", metavar="STR",
                         help="String to use to identify and order this run",
                         action="store", type=str, default=None)
        group.add_option("", "--test-subdir", dest="test_subdir",
                         help="Subdirectory of test external dir to look for tests in.",
                         type=str, default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("", "--no-memory-profiling", dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false", default=True)
        group.add_option("", "--multisample", dest="run_count", metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store", type=int, default=3)
        group.add_option("", "--min-sample-time", dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N", action="store", type=float, default=.5)
        group.add_option("", "--save-temps", dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true", default=False)
        group.add_option("", "--show-tests", dest="show_tests",
                         help="Only list the availables tests that will be run",
                         action="store_true", default=False)
        group.add_option("", "--test", dest="tests", metavar="NAME",
                         help="Individual test to run",
                         action="append", default=[])
        group.add_option("", "--test-filter", dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP", action="append", default=[])
        group.add_option("", "--flags-to-test", dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST", action="append", default=[])
        group.add_option("", "--jobs-to-test", dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM", action="append", default=[], type=int)
        group.add_option("", "--config-to-test", dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME", action="append", default=[],
                         choices=('Debug','Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("", "--no-machdep-info", dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false", default=True)
        group.add_option("", "--machine-name", dest="machine_name", type='str',
                         help="Machine name to use in submission [%default]",
                         action="store", default=platform.uname()[1])
        group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH",
                          help=("autosubmit the test result to the given server "
                                "(or local instance) [%default]"),
                          type=str, default=None)
        group.add_option("", "--commit", dest="commit",
                          help=("whether the autosubmit result should be committed "
                                "[%default]"),
                          type=int, default=True)
        group.add_option("", "--output", dest="output", metavar="PATH",
                          help="write raw report data to PATH (or stdout if '-')",
                          action="store", default=None)
        group.add_option("-v", "--verbose", dest="verbose",
                          help="show verbose test results",
                          action="store_true", default=False)

        parser.add_option_group(group)

        opts,args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')


        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx,))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))
        
        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" % (
                    opts.cc,))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cxx: %r" % (
                    opts.cxx,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx
        
        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            report_name = "test-%s" % (timestamp().replace(' ','_').replace(':','-'))
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),report_name)
            
        try:
            os.mkdir(g_output_dir)
        except OSError,e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" % (
                        g_output_dir,))
            else:
                raise
Example #35
def action_view_comparison(name, args):
    """view a report comparison using a temporary server"""

    import lnt.server.instance
    import lnt.server.ui.app
    import lnt.server.db.migrate

    parser = OptionParser("%s [options] <report A> <report B>" % (name,))
    parser.add_option("", "--hostname", dest="hostname", type=str,
                      help="host interface to use [%default]",
                      default='localhost')
    parser.add_option("", "--port", dest="port", type=int, metavar="N",
                      help="local port to use [%default]", default=8000)
    parser.add_option("", "--dry-run", dest="dry_run",
                      help="Do a dry run through the comparison. [%default]"
                      " [%default]", action="store_true", default=False)
    (opts, args) = parser.parse_args(args)

    if len(args) != 2:
        parser.error("invalid number of arguments")

    report_a_path, report_b_path = args

    # Set up the default logger.
    logger = logging.getLogger("lnt")
    logger.setLevel(logging.ERROR)
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'))
    logger.addHandler(handler)

    # Create a temporary directory to hold the instance.
    tmpdir = tempfile.mkdtemp(suffix='lnt')

    try:
        # Create a temporary instance.
        url = 'http://%s:%d' % (opts.hostname, opts.port)
        db_path = os.path.join(tmpdir, 'data.db')
        db_info = lnt.server.config.DBInfo(
            'sqlite:///%s' % (db_path,), '0.4', None,
            lnt.server.config.EmailConfig(False, '', '', []), "0")
        # _(self, name, zorgURL, dbDir, tempDir,
        # profileDir, secretKey, databases, blacklist):
        config = lnt.server.config.Config('LNT', url, db_path, tmpdir,
                                          None, None, {'default': db_info},
                                          None)
        instance = lnt.server.instance.Instance(None, config)

        # Create the database.
        lnt.server.db.migrate.update_path(db_path)

        # Import the two reports.
        with contextlib.closing(config.get_database('default')) as db:
            import_and_report(
                config, 'default', db, report_a_path,
                '<auto>', commit=True)
            import_and_report(
                config, 'default', db, report_b_path,
                '<auto>', commit=True)

            # Dispatch another thread to start the webbrowser.
            comparison_url = '%s/v4/nts/2?compare_to=1' % (url,)
            note("opening comparison view: %s" % (comparison_url,))
            
            if not opts.dry_run:
                thread.start_new_thread(start_browser, (comparison_url, True))

            # Run the webserver.
            app = lnt.server.ui.app.App.create_with_instance(instance)
            app.debug = True
            
            if opts.dry_run:
                # Don't catch our exceptions.
                app.testing = True
                # Create a test client.
                client = app.test_client()
                response = client.get(comparison_url)
                assert response.status_code == 200, "Page did not return 200."
            else:
                app.run(opts.hostname, opts.port, use_reloader=False)
    finally:
        shutil.rmtree(tmpdir)
Example #36
def regenerate_fieldchanges_for_run(ts, run_id):
    """Regenerate the set of FieldChange objects for the given run.
    """
    # Allow for potentially a few different runs, previous_runs, next_runs
    # all with the same order_id which we will aggregate together to make
    # our comparison result.
    run = ts.getRun(run_id)
    runs = ts.query(ts.Run). \
        filter(ts.Run.order_id == run.order_id). \
        filter(ts.Run.machine_id == run.machine_id). \
        all()
    regressions = ts.query(ts.Regression).all()[::-1]
    previous_runs = ts.get_previous_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)
    next_runs = ts.get_next_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)

    # Find our start/end order.
    if previous_runs != []:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    if next_runs != []:
        end_order = next_runs[-1].order
    else:
        end_order = run.order

    # Load our run data for the creation of the new fieldchanges.
    runs_to_load = [r.id for r in (runs + previous_runs)]

    # When the same rev is submitted many times, the database accesses here
    # can be huge, and it is almost always an error to have the same rev
    # be used in so many runs.
    run_size = len(runs_to_load)
    if run_size > 50:
        warning("Generating field changes for {} runs."
                "That will be very slow.".format(run_size))
    runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)

    # Only store fieldchanges for "metric" samples like execution time;
    # not for fields with other data, e.g. hash of a binary
    for field in list(ts.Sample.get_metric_fields()):
        for test_id in runinfo.test_ids:
            f = None
            result = runinfo.get_comparison_result(
                runs, previous_runs, test_id, field,
                ts.Sample.get_hash_of_binary_field())
            # Try and find a matching FC and update, else create one.
            try:
                f = ts.query(ts.FieldChange) \
                    .filter(ts.FieldChange.start_order == start_order) \
                    .filter(ts.FieldChange.end_order == end_order) \
                    .filter(ts.FieldChange.test_id == test_id) \
                    .filter(ts.FieldChange.machine == run.machine) \
                    .filter(ts.FieldChange.field == field) \
                    .one()
            except sqlalchemy.orm.exc.NoResultFound:
                f = None

            if not result.is_result_performance_change() and f:
                # With more data, it's not a regression. Kill it!
                note("Removing field change: {}".format(f.id))
                delete_fieldchange(ts, f)
                continue

            if result.is_result_performance_change() and not f:
                test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
                f = ts.FieldChange(start_order=start_order,
                                   end_order=run.order,
                                   machine=run.machine,
                                   test=test,
                                   field=field)
                ts.add(f)
                ts.commit()
                found, new_reg = identify_related_changes(ts, regressions, f)
                if found:
                    regressions.append(new_reg)
                    note("Found field change: {}".format(run.machine))

            # Always update FCs with new values.
            if f:
                f.old_value = result.previous
                f.new_value = result.current
                f.run = run
    ts.commit()
    rules.post_submission_hooks(ts, run_id)
Example #37
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report contains information that is useful
        for reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        if not os.path.exists(path):
            mkdir_p(path)
        os.chdir(path)

        # Run with -save-temps
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + [
            '-DTEST_SUITE_DIAGNOSE=On',
            '-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps'
        ]

        note(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        note(out)

        # Figure out our test's target.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, must be nested? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        note(" ".join(make_save_temps))
        out = subprocess.check_output(make_save_temps)
        note(out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"),
                    report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = [
            "/*.o", "/*.time", "/*.cmake", "/*.make", "/*.includecache",
            "/*.txt"
        ]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        note("Report produced in: " + report_path)

        # Run through the rest of LNT, but don't allow this to be submitted
        # because there is no data.
        class DontSubmitResults(object):
            def get(self, url):
                return None

        return DontSubmitResults()
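
The target lookup relies on 'make help' printing one "... target" line per target; the benchmark's short name is then matched case-insensitively against those lines. A standalone sketch with canned help output:

import re

make_help_output = """The following are some of the valid targets:
... all
... clean
... MyBenchmark
"""
short_name = 'mybenchmark'
matcher = re.compile(r"^\.\.\.\s{0}$".format(re.escape(short_name)),
                     re.MULTILINE | re.IGNORECASE)
assert matcher.search(make_help_output) is not None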
Example #38
    def _parse_lit_output(self, path, data, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str
        }

        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []

        for test_data in data['tests']:
            raw_name = test_data['name'].split(' :: ', 1)[1]
            name = 'nts.' + raw_name.rsplit('.test', 1)[0]
            is_pass = self._is_pass_code(test_data['code'])

            # If --single-result is given, exit based on --single-result-predicate
            if self.opts.single_result and \
               raw_name == self.opts.single_result+'.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in test_data['metrics'].items():
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue

                    if (k not in LIT_METRIC_TO_LNT
                            or LIT_METRIC_TO_LNT[k] in ignore):
                        continue
                    test_samples.append(
                        lnt.testing.TestSamples(
                            name + '.' + LIT_METRIC_TO_LNT[k], [v], test_info,
                            LIT_METRIC_CONV_FN[k]))

            if self._test_failed_to_compile(raw_name, path):
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL], test_info))

            elif not is_pass:
                test_samples.append(
                    lnt.testing.TestSamples(
                        name + '.exec.status',
                        [self._get_lnt_code(test_data['code'])], test_info))

        # Now import the profiles in parallel.
        if profiles_to_import:
            note('Importing %d profiles with %d threads...' %
                 (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend(
                    [sample for sample in samples if sample is not None])
            except multiprocessing.TimeoutError:
                warning(
                    'Profiles had not completed importing after %s seconds.' %
                    TIMEOUT)
                note('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {'tag': 'nts'}
        run_info.update(self._get_cc_info())
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order

        machine_info = {}

        machine = lnt.testing.Machine(self.nick, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
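
For context, this parser consumes the JSON that lit emits via its --output option. A minimal sketch of that layout, with a hypothetical test name and metric values (the 'test-suite :: ...' prefix and the numbers are illustrative, not taken from a real run):

# Hypothetical lit result dictionary of the shape _parse_lit_output expects.
# Names carry a "suite :: path" prefix and a ".test" suffix; metric keys are
# the lit names that LIT_METRIC_TO_LNT maps onto LNT metric names.
data = {
    'tests': [
        {
            'name': 'test-suite :: SingleSource/Benchmarks/Misc/pi.test',
            'code': 'PASS',
            'metrics': {
                'compile_time': 0.12,  # -> nts.SingleSource/.../pi.compile
                'exec_time': 1.84,     # -> nts.SingleSource/.../pi.exec
                'hash': 'd41d8cd98f',  # -> nts.SingleSource/.../pi.hash
            },
        },
    ],
}

When --single-result names that test, the default predicate "status" makes the run exit 0 for the entry above; a predicate such as "exec_time < 2.0" is evaluated against the same metrics dictionary.
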
Example #39
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top-level target for running the test-suite.  It
        produces a triage report for a single benchmark instead of running the
        test-suite normally. The report contains the artifacts needed to
        reproduce and diagnose a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        mkdir_p(path)
        os.chdir(path)

        # Run with -save-temps
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        note(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        note(out)

        # Figure out our test's target.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, nestsed? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_deps = [self.opts.make, "VERBOSE=1", "timeit-target",
                     "timeit-host", "fpcmp-host"]
        note(" ".join(make_deps))
        p = subprocess.Popen(make_deps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        note(std_out)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        note(" ".join(make_save_temps))
        p = subprocess.Popen(make_save_temps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        note(std_out)
        with open(report_path + "/build.log", 'w') as f:
            f.write(std_out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"), report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = ["/*.o", "/*.time", "/*.cmake", "/*.make",
                       "/*.includecache", "/*.txt"]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        # Now let's do -ftime-report.
        cmd_time_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-ftime-report']

        note(' '.join(cmd_time_report))

        out = subprocess.check_output(cmd_time_report)
        note(out)

        make_time_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_time_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/time-report.txt", 'w') as f:
            f.write(std_err)
        note("Wrote: " + report_path + "/time-report.txt")

        # Now let's do -mllvm -stats.
        cmd_stats_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-mllvm -stats']

        note(' '.join(cmd_stats_report))

        out = subprocess.check_output(cmd_stats_report)
        note(out)

        make_stats_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_stats_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/stats-report.txt", 'w') as f:
            f.write(std_err)
        note("Wrote: " + report_path + "/stats-report.txt")

        #  Collect Profile:
        if "Darwin" in platform.platform():
            # For testing and power users, let's allow overrides of how sudo
            # and iprofiler are called.
            sudo = os.getenv("SUDO_CMD", "sudo")
            if " " in sudo:
                # Already a multi-word command; keep it as an argv list.
                sudo = sudo.split(" ")
            elif not sudo:
                sudo = []
            else:
                sudo = [sudo]
            iprofiler = os.getenv("IPROFILER_CMD",
                                  "iprofiler -timeprofiler -I 40u")

            cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
            print ' '.join(cmd_iprofiler)

            out = subprocess.check_output(cmd_iprofiler)

            os.chdir(local_path)
            make_iprofiler_temps = [self.opts.make, "VERBOSE=1", short_name]
            p = subprocess.Popen(make_iprofiler_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            warning("Using sudo to collect execution trace.")
            make_save_temps = sudo + [self.opts.lit, short_name + ".test"]
            p = subprocess.Popen(make_save_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            sys.stdout.write(std_out)
            sys.stderr.write(std_err)
            warning("Tests may fail because of iprofiler's output.")
            # The dtps file will be saved as root; change its ownership so
            # that we can read it.
            chown_cmd = sudo + ["chown", "-R", getpass.getuser(),
                                short_name + ".dtps"]
            subprocess.call(chown_cmd)
            profile = local_path + "/" + short_name + ".dtps"
            shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
            note(profile + "-->" + report_path)
        else:
            warning("Skipping execution profiling because this is not Darwin.")
        note("Report produced in: " + report_path)

        # Run through the rest of LNT, but don't allow this to be submitted
        # because there is no data.
        class DontSubmitResults(object):

            def get(self, url):
                return report_path

            def __getitem__(self, key):
                return report_path

        return DontSubmitResults()
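
The Darwin-only profiling step above reads two environment variables to decide how sudo and iprofiler are invoked. A minimal sketch of overriding them before taking the diagnose path; the values are hypothetical (the iprofiler string is simply the default shown above):

import os

# Hypothetical overrides for the os.getenv() lookups in diagnose():
# use non-interactive sudo and keep the default iprofiler invocation.
os.environ["SUDO_CMD"] = "sudo -n"
os.environ["IPROFILER_CMD"] = "iprofiler -timeprofiler -I 40u"
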
Example #40
    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" + usage_info) % locals())
        parser.add_option("-s",
                          "--sandbox",
                          dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str,
                          default=None,
                          metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--cc",
                         dest="cc",
                         type='str',
                         help="Path to the compiler under test",
                         action="store",
                         default=None)
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--ld",
            dest="ld",
            help="Path to the c linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option(
            "",
            "--ldxx",
            dest="ldxx",
            help="Path to the cxx linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--machine-param",
                         dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-param",
                         dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-order",
                         dest="run_order",
                         metavar="STR",
                         help="String to use to identify and order this run",
                         action="store",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--test-subdir",
            dest="test_subdir",
            help="Subdirectory of test external dir to look for tests in.",
            type=str,
            default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("",
                         "--no-memory-profiling",
                         dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--multisample",
                         dest="run_count",
                         metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store",
                         type=int,
                         default=3)
        group.add_option("",
                         "--min-sample-time",
                         dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N",
                         action="store",
                         type=float,
                         default=.5)
        group.add_option("",
                         "--save-temps",
                         dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true",
                         default=False)
        group.add_option(
            "",
            "--show-tests",
            dest="show_tests",
            help="Only list the availables tests that will be run",
            action="store_true",
            default=False)
        group.add_option("",
                         "--test",
                         dest="tests",
                         metavar="NAME",
                         help="Individual test to run",
                         action="append",
                         default=[])
        group.add_option("",
                         "--test-filter",
                         dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP",
                         action="append",
                         default=[])
        group.add_option("",
                         "--flags-to-test",
                         dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST",
                         action="append",
                         default=[])
        group.add_option("",
                         "--jobs-to-test",
                         dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM",
                         action="append",
                         default=[],
                         type=int)
        group.add_option("",
                         "--config-to-test",
                         dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME",
                         action="append",
                         default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--no-machdep-info",
                         dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--machine-name",
                         dest="machine_name",
                         type='str',
                         help="Machine name to use in submission [%default]",
                         action="store",
                         default=platform.uname()[1])
        group.add_option(
            "",
            "--submit",
            dest="submit_url",
            metavar="URLORPATH",
            help=("autosubmit the test result to the given server "
                  "(or local instance) [%default]"),
            type=str,
            default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option(
            "",
            "--output",
            dest="output",
            metavar="PATH",
            help="write raw report data to PATH (or stdout if '-')",
            action="store",
            default=None)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx, ))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" %
                         (opts.cc, ))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cc: %r" %
                         (opts.cc, ))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >> sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            report_name = "test-%s" % (timestamp().replace(' ', '_').replace(
                ':', '-'))
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError, e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" %
                             (g_output_dir, ))
            else:
                raise
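
For reference, the sandbox handling above resolves to a single output directory: "build" with --no-timestamp, otherwise a name derived from the run timestamp. A small sketch with a hypothetical sandbox path and timestamp string:

import os

sandbox_path = '/tmp/lnt-sandbox'   # hypothetical --sandbox argument
ts = '2016-04-02 15:22:01'          # hypothetical timestamp() value
report_name = "test-%s" % (ts.replace(' ', '_').replace(':', '-'))
g_output_dir = os.path.join(os.path.abspath(sandbox_path), report_name)
# g_output_dir == '/tmp/lnt-sandbox/test-2016-04-02_15-22-01'
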
Example #41
def action_send_daily_report(name, args):
    """send a daily report email"""
    import datetime
    import email.mime.multipart
    import email.mime.text
    import smtplib

    import lnt.server.reporting.dailyreport

    parser = OptionParser("%s [options] <instance path> <address>" % (name, ))
    parser.add_option("",
                      "--database",
                      dest="database",
                      default="default",
                      help="database to use [%default]")
    parser.add_option("",
                      "--testsuite",
                      dest="testsuite",
                      default="nts",
                      help="testsuite to use [%default]")
    parser.add_option("",
                      "--host",
                      dest="host",
                      default="localhost",
                      help="email relay host to use [%default]")
    parser.add_option("",
                      "--from",
                      dest="from_address",
                      default=None,
                      help="from email address (required)")
    parser.add_option(
        "",
        "--today",
        dest="today",
        action="store_true",
        help="send the report for today (instead of most recent)")
    parser.add_option("",
                      "--subject-prefix",
                      dest="subject_prefix",
                      help="add a subject prefix")
    parser.add_option("-n",
                      "--dry-run",
                      dest="dry_run",
                      default=False,
                      action="store_true",
                      help="Don't actually send email."
                      " Used for testing.")
    parser.add_option("",
                      "--days",
                      dest="days",
                      default=3,
                      type="int",
                      help="Number of days to show in report.")
    parser.add_option("",
                      "--filter-machine-regex",
                      dest="filter_machine_regex",
                      default=None,
                      help="only show machines that contain the regex.")

    (opts, args) = parser.parse_args(args)

    if len(args) != 2:
        parser.error("invalid number of arguments")
    if opts.from_address is None:
        parser.error("--from argument is required")

    path, to_address = args

    # Load the LNT instance.
    instance = lnt.server.instance.Instance.frompath(path)
    config = instance.config

    # Get the database.
    with contextlib.closing(config.get_database(opts.database)) as db:

        # Get the testsuite.
        ts = db.testsuite[opts.testsuite]

        if opts.today:
            date = datetime.datetime.utcnow()
        else:
            # Get a timestamp to use to derive the daily report to generate.
            latest = ts.query(ts.Run).\
                order_by(ts.Run.start_time.desc()).limit(1).first()

            # If we found a run, use its start time (rounded up to the next
            # hour, so we make sure it gets included).
            if latest:
                date = latest.start_time + datetime.timedelta(hours=1)
            else:
                # Otherwise, just use now.
                date = datetime.datetime.utcnow()

        # Generate the daily report.
        note("building report data...")
        report = lnt.server.reporting.dailyreport.DailyReport(
            ts,
            year=date.year,
            month=date.month,
            day=date.day,
            day_start_offset_hours=date.hour,
            for_mail=True,
            num_prior_days_to_include=opts.days,
            filter_machine_regex=opts.filter_machine_regex)
        report.build()

        note("generating HTML report...")
        ts_url = "%s/db_%s/v4/%s" \
            % (config.zorgURL, opts.database, opts.testsuite)
        subject = "Daily Report: %04d-%02d-%02d" % (report.year, report.month,
                                                    report.day)
        html_report = report.render(ts_url, only_html_body=False)

        if opts.subject_prefix is not None:
            subject = "%s %s" % (opts.subject_prefix, subject)

        # Form the multipart email message.
        msg = email.mime.multipart.MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = opts.from_address
        msg['To'] = to_address
        msg.attach(email.mime.text.MIMEText(html_report, "html"))

        # Send the report.
        if not opts.dry_run:
            s = smtplib.SMTP(opts.host)
            s.sendmail(opts.from_address, [to_address], msg.as_string())
            s.quit()
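
Because the OptionParser definitions double as the command-line contract, the action can be exercised directly with a dry run. A sketch, assuming it runs inside the same lnt command module; the action name, instance path, and addresses are hypothetical:

# Hypothetical direct invocation; --dry-run builds the report but skips
# the SMTP step, which is convenient when testing a local instance.
action_send_daily_report(
    "send-daily-report",
    ["--from", "lnt@example.com", "--dry-run", "--days", "7",
     "/srv/lnt/instance", "perf-reports@example.com"],
)
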
Example #42
    def _check_call(self, *args, **kwargs):
        if self.opts.verbose:
            note('Execute: %s' % ' '.join(args[0]))
            if 'cwd' in kwargs:
                note('          (In %s)' % kwargs['cwd'])
        return subprocess.check_call(*args, **kwargs)
Example #43
File: main.py  Project: efcs/lnt
def action_send_daily_report(name, args):
    """send a daily report email"""
    import datetime
    import email.mime.multipart
    import email.mime.text
    import smtplib

    import lnt.server.reporting.dailyreport

    parser = OptionParser("%s [options] <instance path> <address>" % (
            name,))
    parser.add_option("", "--database", dest="database", default="default",
                      help="database to use [%default]")
    parser.add_option("", "--testsuite", dest="testsuite", default="nts",
                      help="testsuite to use [%default]")
    parser.add_option("", "--host", dest="host", default="localhost",
                      help="email relay host to use [%default]")
    parser.add_option("", "--from", dest="from_address", default=None,
                      help="from email address (required)")
    parser.add_option("", "--today", dest="today", action="store_true",
                      help="send the report for today (instead of most recent)")
    parser.add_option("", "--subject-prefix", dest="subject_prefix",
                      help="add a subject prefix")
    parser.add_option("-n", "--dry-run", dest="dry_run", default=False,
                      action="store_true", help="Don't actually send email."
                      " Used for testing.")
 
    (opts, args) = parser.parse_args(args)

    if len(args) != 2:
        parser.error("invalid number of arguments")
    if opts.from_address is None:
        parser.error("--from argument is required")

    path, to_address = args

    # Load the LNT instance.
    instance = lnt.server.instance.Instance.frompath(path)
    config = instance.config

    # Get the database.
    db = config.get_database(opts.database)

    # Get the testsuite.
    ts = db.testsuite[opts.testsuite]

    if opts.today:
        date = datetime.datetime.utcnow()
    else:
        # Get a timestamp to use to derive the daily report to generate.
        latest = ts.query(ts.Run).\
            order_by(ts.Run.start_time.desc()).limit(1).first()

        # If we found a run, use its start time (rounded up to the next hour,
        # so we make sure it gets included).
        if latest:
            date = latest.start_time + datetime.timedelta(hours=1)
        else:
            # Otherwise, just use now.
            date = datetime.datetime.utcnow()

    # Generate the daily report.
    note("building report data...")
    report = lnt.server.reporting.dailyreport.DailyReport(
        ts, year=date.year, month=date.month, day=date.day,
        day_start_offset_hours=date.hour, for_mail=True)
    report.build()

    note("generating HTML report...")
    ts_url = "%s/db_%s/v4/%s" % (config.zorgURL, opts.database, opts.testsuite)
    subject = "Daily Report: %04d-%02d-%02d" % (
        report.year, report.month, report.day)
    html_report = report.render(ts_url, only_html_body=False)

    if opts.subject_prefix is not None:
        subject = "%s %s" % (opts.subject_prefix, subject)

    # Form the multipart email message.
    msg = email.mime.multipart.MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = opts.from_address
    msg['To'] = to_address
    msg.attach(email.mime.text.MIMEText(html_report, "html"))

    # Send the report.
    if not opts.dry_run:
        s = smtplib.SMTP(opts.host)
        s.sendmail(opts.from_address, [to_address],
                   msg.as_string())
        s.quit()
Example #44
def import_and_report(config, db_name, db, file, format, commit=False,
                      show_sample_count=False, disable_email=False,
                      disable_report=False):
    """
    import_and_report(config, db_name, db, file, format,
                      [commit], [show_sample_count],
                      [disable_email]) -> ... object ...

    Import a test data file into an LNT server and generate a test report. On
    success, run is the newly imported run. Note that success is unaffected by
    the value of commit; commit merely controls whether the run (on success) is
    committed to the database.

    The result object is a dictionary containing information on the imported run
    and its comparison to the previous run.
    """
    numMachines = db.getNumMachines()
    numRuns = db.getNumRuns()
    numTests = db.getNumTests()

    # If the database gets fragmented, count(*) in SQLite can get really slow!?!
    if show_sample_count:
        numSamples = db.getNumSamples()

    result = {}
    result['success'] = False
    result['error'] = None
    result['import_file'] = file

    startTime = time.time()
    try:
        data = lnt.formats.read_any(file, format)
    except KeyboardInterrupt:
        raise
    except:
        import traceback
        result['error'] = "load failure: %s" % traceback.format_exc()
        return result

    result['load_time'] = time.time() - startTime

    # Auto-upgrade the data, if necessary.
    lnt.testing.upgrade_report(data)

    # Find the database config, if we have a configuration object.
    if config:
        db_config = config.databases[db_name]
    else:
        db_config = None

    # Find the email address for this machine's results.
    toAddress = email_config = None
    if db_config and not disable_email:
        email_config = db_config.email_config
        if email_config.enabled:
            # Find the machine name.
            machineName = str(data.get('Machine',{}).get('Name'))
            toAddress = email_config.get_to_address(machineName)
            if toAddress is None:
                result['error'] = ("unable to match machine name "
                                   "for test results email address!")
                return result

    importStartTime = time.time()
    try:
        success, run = db.importDataFromDict(data, commit, config=db_config)
    except KeyboardInterrupt:
        raise
    except:
        import traceback
        result['error'] = "import failure: %s" % traceback.format_exc()
        return result

    # If the import succeeded, save the import path.
    run.imported_from = file

    result['import_time'] = time.time() - importStartTime
    if not success:
        # Record the original run this is a duplicate of.
        result['original_run'] = run.id

    reportStartTime = time.time()
    result['report_to_address'] = toAddress
    if config:
        report_url = "%s/db_%s/" % (config.zorgURL, db_name)
    else:
        report_url = "localhost"

    if not disable_report:
        #  This has the side effect of building the run report for
        #  this result.
        NTEmailReport.emailReport(result, db, run, report_url,
                                  email_config, toAddress, success, commit)

    result['added_machines'] = db.getNumMachines() - numMachines
    result['added_runs'] = db.getNumRuns() - numRuns
    result['added_tests'] = db.getNumTests() - numTests
    if show_sample_count:
        result['added_samples'] = db.getNumSamples() - numSamples

    result['committed'] = commit
    result['run_id'] = run.id
    ts_name = data['Run']['Info'].get('tag')
    if commit:
        db.commit()
        if db_config:
            #  If we are not in a dummy instance, also run background jobs.
            #  We have to have a commit before we run, so subprocesses can
            #  see the submitted data.
            ts = db.testsuite.get(ts_name)
            async_ops.async_fieldchange_calc(db_name, ts, run)

    else:
        db.rollback()
    # Add a handy relative link to the submitted run.

    result['result_url'] = "db_{}/v4/{}/{}".format(db_name, ts_name, run.id)
    result['report_time'] = time.time() - reportStartTime
    result['total_time'] = time.time() - startTime
    note("Successfully created {}".format(result['result_url']))
    # If this database has a shadow import configured, import the run into that
    # database as well.
    if config and config.databases[db_name].shadow_import:
        # Load the shadow database to import into.
        db_config = config.databases[db_name]
        shadow_name = db_config.shadow_import
        with closing(config.get_database(shadow_name)) as shadow_db:
            if shadow_db is None:
                raise ValueError, ("invalid configuration, shadow import "
                                   "database %r does not exist") % shadow_name

            # Perform the shadow import.
            shadow_result = import_and_report(config, shadow_name,
                                              shadow_db, file, format, commit,
                                              show_sample_count, disable_email,
                                              disable_report)

            # Append the shadow result to the result.
            result['shadow_result'] = shadow_result

    result['success'] = True
    return result
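
Callers (including the shadow-import recursion above) only see the returned dictionary. A sketch of what a successful, committed import might look like; every value here is hypothetical, and keys such as 'original_run', 'added_samples', and 'shadow_result' appear only in the conditional paths shown above:

# Hypothetical result of a successful, committed import.
result = {
    'success': True,
    'error': None,
    'import_file': 'report.json',
    'load_time': 0.05,      # seconds in lnt.formats.read_any
    'import_time': 1.32,    # seconds in db.importDataFromDict
    'report_time': 0.41,    # seconds producing the email report
    'total_time': 1.90,
    'report_to_address': 'perf@example.com',
    'added_machines': 0,
    'added_runs': 1,
    'added_tests': 0,
    'committed': True,
    'run_id': 1234,
    'result_url': 'db_default/v4/nts/1234',
}
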
Example #45
def cleanup():
    note("Running process cleanup.")
    for p in JOBS:
        note("Waiting for %s %s" % (p.name, p.pid))
        if p.is_alive():
            p.join()
Example #46
    def run_test(self, name, args):
        # FIXME: Add more detailed usage information
        parser = OptionParser("%s [options] test-suite" % name)

        group = OptionGroup(parser, "Sandbox options")
        group.add_option("-S",
                         "--sandbox",
                         dest="sandbox_path",
                         help="Parent directory to build and run tests in",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         action="store_false",
                         default=True,
                         help="Don't timestamp build directory (for testing)")
        group.add_option("",
                         "--no-configure",
                         dest="run_configure",
                         action="store_false",
                         default=True,
                         help="Don't run CMake if CMakeCache.txt is present"
                         " (only useful with --no-timestamp")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Inputs")
        group.add_option("",
                         "--test-suite",
                         dest="test_suite_root",
                         type=str,
                         metavar="PATH",
                         default=None,
                         help="Path to the LLVM test-suite sources")
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         type=str,
                         metavar="PATH",
                         help="Path to the LLVM test-suite externals")
        group.add_option(
            "",
            "--cmake-define",
            dest="cmake_defines",
            action="append",
            help=("Defines to pass to cmake. These do not require the "
                  "-D prefix and can be given multiple times. e.g.: "
                  "--cmake-define A=B => -DA=B"))
        group.add_option(
            "-C",
            "--cmake-cache",
            dest="cmake_cache",
            help=("Use one of the test-suite's cmake configurations."
                  " Ex: Release, Debug"))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test compiler")
        group.add_option("",
                         "--cc",
                         dest="cc",
                         metavar="CC",
                         type=str,
                         default=None,
                         help="Path to the C compiler to test")
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         metavar="CXX",
                         type=str,
                         default=None,
                         help="Path to the C++ compiler to test (inferred from"
                         " --cc where possible")
        group.add_option("",
                         "--llvm-arch",
                         dest="llvm_arch",
                         type='choice',
                         default=None,
                         help="Override the CMake-inferred architecture",
                         choices=TEST_SUITE_KNOWN_ARCHITECTURES)
        group.add_option("",
                         "--cross-compiling",
                         dest="cross_compiling",
                         action="store_true",
                         default=False,
                         help="Inform CMake that it should be cross-compiling")
        group.add_option("",
                         "--cross-compiling-system-name",
                         type=str,
                         default=None,
                         dest="cross_compiling_system_name",
                         help="The parameter to pass to CMAKE_SYSTEM_NAME when"
                         " cross-compiling. By default this is 'Linux' "
                         "unless -arch is in the cflags, in which case "
                         "it is 'Darwin'")
        group.add_option(
            "",
            "--cppflags",
            type=str,
            action="append",
            dest="cppflags",
            default=[],
            help="Extra flags to pass the compiler in C or C++ mode. "
            "Can be given multiple times")
        group.add_option("",
                         "--cflags",
                         type=str,
                         action="append",
                         dest="cflags",
                         default=[],
                         help="Extra CFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        group.add_option("",
                         "--cxxflags",
                         type=str,
                         action="append",
                         dest="cxxflags",
                         default=[],
                         help="Extra CXXFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test selection")
        group.add_option("",
                         "--test-size",
                         type='choice',
                         dest="test_size",
                         choices=['small', 'regular', 'large'],
                         default='regular',
                         help="The size of test inputs to use")
        group.add_option("",
                         "--benchmarking-only",
                         dest="benchmarking_only",
                         action="store_true",
                         default=False,
                         help="Benchmarking-only mode. Disable unit tests and "
                         "other flaky or short-running tests")
        group.add_option("",
                         "--only-test",
                         dest="only_test",
                         metavar="PATH",
                         type=str,
                         default=None,
                         help="Only run tests under PATH")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Execution")
        group.add_option("-j",
                         "--threads",
                         dest="threads",
                         help="Number of testing (and optionally build) "
                         "threads",
                         type=int,
                         default=1,
                         metavar="N")
        group.add_option("",
                         "--build-threads",
                         dest="build_threads",
                         help="Number of compilation threads, defaults to "
                         "--threads",
                         type=int,
                         default=0,
                         metavar="N")
        group.add_option(
            "",
            "--use-perf",
            dest="use_perf",
            help=("Use Linux perf for high accuracy timing, profile "
                  "information or both"),
            type='choice',
            choices=['none', 'time', 'profile', 'all'],
            default='none')
        group.add_option("",
                         "--run-under",
                         dest="run_under",
                         help="Wrapper to run tests under ['%default']",
                         type=str,
                         default="")
        group.add_option(
            "",
            "--exec-multisample",
            dest="exec_multisample",
            help="Accumulate execution test data from multiple runs",
            type=int,
            default=1,
            metavar="N")
        group.add_option(
            "",
            "--compile-multisample",
            dest="compile_multisample",
            help="Accumulate compile test data from multiple runs",
            type=int,
            default=1,
            metavar="N")
        group.add_option(
            "-d",
            "--diagnose",
            dest="diagnose",
            help="Produce a diagnostic report for a particular "
            "test, this will not run all the tests.  Must be"
            " used in conjunction with --only-test.",
            action="store_true",
            default=False,
        )

        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--no-auto-name",
                         dest="auto_name",
                         help="Don't automatically derive submission name",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--run-order",
                         dest="run_order",
                         metavar="STR",
                         help="String to use to identify and order this run",
                         action="store",
                         type=str,
                         default=None)
        group.add_option("",
                         "--submit",
                         dest="submit_url",
                         metavar="URLORPATH",
                         help=("autosubmit the test result to the given server"
                               " (or local instance) [%default]"),
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)
        group.add_option("",
                         "--succinct-compile-output",
                         help="run Make without VERBOSE=1",
                         action="store_true",
                         dest="succinct")
        group.add_option("",
                         "--exclude-stat-from-submission",
                         dest="exclude_stat_from_submission",
                         help="Do not submit the stat of this type [%default]",
                         action='append',
                         choices=KNOWN_SAMPLE_KEYS,
                         type='choice',
                         default=[])
        group.add_option("",
                         "--single-result",
                         dest="single_result",
                         help=("only execute this single test and apply "
                               "--single-result-predicate to calculate the "
                               "exit status"))
        group.add_option("",
                         "--single-result-predicate",
                         dest="single_result_predicate",
                         help=("the predicate to apply to calculate the exit "
                               "status (with --single-result)"),
                         default="status")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test tools")
        group.add_option("",
                         "--use-cmake",
                         dest="cmake",
                         metavar="PATH",
                         type=str,
                         default="cmake",
                         help="Path to CMake [cmake]")
        group.add_option("",
                         "--use-make",
                         dest="make",
                         metavar="PATH",
                         type=str,
                         default="make",
                         help="Path to Make [make]")
        group.add_option("",
                         "--use-lit",
                         dest="lit",
                         metavar="PATH",
                         type=str,
                         default="llvm-lit",
                         help="Path to the LIT test runner [llvm-lit]")

        (opts, args) = parser.parse_args(args)
        self.opts = opts

        if len(args) == 0:
            self.nick = platform.uname()[1]
        elif len(args) == 1:
            self.nick = args[0]
        else:
            parser.error("Expected no positional arguments (got: %r)" %
                         (args, ))

        for a in [
                'cross_compiling', 'cross_compiling_system_name', 'llvm_arch'
        ]:
            if getattr(opts, a):
                parser.error('option "%s" is not yet implemented!' % a)

        if self.opts.sandbox_path is None:
            parser.error('--sandbox is required')

        # Option validation.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # If there was no --cxx given, attempt to infer it from the --cc.
        if opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("Inferred C++ compiler under test as: %r" % (opts.cxx, ))
            else:
                parser.error("unable to infer --cxx - set it manually.")
        else:
            opts.cxx = resolve_command_path(opts.cxx)

        if not os.path.exists(opts.cxx):
            parser.error("invalid --cxx argument %r, does not exist" %
                         (opts.cxx))

        if opts.test_suite_root is None:
            parser.error('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            parser.error("invalid --test-suite argument, does not exist: %r" %
                         (opts.test_suite_root))

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                parser.error(
                    "invalid --test-externals argument, does not exist: %r" %
                    (opts.test_suite_externals, ))

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            parser.error("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            parser.error("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            parser.error("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                parser.error("Run under wrapper not found (looked for %s)" %
                             opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result

        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(self.opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)

            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                parser.error(
                    "--only-test argument not understood (must be a "
                    "test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            parser.error(
                "--single-result must be given a single test name, not a " +
                "directory name")

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        if opts.diagnose:
            if not opts.only_test:
                parser.error("--diagnose requires --only-test")

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            note("Increasing number of execution samples to %d" %
                 opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info()
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            self.nick += "__%s__%s" % (cc_nick,
                                       cc_info['cc_target'].split('-')[0])
        note('Using nickname: %r' % self.nick)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()
        # Now do the actual run.
        reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            reports.append(self.run(self.nick, compile=c, test=e))

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        return self.submit(report_path, self.opts, commit=True)
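
When --no-auto-name is not given, the submission nickname above is derived from the compiler under test. A worked sketch with a hypothetical _get_cc_info() result (the keys match the ones used above; the values are illustrative):

# Hypothetical compiler info and the nickname derived from it.
cc_info = {'cc_name': 'clang', 'cc_build': 'DEV',
           'cc_target': 'x86_64-apple-darwin16'}
nick = 'mybot'
cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
nick += "__%s__%s" % (cc_nick, cc_info['cc_target'].split('-')[0])
# nick == 'mybot__clang_DEV__x86_64'
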
Example #47
def import_and_report(config,
                      db_name,
                      db,
                      file,
                      format,
                      commit=False,
                      show_sample_count=False,
                      disable_email=False,
                      disable_report=False):
    """
    import_and_report(config, db_name, db, file, format,
                      [commit], [show_sample_count],
                      [disable_email]) -> ... object ...

    Import a test data file into an LNT server and generate a test report. On
    success, run is the newly imported run. Note that success is unaffected by
    the value of commit; commit merely controls whether the run (on success) is
    committed to the database.

    The result object is a dictionary containing information on the imported run
    and its comparison to the previous run.
    """
    numMachines = db.getNumMachines()
    numRuns = db.getNumRuns()
    numTests = db.getNumTests()

    # If the database gets fragmented, count(*) in SQLite can get really slow!?!
    if show_sample_count:
        numSamples = db.getNumSamples()

    result = {}
    result['success'] = False
    result['error'] = None
    result['import_file'] = file

    startTime = time.time()
    try:
        data = lnt.formats.read_any(file, format)
    except KeyboardInterrupt:
        raise
    except:
        import traceback
        result['error'] = "load failure: %s" % traceback.format_exc()
        return result

    result['load_time'] = time.time() - startTime

    # Auto-upgrade the data, if necessary.
    lnt.testing.upgrade_report(data)

    # Find the database config, if we have a configuration object.
    if config:
        db_config = config.databases[db_name]
    else:
        db_config = None

    # Find the email address for this machine's results.
    toAddress = email_config = None
    if db_config and not disable_email:
        email_config = db_config.email_config
        if email_config.enabled:
            # Find the machine name.
            machineName = str(data.get('Machine', {}).get('Name'))
            toAddress = email_config.get_to_address(machineName)
            if toAddress is None:
                result['error'] = ("unable to match machine name "
                                   "for test results email address!")
                return result

    importStartTime = time.time()
    try:
        success, run = db.importDataFromDict(data, commit, config=db_config)
    except KeyboardInterrupt:
        raise
    except:
        import traceback
        result['error'] = "import failure: %s" % traceback.format_exc()
        return result

    # If the import succeeded, save the import path.
    run.imported_from = file

    result['import_time'] = time.time() - importStartTime
    if not success:
        # Record the original run this is a duplicate of.
        result['original_run'] = run.id

    reportStartTime = time.time()
    result['report_to_address'] = toAddress
    if config:
        report_url = "%s/db_%s/" % (config.zorgURL, db_name)
    else:
        report_url = "localhost"

    if not disable_report:
        #  This has the side effect of building the run report for
        #  this result.
        NTEmailReport.emailReport(result, db, run, report_url, email_config,
                                  toAddress, success, commit)

    result['added_machines'] = db.getNumMachines() - numMachines
    result['added_runs'] = db.getNumRuns() - numRuns
    result['added_tests'] = db.getNumTests() - numTests
    if show_sample_count:
        result['added_samples'] = db.getNumSamples() - numSamples

    result['committed'] = commit
    result['run_id'] = run.id
    ts_name = data['Run']['Info'].get('tag')
    if commit:
        db.commit()
        if db_config:
            #  If we are not in a dummy instance, also run background jobs.
            #  We have to have a commit before we run, so subprocesses can
            #  see the submitted data.
            ts = db.testsuite.get(ts_name)
            async_ops.async_fieldchange_calc(db_name, ts, run)

    else:
        db.rollback()
    # Add a handy relative link to the submitted run.
    result['result_url'] = "db_{}/v4/{}/{}".format(db_name, ts_name, run.id)
    result['report_time'] = time.time() - reportStartTime
    result['total_time'] = time.time() - startTime
    note("Successfully created {}".format(result['result_url']))
    # If this database has a shadow import configured, import the run into that
    # database as well.
    if config and config.databases[db_name].shadow_import:
        # Load the shadow database to import into.
        db_config = config.databases[db_name]
        shadow_name = db_config.shadow_import
        with closing(config.get_database(shadow_name)) as shadow_db:
            if shadow_db is None:
                raise ValueError("invalid configuration, shadow import "
                                 "database %r does not exist" % shadow_name)

            # Perform the shadow import.
            shadow_result = import_and_report(config, shadow_name, shadow_db,
                                              file, format, commit,
                                              show_sample_count, disable_email,
                                              disable_report)

            # Append the shadow result to the result.
            result['shadow_result'] = shadow_result

    result['success'] = True
    return result
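A minimal sketch of driving import_and_report() directly, assuming an already
loaded Config object; the database name and report path are placeholders, and
the result keys are the ones populated by the function above.

import contextlib

# 'config' is assumed to be a loaded lnt.server.config.Config; the
# 'default' database name and report path are placeholders only.
with contextlib.closing(config.get_database('default')) as db:
    result = import_and_report(config, 'default', db,
                               '/tmp/report.json', '<auto>', commit=True)
    if not result['success']:
        # 'error' carries the load/import traceback text on failure.
        print result['error']
    else:
        # e.g. "db_default/v4/nts/123", relative to the server root.
        print result['result_url']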
Example #48
def action_view_comparison(name, args):
    """view a report comparison using a temporary server"""

    import lnt.server.instance
    import lnt.server.ui.app
    import lnt.server.db.migrate

    parser = OptionParser("%s [options] <report A> <report B>" % (name,))
    parser.add_option("", "--hostname", dest="hostname", type=str,
                      help="host interface to use [%default]",
                      default='localhost')
    parser.add_option("", "--port", dest="port", type=int, metavar="N",
                      help="local port to use [%default]", default=8000)
    (opts, args) = parser.parse_args(args)

    if len(args) != 2:
        parser.error("invalid number of arguments")

    report_a_path, report_b_path = args

    # Set up the default logger.
    logger = logging.getLogger("lnt")
    logger.setLevel(logging.ERROR)
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'))
    logger.addHandler(handler)

    # Create a temporary directory to hold the instance.
    tmpdir = tempfile.mkdtemp(suffix='lnt')

    try:
        # Create a temporary instance.
        url = 'http://%s:%d' % (opts.hostname, opts.port)
        db_path = os.path.join(tmpdir, 'data.db')
        db_info = lnt.server.config.DBInfo(
            'sqlite:///%s' % (db_path,), '0.4', None,
            lnt.server.config.EmailConfig(False, '', '', []), "0")
        config = lnt.server.config.Config(
            'LNT', url, db_path, tmpdir,
            None, { 'default' : db_info })
        instance = lnt.server.instance.Instance(None, config)

        # Create the database.
        lnt.server.db.migrate.update_path(db_path)

        # Import the two reports.
        with contextlib.closing(config.get_database('default')) as db:
            result = lnt.util.ImportData.import_and_report(
                config, 'default', db, report_a_path,
                '<auto>', commit=True)
            result = lnt.util.ImportData.import_and_report(
                config, 'default', db, report_b_path,
                '<auto>', commit=True)

            # Dispatch another thread to start the webbrowser.
            comparison_url = '%s/v4/nts/2?compare_to=1' % (url,)
            note("opening comparison view: %s" % (comparison_url,))
            thread.start_new_thread(start_browser, (comparison_url, True))

            # Run the webserver.
            app = lnt.server.ui.app.App.create_with_instance(instance)
            app.debug = True
            app.run(opts.hostname, opts.port, use_reloader=False)
    finally:
        shutil.rmtree(tmpdir)
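For reference, a sketch of invoking the action directly; the report paths are
placeholders and the option values mirror the parser defined above.

# Hypothetical direct invocation; 'view-comparison' is only used for the
# usage string, and the two report paths are placeholders.
action_view_comparison('view-comparison',
                       ['--hostname', 'localhost', '--port', '9000',
                        'report_a.json', 'report_b.json'])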
Example #49
    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" +
             usage_info) % locals())
        parser.add_option("-s", "--sandbox", dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str, default=None, metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("", "--no-timestamp", dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false", default=True)
        group.add_option("", "--cc", dest="cc", type='str',
                         help="Path to the compiler under test",
                         action="store", default=None)
        group.add_option("", "--cxx", dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str, default=None)
        group.add_option("", "--ld", dest="ld",
                         help="Path to the c linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--ldxx", dest="ldxx",
                         help="Path to the cxx linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--test-externals", dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str, default=None, metavar="PATH")
        group.add_option("", "--machine-param", dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-param", dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-order", dest="run_order", metavar="STR",
                         help="String to use to identify and order this run",
                         action="store", type=str, default=None)
        group.add_option("", "--test-subdir", dest="test_subdir",
                         help="Subdirectory of test external dir to look for tests in.",
                         type=str, default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("", "--no-memory-profiling", dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false", default=True)
        group.add_option("", "--multisample", dest="run_count", metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store", type=int, default=3)
        group.add_option("", "--min-sample-time", dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N", action="store", type=float, default=.5)
        group.add_option("", "--save-temps", dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true", default=False)
        group.add_option("", "--show-tests", dest="show_tests",
                         help="Only list the availables tests that will be run",
                         action="store_true", default=False)
        group.add_option("", "--test", dest="tests", metavar="NAME",
                         help="Individual test to run",
                         action="append", default=[])
        group.add_option("", "--test-filter", dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP", action="append", default=[])
        group.add_option("", "--flags-to-test", dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST", action="append", default=[])
        group.add_option("", "--jobs-to-test", dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM", action="append", default=[], type=int)
        group.add_option("", "--config-to-test", dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME", action="append", default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("", "--no-machdep-info", dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false", default=True)
        group.add_option("", "--machine-name", dest="machine_name", type='str',
                         help="Machine name to use in submission [%default]",
                         action="store", default=platform.uname()[1])
        group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH",
                         help=("autosubmit the test result to the given server "
                               "(or local instance) [%default]"),
                         type=str, default=None)
        group.add_option("", "--commit", dest="commit",
                         help=("whether the autosubmit result should be committed "
                               "[%default]"),
                         type=int, default=True)
        group.add_option("", "--output", dest="output", metavar="PATH",
                         help="write raw report data to PATH (or stdout if '-')",
                         action="store", default=None)
        group.add_option("-v", "--verbose", dest="verbose",
                         help="show verbose test results",
                         action="store_true", default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx,))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" % (
                         opts.cc,))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cxx: %r" % (
                         opts.cxx,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" % (
                             g_output_dir,))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
                                      "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
                                      "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h
            l = logging.Logger('compile_test')
            l.setLevel(logging.INFO)
            l.addHandler(file_log_handler(os.path.join(output_dir, 'test.log')))
            l.addHandler(stderr_log_handler())
            return l
        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v', '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            note("inferred run order to be: %r" % (variables['run_order'],))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            note(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [string.split(' ')
                             for string in opts.flags_to_test]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(get_tests(opts.test_suite_externals, opts.test_subdir,
                                   flags_to_test, jobs_to_test,
                                   configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >>sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >>sys.stderr, '  %s' % (name,)
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                    parser.error(("invalid test names %s, use --show-tests to "
                                  "see available tests") %
                                 (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [re.compile(pattern)
                            for pattern in opts.test_filters]

            # Form the list of tests.
            tests_to_run = [test
                            for test in all_tests
                            if (test[0] in requested_tests or
                                [True
                                 for filter in test_filters
                                 if filter.search(test[0])])]
        if not tests_to_run:
            parser.error(
                "no tests requested (invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples),)
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples),)
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' % (
                    num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(lnt.testing.TestSamples(
                                       test_name + '.status',
                                       [lnt.testing.FAIL]))
                if samples:
                    testsamples.append(lnt.testing.TestSamples(
                                       test_name, samples))
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts)

        return server_report
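The --flags-to-test and NAME=VAL parameter handling above is easiest to see
with concrete values; a small standalone sketch with made-up inputs:

# Made-up option values, processed the same way as in run_test above.
flags_to_test = [s.split(' ') for s in ['-O3 -flto', '-Oz']]
# -> [['-O3', '-flto'], ['-Oz']]

run_info = {}
for entry in ['board=rpi4', 'notes']:
    if '=' not in entry:
        name, value = entry, ''
    else:
        name, value = entry.split('=', 1)
    run_info[name] = value
# -> {'board': 'rpi4', 'notes': ''}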
Example #50
    def _cp_artifacts(self, src, dest, patts):
        """Copy artifacts out of the build."""
        for patt in patts:
            for file in glob.glob(src + patt):
                shutil.copy(file, dest)
                note(file + " --> " + dest)
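The patterns are plain glob suffixes appended directly to src, so they carry a
leading '/'; a sketch of a call (as made from inside the test class in the
diagnose() example further below), with placeholder paths:

# Placeholder paths; each pattern is appended to 'src' before globbing,
# exactly as in _cp_artifacts above.
self._cp_artifacts('/path/to/build/bench', '/path/to/bench.report',
                   ['/*.s', '/*.ii', '/*.i', '/*.bc'])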
Example #52
class CompileTest(builtintest.BuiltinTest):
    def describe(self):
        return 'Single file compile-time performance testing'

    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" + usage_info) % locals())
        parser.add_option("-s",
                          "--sandbox",
                          dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str,
                          default=None,
                          metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--cc",
                         dest="cc",
                         type='str',
                         help="Path to the compiler under test",
                         action="store",
                         default=None)
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--ld",
            dest="ld",
            help="Path to the c linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option(
            "",
            "--ldxx",
            dest="ldxx",
            help="Path to the cxx linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--machine-param",
                         dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-param",
                         dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-order",
                         dest="run_order",
                         metavar="STR",
                         help="String to use to identify and order this run",
                         action="store",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--test-subdir",
            dest="test_subdir",
            help="Subdirectory of test external dir to look for tests in.",
            type=str,
            default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("",
                         "--no-memory-profiling",
                         dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--multisample",
                         dest="run_count",
                         metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store",
                         type=int,
                         default=3)
        group.add_option("",
                         "--min-sample-time",
                         dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N",
                         action="store",
                         type=float,
                         default=.5)
        group.add_option("",
                         "--save-temps",
                         dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true",
                         default=False)
        group.add_option(
            "",
            "--show-tests",
            dest="show_tests",
            help="Only list the availables tests that will be run",
            action="store_true",
            default=False)
        group.add_option("",
                         "--test",
                         dest="tests",
                         metavar="NAME",
                         help="Individual test to run",
                         action="append",
                         default=[])
        group.add_option("",
                         "--test-filter",
                         dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP",
                         action="append",
                         default=[])
        group.add_option("",
                         "--flags-to-test",
                         dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST",
                         action="append",
                         default=[])
        group.add_option("",
                         "--jobs-to-test",
                         dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM",
                         action="append",
                         default=[],
                         type=int)
        group.add_option("",
                         "--config-to-test",
                         dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME",
                         action="append",
                         default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--no-machdep-info",
                         dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--machine-name",
                         dest="machine_name",
                         type='str',
                         help="Machine name to use in submission [%default]",
                         action="store",
                         default=platform.uname()[1])
        group.add_option(
            "",
            "--submit",
            dest="submit_url",
            metavar="URLORPATH",
            help=("autosubmit the test result to the given server "
                  "(or local instance) [%default]"),
            type=str,
            default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option(
            "",
            "--output",
            dest="output",
            metavar="PATH",
            help="write raw report data to PATH (or stdout if '-')",
            action="store",
            default=None)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx, ))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" %
                         (opts.cc, ))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cxx: %r" %
                         (opts.cxx, ))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >> sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            report_name = "test-%s" % (timestamp().replace(' ', '_').replace(
                ':', '-'))
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" %
                             (g_output_dir, ))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            l = logging.Logger('compile_test')
            l.setLevel(logging.INFO)
            l.addHandler(file_log_handler(os.path.join(output_dir,
                                                       'test.log')))
            l.addHandler(stderr_log_handler())
            return l

        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v',
                                              '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            note("inferred run order to be: %r" % (variables['run_order'], ))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            note(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = [('-O0', ), ('-O0', '-g'),
                             ('-Os', '-g'), ('-O3', )]
        else:
            flags_to_test = [
                string.split(' ') for string in opts.flags_to_test
            ]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(
            get_tests(opts.test_suite_externals, opts.test_subdir,
                      flags_to_test, jobs_to_test, configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >> sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >> sys.stderr, '  %s' % (name, )
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                parser.error(("invalid test names %s, use --show-tests to "
                              "see available tests") %
                             (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [
                re.compile(pattern) for pattern in opts.test_filters
            ]

            # Form the list of tests.
            tests_to_run = [
                test for test in all_tests if
                (test[0] in requested_tests or
                 [True for filter in test_filters if filter.search(test[0])])
            ]
        if not tests_to_run:
            parser.error(
                "no tests requested (invalid --test or --test-filter options)!"
            )

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples), )
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples), )
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' %
                           (num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name + '.status',
                                                [lnt.testing.FAIL]))
                if samples:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name, samples))
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts)

        return server_report
Example #53
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report contains information useful for
        reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        if not os.path.exists(path):
            mkdir_p(path)
        os.chdir(path)

        # Run with -save-temps
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE=On',
                           '-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        note(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        note(out)

        # Figure out our test's target.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, must be nestsed? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        note(" ".join(make_save_temps))
        out = subprocess.check_output(make_save_temps)
        note(out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"), report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = ["/*.o", "/*.time", "/*.cmake", "/*.make",
                       "/*.includecache", "/*.txt"]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        note("Report produced in: " + report_path)

        # Run through the rest of LNT, but don't allow this to be submitted
        # because there is no data.
        class DontSubmitResults(object):
            def get(self, url):
                return None

        return DontSubmitResults()
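The make-target check above greps the 'make VERBOSE=1 help' listing for a line
of the form '... <target>'; a small standalone sketch against fabricated
output:

import re

# Fabricated 'make VERBOSE=1 help' output; the real listing has one
# "... <target>" line per build target.
make_targets = ("The following are some of the valid targets:\n"
                "... all\n"
                "... Shootout-ackermann\n")
short_name = "Shootout-ackermann"   # placeholder benchmark target
matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                     re.MULTILINE | re.IGNORECASE)
assert matcher.search(make_targets) is not None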
Example #54
    def _parse_lit_output(self, path, data, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str
        }
        
        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []
            
        for test_data in data['tests']:
            raw_name = test_data['name'].split(' :: ', 1)[1]
            name = 'nts.' + raw_name.rsplit('.test', 1)[0]
            is_pass = self._is_pass_code(test_data['code'])

            # If --single-result is given, exit based on --single-result-predicate
            if self.opts.single_result and \
               raw_name == self.opts.single_result+'.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k,v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k,v in test_data['metrics'].items():
                    if k == 'profile':
                        profiles_to_import.append( (name, v) )
                        continue
                    
                    if k not in LIT_METRIC_TO_LNT or LIT_METRIC_TO_LNT[k] in ignore:
                        continue
                    test_samples.append(
                        lnt.testing.TestSamples(name + '.' + LIT_METRIC_TO_LNT[k],
                                                [v],
                                                test_info,
                                                LIT_METRIC_CONV_FN[k]))

            if self._test_failed_to_compile(raw_name, path):
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL],
                                            test_info))

            elif not is_pass:
                test_samples.append(
                    lnt.testing.TestSamples(name + '.exec.status',
                                            [self._get_lnt_code(test_data['code'])],
                                            test_info))

        # Now import the profiles in parallel.
        if profiles_to_import:
            note('Importing %d profiles with %d threads...' %
                 (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend([sample
                                     for sample in samples
                                     if sample is not None])
            except multiprocessing.TimeoutError:
                warning('Profiles had not completed importing after %s seconds.'
                        % TIMEOUT)
                note('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {
            'tag': 'nts'
        }
        run_info.update(self._get_cc_info())
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order
        
        machine_info = {
        }
        
        machine = lnt.testing.Machine(self.nick, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
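A sketch of the lit JSON shape this parser expects, with fabricated values;
each entry's 'metrics' keys are translated through LIT_METRIC_TO_LNT, and a
'profile' metric would be routed to the profile importer instead.

# Fabricated lit output; the field names are the ones _parse_lit_output reads.
data = {
    'tests': [
        {
            'name': 'test-suite :: SingleSource/Benchmarks/foo.test',
            'code': 'PASS',
            'metrics': {
                'compile_time': 1.23,   # -> nts.<name>.compile
                'exec_time': 4.56,      # -> nts.<name>.exec
                'hash': 'deadbeef',     # -> nts.<name>.hash
            },
        },
    ],
}
# report = self._parse_lit_output('/path/to/sandbox', data)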
Example #55
    def run_test(self, name, args):
        # FIXME: Add more detailed usage information
        parser = OptionParser("%s [options] test-suite" % name)

        group = OptionGroup(parser, "Sandbox options")
        group.add_option("-S", "--sandbox", dest="sandbox_path",
                         help="Parent directory to build and run tests in",
                         type=str, default=None, metavar="PATH")
        group.add_option("", "--no-timestamp", dest="timestamp_build",
                         action="store_false", default=True,
                         help="Don't timestamp build directory (for testing)")
        group.add_option("", "--no-configure", dest="run_configure",
                         action="store_false", default=True,
                         help="Don't run CMake if CMakeCache.txt is present"
                              " (only useful with --no-timestamp")
        parser.add_option_group(group)
        
        group = OptionGroup(parser, "Inputs")
        group.add_option("", "--test-suite", dest="test_suite_root",
                         type=str, metavar="PATH", default=None,
                         help="Path to the LLVM test-suite sources")
        group.add_option("", "--test-externals", dest="test_suite_externals",
                         type=str, metavar="PATH",
                         help="Path to the LLVM test-suite externals")
        group.add_option("", "--cmake-define", dest="cmake_defines",
                         action="append",
                         help=("Defines to pass to cmake. These do not require the "
                               "-D prefix and can be given multiple times. e.g.: "
                               "--cmake-define A=B => -DA=B"))
        group.add_option("-C", "--cmake-cache", dest="cmake_cache",
                         help=("Use one of the test-suite's cmake configurations."
                               " Ex: Release, Debug"))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test compiler")
        group.add_option("", "--cc", dest="cc", metavar="CC",
                         type=str, default=None,
                         help="Path to the C compiler to test")
        group.add_option("", "--cxx", dest="cxx", metavar="CXX",
                         type=str, default=None,
                         help="Path to the C++ compiler to test (inferred from"
                              " --cc where possible")
        group.add_option("", "--llvm-arch", dest="llvm_arch",
                         type='choice', default=None,
                         help="Override the CMake-inferred architecture",
                         choices=TEST_SUITE_KNOWN_ARCHITECTURES)
        group.add_option("", "--cross-compiling", dest="cross_compiling",
                         action="store_true", default=False,
                         help="Inform CMake that it should be cross-compiling")
        group.add_option("", "--cross-compiling-system-name", type=str,
                         default=None, dest="cross_compiling_system_name",
                         help="The parameter to pass to CMAKE_SYSTEM_NAME when"
                              " cross-compiling. By default this is 'Linux' "
                              "unless -arch is in the cflags, in which case "
                              "it is 'Darwin'")
        group.add_option("", "--cppflags", type=str, action="append",
                         dest="cppflags", default=[],
                         help="Extra flags to pass the compiler in C or C++ mode. "
                              "Can be given multiple times")
        group.add_option("", "--cflags", type=str, action="append",
                         dest="cflags", default=[],
                         help="Extra CFLAGS to pass to the compiler. Can be "
                              "given multiple times")
        group.add_option("", "--cxxflags", type=str, action="append",
                         dest="cxxflags", default=[],
                         help="Extra CXXFLAGS to pass to the compiler. Can be "
                              "given multiple times")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test selection")
        group.add_option("", "--test-size", type='choice', dest="test_size",
                         choices=['small', 'regular', 'large'], default='regular',
                         help="The size of test inputs to use")
        group.add_option("", "--benchmarking-only",
                         dest="benchmarking_only", action="store_true",
                         default=False,
                         help="Benchmarking-only mode. Disable unit tests and "
                              "other flaky or short-running tests")
        group.add_option("", "--only-test", dest="only_test", metavar="PATH",
                         type=str, default=None,
                         help="Only run tests under PATH")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Execution")
        group.add_option("-j", "--threads", dest="threads",
                         help="Number of testing (and optionally build) "
                         "threads", type=int, default=1, metavar="N")
        group.add_option("", "--build-threads", dest="build_threads",
                         help="Number of compilation threads, defaults to "
                         "--threads", type=int, default=0, metavar="N")
        group.add_option("", "--use-perf", dest="use_perf",
                         help=("Use Linux perf for high accuracy timing, profile "
                               "information or both"),
                         type='choice',
                         choices=['none', 'time', 'profile', 'all'],
                         default='none')
        group.add_option("", "--run-under", dest="run_under",
                         help="Wrapper to run tests under ['%default']",
                         type=str, default="")
        group.add_option("", "--exec-multisample", dest="exec_multisample",
                         help="Accumulate execution test data from multiple runs",
                         type=int, default=1, metavar="N")
        group.add_option("", "--compile-multisample", dest="compile_multisample",
                         help="Accumulate compile test data from multiple runs",
                         type=int, default=1, metavar="N")
        group.add_option("-d", "--diagnose", dest="diagnose",
                         help="Produce a diagnostic report for a particular "
                              "test, this will not run all the tests.  Must be"
                              " used in conjunction with --only-test.",
                         action="store_true", default=False,)

        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("", "--no-auto-name", dest="auto_name",
                         help="Don't automatically derive submission name",
                         action="store_false", default=True)
        group.add_option("", "--run-order", dest="run_order", metavar="STR",
                         help="String to use to identify and order this run",
                         action="store", type=str, default=None)
        group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH",
                         help=("autosubmit the test result to the given server"
                               " (or local instance) [%default]"),
                         type=str, default=None)
        group.add_option("", "--commit", dest="commit",
                         help=("whether the autosubmit result should be committed "
                                "[%default]"),
                          type=int, default=True)
        group.add_option("-v", "--verbose", dest="verbose",
                         help="show verbose test results",
                         action="store_true", default=False)
        group.add_option("", "--succinct-compile-output",
                         help="run Make without VERBOSE=1",
                         action="store_true", dest="succinct")
        group.add_option("", "--exclude-stat-from-submission",
                         dest="exclude_stat_from_submission",
                         help="Do not submit the stat of this type [%default]",
                         action='append', choices=KNOWN_SAMPLE_KEYS,
                         type='choice', default=[])
        group.add_option("", "--single-result", dest="single_result",
                         help=("only execute this single test and apply "
                               "--single-result-predicate to calculate the "
                               "exit status"))
        group.add_option("", "--single-result-predicate",
                         dest="single_result_predicate",
                         help=("the predicate to apply to calculate the exit "
                               "status (with --single-result)"),
                         default="status")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test tools")
        group.add_option("", "--use-cmake", dest="cmake", metavar="PATH",
                         type=str, default="cmake",
                         help="Path to CMake [cmake]")
        group.add_option("", "--use-make", dest="make", metavar="PATH",
                         type=str, default="make",
                         help="Path to Make [make]")
        group.add_option("", "--use-lit", dest="lit", metavar="PATH",
                         type=str, default="llvm-lit",
                         help="Path to the LIT test runner [llvm-lit]")


        (opts, args) = parser.parse_args(args)
        self.opts = opts

        if len(args) == 0:
            self.nick = platform.uname()[1]
        elif len(args) == 1:
            self.nick = args[0]
        else:
            parser.error("Expected no positional arguments (got: %r)" % (args,))

        for a in ['cross_compiling', 'cross_compiling_system_name', 'llvm_arch']:
            if getattr(opts, a):
                parser.error('option "%s" is not yet implemented!' % a)
            
        if self.opts.sandbox_path is None:
            parser.error('--sandbox is required')

        # Option validation.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # If there was no --cxx given, attempt to infer it from the --cc.
        if opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("Inferred C++ compiler under test as: %r" % (opts.cxx,))
            else:
                parser.error("unable to infer --cxx - set it manually.")
        else:
            opts.cxx = resolve_command_path(opts.cxx)
                
        if not os.path.exists(opts.cxx):
            parser.error("invalid --cxx argument %r, does not exist" % (opts.cxx))

        if opts.test_suite_root is None:
            parser.error('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            parser.error("invalid --test-suite argument, does not exist: %r" % (
                opts.test_suite_root))

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                parser.error(
                    "invalid --test-externals argument, does not exist: %r" % (
                        opts.test_suite_externals,))
                
        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            parser.error("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            parser.error("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            parser.error("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                parser.error("Run under wrapper not found (looked for %s)" %
                             opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result
                
        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(self.opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)
            
            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                parser.error("--only-test argument not understood (must be a " +
                             " test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            parser.error("--single-result must be given a single test name, not a " +
                         "directory name")
                
        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)
        
        if opts.diagnose:
            if not opts.only_test:
                parser.error("--diagnose requires --only-test")
        
        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            note("Increasing number of execution samples to %d" %
                 opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info()
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            self.nick += "__%s__%s" % (cc_nick,
                                       cc_info['cc_target'].split('-')[0])
        note('Using nickname: %r' % self.nick)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()
        # Now do the actual run.
        reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            reports.append(self.run(self.nick, compile=c, test=e))
            
        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        return self.submit(report_path, self.opts, commit=True)
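
The option-validation block above calls resolve_command_path and isexecfile, which are defined elsewhere in LNT. Below is a minimal sketch of helpers with those names; the bodies are assumptions for illustration, not LNT's actual implementation.

import os


def resolve_command_path(name):
    # Hypothetical sketch: expand "~", and for bare command names fall back
    # to a PATH search, so the later existence/executability checks see a
    # concrete path.
    name = os.path.expanduser(name)
    if os.path.dirname(name):
        return os.path.abspath(name)
    for directory in os.environ.get("PATH", "").split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return name


def isexecfile(path):
    # True if `path` is an existing regular file with the execute bit set.
    return os.path.isfile(path) and os.access(path, os.X_OK)

With helpers along these lines, the pattern `opts.cmake = resolve_command_path(opts.cmake)` followed by an `isexecfile` check rejects tools that are missing or not executable before the run starts.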
Example #56
0
def v4_make_regression(machine_id, test_id, field_index, run_id):
    """This function is called to make a new regression from a graph data point.
    
    It is not nessessarly the case that there will be a real change there,
    so we must create a regression, bypassing the normal analysis.
    
    """
    ts = request.get_testsuite()
    field = ts.sample_fields[field_index]
    run = ts.query(ts.Run).get(run_id)
    
    runs = ts.query(ts.Run). \
        filter(ts.Run.order_id == run.order_id). \
        filter(ts.Run.machine_id == run.machine_id). \
        all()
        
    if len(runs) == 0:
        abort(404)
        
    previous_runs = ts.get_previous_runs_on_machine(run, 1)
    
    # Find our start/end order.
    if previous_runs != []:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    end_order = run.order

    # Load our run data for the creation of the new fieldchanges.
    runs_to_load = [r.id for r in (runs + previous_runs)]

    runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)

    result = runinfo.get_comparison_result(
        runs, previous_runs, test_id, field, ts.Sample.get_hash_of_binary_field())

    # Try to find a matching FieldChange and update it; otherwise create one.
    f = None

    try:
        f = ts.query(ts.FieldChange) \
            .filter(ts.FieldChange.start_order == start_order) \
            .filter(ts.FieldChange.end_order == end_order) \
            .filter(ts.FieldChange.test_id == test_id) \
            .filter(ts.FieldChange.machine == run.machine) \
            .filter(ts.FieldChange.field == field) \
            .one()
    except sqlalchemy.orm.exc.NoResultFound:
        f = None
    
    if not f:
        test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
        f = ts.FieldChange(start_order=start_order,
                           end_order=run.order,
                           machine=run.machine,
                           test=test,
                           field=field)
        ts.add(f)
    # Always update FCs with new values.
    if f:
        f.old_value = result.previous
        f.new_value = result.current
        f.run = run
    ts.commit()
    
    # Make the new regression.
    regression = new_regression(ts, [f.id])
    regression.state = RegressionState.ACTIVE
    
    ts.commit()
    note("Manually created new regressions: {}".format(regression.id))
    flash("Created " + regression.title, FLASH_SUCCESS)

    return redirect(v4_url_for("v4_regression_detail", id=regression.id))
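
The FieldChange handling above is a lookup-or-create pattern: query for an existing row, construct one if none is found, then refresh its values either way. A generic SQLAlchemy sketch of the same pattern is shown below, assuming a plain session and model class rather than LNT's testsuite wrappers.

from sqlalchemy.orm.exc import NoResultFound


def get_or_create(session, model, defaults=None, **filters):
    # Return (instance, created): the row matching `filters` if it exists,
    # otherwise a new row built from `filters` plus `defaults` and added to
    # the session (not yet committed).
    try:
        return session.query(model).filter_by(**filters).one(), False
    except NoResultFound:
        instance = model(**dict(filters, **(defaults or {})))
        session.add(instance)
        return instance, True

As in the view above, the caller still decides when to commit the session.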
Example #57
0
File: views.py Project: linkedinyou/lnt
def log():
    async_ops.check_workers(True)
    note("Showing log page.")
    return render_template("log.html")
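
For context, a view like log is exposed through Flask's routing. A self-contained toy version of that wiring is sketched below; the app object and route are assumptions for illustration, not LNT's actual setup, and the worker check is omitted.

from flask import Flask, render_template

app = Flask(__name__)


@app.route("/log")
def log():
    # LNT's view first polls its background workers via async_ops; this
    # sketch only renders the template.
    return render_template("log.html")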