def submit_helper(self, parsed_args):
    """Submit the report to the server.

    If no server was specified, use a local mock server.
    """
    result = None
    servers = parsed_args.submit_url
    if servers:
        from lnt.util import ServerUtil
        for url in servers:
            self.log("submitting result to %r" % (url,))
            try:
                result = ServerUtil.submitFile(
                    url, parsed_args.report_path, parsed_args.commit,
                    parsed_args.verbose)
            except (urllib2.HTTPError, urllib2.URLError) as e:
                # A failed submission is only a warning; keep trying the
                # remaining servers.
                warning("submitting to {} failed with {}".format(url, e))
    else:
        # No server given: simulate a submission against a throwaway
        # in-memory database so we still get a results report back.
        self.log("submitting result to dummy instance")
        import lnt.server.db.v4db
        import lnt.server.config
        db = lnt.server.db.v4db.V4DB(
            "sqlite:///:memory:",
            lnt.server.config.Config.dummyInstance())
        result = lnt.util.ImportData.import_and_report(
            None, None, db, parsed_args.report_path, 'json', True)
    if result is None:
        fatal("results were not obtained from submission.")
    return result
def submit_helper(self, parsed_args):
    """Submit the report to the server.

    If no server was specified, use a local mock server.
    """
    result = None
    if parsed_args.submit_url:
        from lnt.util import ServerUtil
        for server in parsed_args.submit_url:
            self.log("submitting result to %r" % (server,))
            try:
                result = ServerUtil.submitFile(
                    server, parsed_args.report_path, parsed_args.commit,
                    parsed_args.verbose)
            except (urllib2.HTTPError, urllib2.URLError) as e:
                # A failed submission only warns; remaining servers are
                # still tried.
                warning("submitting to {} failed with {}".format(server, e))
        # NOTE(review): with several servers, only the result of the last
        # successful submission is returned.
    else:
        # Simulate a submission to retrieve the results report.
        # Construct a temporary database and import the result.
        self.log("submitting result to dummy instance")
        import lnt.server.db.v4db
        import lnt.server.config
        db = lnt.server.db.v4db.V4DB("sqlite:///:memory:",
                                     lnt.server.config.Config.dummyInstance())
        result = lnt.util.ImportData.import_and_report(
            None, None, db, parsed_args.report_path, 'json', True)
    if result is None:
        fatal("results were not obtained from submission.")
    return result
def runN(args, N, cwd, preprocess_cmd=None, env=None, sample_mem=False,
         ignore_stderr=False, stdout=None, stderr=None):
    """Interface to runN.

    FIXME: Figure out a better way to deal with need to run as root. Maybe
    farm memory sampling process out into something we can setuid? Eek.
    """
    g_log.info("preprocess_cmd at top of runN: %s:", preprocess_cmd)
    cmd = [opts.runn, '-a']
    if sample_mem:
        # Memory sampling requires elevated privileges; wrap in sudo.
        cmd = ['sudo'] + cmd + ['-m']
    if preprocess_cmd is not None:
        cmd.extend(('-p', preprocess_cmd))
    # `stdout`/`stderr` here name redirection files passed to runN, not
    # the captured output of the subprocess below.
    if stdout is not None:
        cmd.extend(('--stdout', stdout))
    if stderr is not None:
        cmd.extend(('--stderr', stderr))
    cmd.extend(('--min-sample-time', repr(opts.min_sample_time)))
    cmd.extend(('--max-num-samples', '100'))
    cmd.append(str(int(N)))
    cmd.extend(args)

    if opts.verbose:
        g_log.info("running: %s" % " ".join("'%s'" % arg for arg in cmd))
    p = subprocess.Popen(args=cmd,
                         stdin=None,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=env,
                         cwd=cwd,
                         universal_newlines=True)
    runn_stdout, runn_stderr = p.communicate()
    res = p.returncode

    # If the runN command failed, or it had stderr when we didn't expect it,
    # fail immediately and don't try to parse the output.
    if res != 0:
        g_log.error("runN command failed with stderr:\n--\n%s\n--" %
                    runn_stderr.strip())
        return None
    elif not ignore_stderr and runn_stderr.strip():
        g_log.error("command had unexpected output on stderr:\n--\n%s\n--" %
                    (runn_stderr.strip(), ))
        return None

    # Otherwise, parse the timing data from runN.
    # NOTE(review): eval() of subprocess output is trusted only because
    # runN is our own tool; it would execute arbitrary expressions.
    try:
        return eval(runn_stdout)
    except Exception:
        # Returns None implicitly after reporting the fatal error.
        fatal("failed to parse output: %s\n" % runn_stdout)
def _get_mac_addresses():
    """Yield (interface_name, mac_address) pairs parsed from `ifconfig`.

    Relies on the BSD/macOS `ifconfig` layout: interface header lines start
    at column 0 (e.g. "en0: flags=..."), while per-interface detail lines
    are tab-indented, with the hardware address on a "\tether ..." line.
    """
    lines = capture(['ifconfig']).strip()
    current_ifc = None
    for ln in lines.split('\n'):
        if ln.startswith('\t'):
            # Detail line: must belong to a previously seen interface.
            if current_ifc is None:
                fatal('unexpected ifconfig output')
            if ln.startswith('\tether '):
                yield current_ifc, ln[len('\tether '):].strip()
        else:
            # Interface header line, e.g. "en0: flags=8863<...>".
            m = re.match(r'([A-Za-z0-9]*): .*', ln)
            if m is None:
                # BUG FIX: previously this crashed with AttributeError
                # (None.groups()) on an unrecognized header line; report
                # the malformed output through the normal diagnostic path.
                fatal('unexpected ifconfig output')
            current_ifc, = m.groups()
def _configure(self, path, execute=True):
    """Assemble the cmake configure command line; run it in `path` when
    `execute` is true. Returns the command as an argument list."""
    opts = self.opts
    defs = {
        # FIXME: Support ARCH, SMALL/LARGE etc
        'CMAKE_C_COMPILER': opts.cc,
        'CMAKE_CXX_COMPILER': opts.cxx,
    }
    # Preprocessor flags are folded into both flag sets.
    if opts.cppflags or opts.cflags:
        defs['CMAKE_C_FLAGS'] = self._unix_quote_args(
            ' '.join([opts.cppflags, opts.cflags]))
    if opts.cppflags or opts.cxxflags:
        defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(
            ' '.join([opts.cppflags, opts.cxxflags]))
    if opts.run_under:
        defs['TEST_SUITE_RUN_UNDER'] = self._unix_quote_args(opts.run_under)
    if opts.benchmarking_only:
        defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
    if opts.use_perf in ('time', 'all'):
        defs['TEST_SUITE_USE_PERF'] = 'ON'
    # User-supplied defines override anything computed above.
    if opts.cmake_defines:
        for item in opts.cmake_defines:
            key, value = item.split('=', 1)
            defs[key] = value

    lines = ['Configuring with {']
    lines.extend(" %s: '%s'" % pair for pair in sorted(defs.items()))
    lines.append('}')

    # Prepare cmake cache if requested:
    cache = []
    if opts.cmake_cache:
        cache_path = os.path.join(self._test_suite_dir(),
                                  "cmake/caches/",
                                  opts.cmake_cache + ".cmake")
        if os.path.exists(cache_path):
            cache = ['-C', cache_path]
        else:
            fatal("Could not find CMake cache file: " +
                  opts.cmake_cache + " in " + cache_path)

    for line in lines:
        note(line)

    cmake_cmd = ([opts.cmake] + cache + [self._test_suite_dir()] +
                 ['-D%s=%s' % pair for pair in defs.items()])
    if execute:
        self._check_call(cmake_cmd, cwd=path)
    return cmake_cmd
def _configure(self, path, execute=True):
    """Assemble the cmake configure command line; run it in `path` when
    `execute` is true. Returns the command as an argument list."""
    cmake_cmd = self.opts.cmake

    defs = {
        # FIXME: Support ARCH, SMALL/LARGE etc
        'CMAKE_C_COMPILER': self.opts.cc,
        'CMAKE_CXX_COMPILER': self.opts.cxx,
    }
    # Preprocessor flags are folded into both the C and C++ flag sets.
    if self.opts.cppflags or self.opts.cflags:
        all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
        defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)
    if self.opts.cppflags or self.opts.cxxflags:
        all_cxx_flags = ' '.join([self.opts.cppflags, self.opts.cxxflags])
        defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(all_cxx_flags)
    if self.opts.run_under:
        defs['TEST_SUITE_RUN_UNDER'] = self._unix_quote_args(self.opts.run_under)
    if self.opts.benchmarking_only:
        defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
    if self.opts.use_perf in ('time', 'all'):
        defs['TEST_SUITE_USE_PERF'] = 'ON'
    # User-supplied defines override anything computed above.
    if self.opts.cmake_defines:
        for item in self.opts.cmake_defines:
            k, v = item.split('=', 1)
            defs[k] = v

    # Log the effective configuration (emitted below, after the cache check).
    lines = ['Configuring with {']
    for k,v in sorted(defs.items()):
        lines.append(" %s: '%s'" % (k,v))
    lines.append('}')

    # Prepare cmake cache if requested:
    cache = []
    if self.opts.cmake_cache:
        cache_path = os.path.join(self._test_suite_dir(),
                                  "cmake/caches/",
                                  self.opts.cmake_cache + ".cmake")
        if os.path.exists(cache_path):
            cache = ['-C', cache_path]
        else:
            fatal("Could not find CMake cache file: " +
                  self.opts.cmake_cache + " in " + cache_path)

    for l in lines:
        note(l)

    cmake_cmd = [cmake_cmd] + cache + [self._test_suite_dir()] + \
                ['-D%s=%s' % (k,v) for k,v in defs.items()]

    if execute:
        self._check_call(cmake_cmd, cwd=path)

    return cmake_cmd
def runN(args, N, cwd, preprocess_cmd=None, env=None, sample_mem=False,
         ignore_stderr=False, stdout=None, stderr=None):
    """Run the runN tool to sample `args` N times in `cwd`.

    Returns the timing data structure printed by runN, or None when the
    command failed or produced unexpected stderr output.
    """
    cmd = ['runN', '-a']
    if sample_mem:
        # Memory sampling requires elevated privileges; wrap in sudo.
        cmd = ['sudo'] + cmd + ['-m']
    if preprocess_cmd is not None:
        cmd.extend(('-p', preprocess_cmd))
    # `stdout`/`stderr` name redirection files for runN, not captured output.
    if stdout is not None:
        cmd.extend(('--stdout', stdout))
    if stderr is not None:
        cmd.extend(('--stderr', stderr))
    cmd.extend(('--min-sample-time', repr(opts.min_sample_time)))
    cmd.extend(('--max-num-samples', '100'))
    cmd.append(str(int(N)))
    cmd.extend(args)

    if opts.verbose:
        g_log.info("running: %s" % " ".join("'%s'" % arg for arg in cmd))
    p = subprocess.Popen(args=cmd, stdin=None, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env, cwd=cwd)
    # BUG FIX: the captured output previously shadowed the `stdout`/`stderr`
    # parameters; use distinct names, consistent with the sibling variants.
    runn_stdout, runn_stderr = p.communicate()
    res = p.returncode

    # If the runN command failed, or it had stderr when we didn't expect it,
    # fail immediately and don't try to parse the output.
    if res != 0:
        g_log.error("command failed with stderr:\n--\n%s\n--" %
                    runn_stderr.strip())
        return None
    elif not ignore_stderr and runn_stderr.strip():
        g_log.error("command had unexpected output on stderr:\n--\n%s\n--" % (
            runn_stderr.strip(),))
        return None

    # Otherwise, parse the timing data from runN.
    # NOTE(review): eval() of subprocess output is trusted only because
    # runN is our own tool.
    try:
        return eval(runn_stdout)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        fatal("failed to parse output: %s\n" % runn_stdout)
def runN(args, N, cwd, preprocess_cmd=None, env=None, sample_mem=False,
         ignore_stderr=False, stdout=None, stderr=None):
    """Interface to runN.

    FIXME: Figure out a better way to deal with need to run as root. Maybe
    farm memory sampling process out into something we can setuid? Eek.
    """
    g_log.info("preprocess_cmd at top of runN: %s:", preprocess_cmd)
    cmd = [opts.runn, '-a']
    if sample_mem:
        cmd = ['sudo'] + cmd + ['-m']
    if preprocess_cmd is not None:
        cmd += ['-p', preprocess_cmd]
    if stdout is not None:
        cmd += ['--stdout', stdout]
    if stderr is not None:
        cmd += ['--stderr', stderr]
    cmd += ['--min-sample-time', repr(opts.min_sample_time),
            '--max-num-samples', '100']
    cmd.append(str(int(N)))
    cmd.extend(args)

    if opts.verbose:
        quoted = " ".join("'%s'" % arg for arg in cmd)
        g_log.info("running: %s" % quoted)
    proc = subprocess.Popen(args=cmd, stdin=None, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, env=env, cwd=cwd)
    runn_stdout, runn_stderr = proc.communicate()
    res = proc.returncode

    # Bail out without parsing when runN failed outright, or produced
    # stderr output we were not expecting.
    if res != 0:
        g_log.error("runN command failed with stderr:\n--\n%s\n--" %
                    runn_stderr.strip())
        return None
    if not ignore_stderr and runn_stderr.strip():
        g_log.error("command had unexpected output on stderr:\n--\n%s\n--" % (
            runn_stderr.strip(),))
        return None

    # Otherwise, parse the timing data from runN.
    try:
        return eval(runn_stdout)
    except Exception:
        fatal("failed to parse output: %s\n" % runn_stdout)
def _lit(self, path, test, profile):
    """Run the LIT test runner over `path` and return the parsed JSON report.

    `test` False means compile-only (passes --no-execute); `profile` True
    gathers perf profiles, which forces the thread count down to 1.
    """
    lit_cmd = self.opts.lit

    # Create a placeholder file for LIT's JSON output; only the name is
    # needed (delete=False keeps it around for LIT to write into).
    output_json_path = tempfile.NamedTemporaryFile(prefix='output',
                                                   suffix='.json',
                                                   dir=path,
                                                   delete=False)
    output_json_path.close()

    subdir = path
    if self.opts.only_test:
        components = [path] + [self.opts.only_test[0]]
        subdir = os.path.join(*components)

    extra_args = []
    if not test:
        extra_args = ['--no-execute']

    nr_threads = self._test_threads()
    if profile:
        if nr_threads != 1:
            logger.warning('Gathering profiles with perf requires -j 1 ' +
                           'as perf record cannot be run multiple times ' +
                           'simultaneously. Overriding -j %s to -j 1' %
                           nr_threads)
            nr_threads = 1
        extra_args += ['--param', 'profile=perf']
        if self.opts.perf_events:
            extra_args += ['--param',
                           'perf_profile_events=%s' % self.opts.perf_events]

    logger.info('Testing...')
    try:
        self._check_call([lit_cmd, '-v', '-j', str(nr_threads), subdir,
                          '-o', output_json_path.name] + extra_args)
    except subprocess.CalledProcessError:
        # LIT is expected to exit with code 1 if there were test
        # failures!
        pass
    try:
        # BUG FIX: json.loads(open(...).read()) leaked the file handle;
        # use a context manager so it is always closed.
        with open(output_json_path.name) as report_file:
            return json.loads(report_file.read())
    except ValueError as e:
        # BUG FIX: `e.message` is deprecated (and absent in Python 3);
        # format the exception itself, which yields the same text.
        fatal("Running test-suite did not create valid json report "
              "in {}: {}".format(output_json_path.name, e))
def _lit(self, path, test, profile):
    """Run the LIT test runner over `path` and return the parsed JSON report.

    `test` False means compile-only (passes --no-execute); `profile` True
    gathers perf profiles, which forces the thread count down to 1.
    """
    lit_cmd = self.opts.lit

    # Placeholder file for LIT's JSON output; only the name is needed.
    output_json_path = tempfile.NamedTemporaryFile(prefix='output',
                                                   suffix='.json',
                                                   dir=path,
                                                   delete=False)
    output_json_path.close()

    subdir = path
    if self.opts.only_test:
        components = [path] + [self.opts.only_test[0]]
        subdir = os.path.join(*components)

    extra_args = []
    if not test:
        extra_args = ['--no-execute']

    nr_threads = self._test_threads()
    if profile:
        if nr_threads != 1:
            logger.warning('Gathering profiles with perf requires -j 1 ' +
                           'as perf record cannot be run multiple times ' +
                           'simultaneously. Overriding -j %s to -j 1' %
                           nr_threads)
            nr_threads = 1
        extra_args += ['--param', 'profile=perf']
        if self.opts.perf_events:
            extra_args += [
                '--param',
                'perf_profile_events=%s' % self.opts.perf_events
            ]

    logger.info('Testing...')
    try:
        self._check_call([
            lit_cmd, '-v', '-j', str(nr_threads), subdir,
            '-o', output_json_path.name
        ] + extra_args)
    except subprocess.CalledProcessError:
        # LIT is expected to exit with code 1 if there were test
        # failures!
        pass
    try:
        # BUG FIX: json.loads(open(...).read()) leaked the file handle;
        # use a context manager so it is always closed.
        with open(output_json_path.name) as report_file:
            return json.loads(report_file.read())
    except ValueError as e:
        # BUG FIX: `e.message` is deprecated (and absent in Python 3);
        # format the exception itself, which yields the same text.
        fatal("Running test-suite did not create valid json report "
              "in {}: {}".format(output_json_path.name, e))
def _generate_run_info(self, tag, result_type, run_order, parent_commit):
    """Assemble the run_info dictionary from the Jenkins/Gerrit environment."""
    # Friendly report key -> environment variable supplying its value.
    env_vars = {
        'Build Number': 'BUILD_NUMBER',
        'Owner': 'GERRIT_CHANGE_OWNER_NAME',
        'Gerrit URL': 'GERRIT_CHANGE_URL',
        'Jenkins URL': 'BUILD_URL',
    }
    # Record only the variables that are actually set.
    run_info = {}
    for key, env_var in env_vars.iteritems():
        if os.getenv(env_var):
            run_info[key] = os.getenv(env_var)

    try:
        commit_message = os.getenv('GERRIT_CHANGE_COMMIT_MESSAGE')
        if commit_message:
            # Gerrit delivers the commit message base64-encoded.
            commit_message = base64.b64decode(commit_message)
    except Exception:
        warning('Unable to decode commit message "{}", skipping'.format(
            commit_message))
    else:
        run_info['Commit Message'] = commit_message

    git_sha = os.getenv('GERRIT_PATCHSET_REVISION')
    if not git_sha:
        fatal("unable to determine git SHA for result, exiting.")

    if run_order:
        run_info['run_order'] = str(run_order)
    else:
        note("run order not provided, will use server-side auto-generated "
             "run order")

    run_info.update({
        'git_sha': git_sha,
        't': str(calendar.timegm(time.gmtime())),
        'tag': tag,
    })

    # Commit-validation runs additionally record the parent commit.
    if result_type == 'cv':
        if not parent_commit:
            parent_commit = self._get_parent_commit()
        run_info.update({'parent_commit': parent_commit})
    return run_info
def frompath(path):
    """
    frompath(path) -> Instance

    Load an LNT instance from the given instance specifier. The instance
    path can be one of:
      * The directory containing the instance.
      * The instance config file.
      * A tarball containing an instance.
    """
    # Accept paths to config files, or to directories containing 'lnt.cfg'.
    tmpdir = None
    if os.path.isdir(path):
        config_path = os.path.join(path, 'lnt.cfg')
    elif tarfile.is_tarfile(path):
        # Accept paths to tar/tgz etc. files, which we automatically unpack
        # into a temporary directory.
        tmpdir = tempfile.mkdtemp(suffix='lnt')
        note("extracting input tarfile %r to %r" % (path, tmpdir))
        tf = tarfile.open(path)
        try:
            tf.extractall(tmpdir)
        finally:
            # BUG FIX: the tarfile handle was previously never closed.
            tf.close()

        # Find the LNT instance inside the tar file. Support tarballs that
        # either contain the instance directly, or contain a single
        # subdirectory which is the instance.
        if os.path.exists(os.path.join(tmpdir, "lnt.cfg")):
            config_path = os.path.join(tmpdir, "lnt.cfg")
        else:
            filenames = os.listdir(tmpdir)
            if len(filenames) != 1:
                fatal("unable to find LNT instance inside tarfile")
            config_path = os.path.join(tmpdir, filenames[0], "lnt.cfg")
    else:
        config_path = path

    if not config_path or not os.path.exists(config_path):
        fatal("invalid config: %r" % config_path)

    config_data = {}
    # Execute the config file to collect its definitions.
    # BUG FIX: `exec open(config_path) in config_data` leaked the file
    # handle; read via a context manager instead, and use the
    # exec(code, globals) form, which is valid in both Python 2 and 3.
    with open(config_path) as config_file:
        exec(config_file.read(), config_data)
    config = lnt.server.config.Config.fromData(config_path, config_data)
    return Instance(config_path, config, tmpdir)
def runN(args, N, cwd, preprocess_cmd=None, env=None, sample_mem=False,
         ignore_stderr=False, stdout=None, stderr=None):
    """Run the runN tool to sample `args` N times in `cwd`.

    Returns the timing data structure printed by runN, or None when the
    command failed or produced unexpected stderr output.
    """
    cmd = ['runN', '-a']
    if sample_mem:
        # Memory sampling requires elevated privileges; wrap in sudo.
        cmd = ['sudo'] + cmd + ['-m']
    if preprocess_cmd is not None:
        cmd.extend(('-p', preprocess_cmd))
    # `stdout`/`stderr` name redirection files for runN, not captured output.
    if stdout is not None:
        cmd.extend(('--stdout', stdout))
    if stderr is not None:
        cmd.extend(('--stderr', stderr))
    cmd.extend(('--min-sample-time', repr(opts.min_sample_time)))
    cmd.extend(('--max-num-samples', '100'))
    cmd.append(str(int(N)))
    cmd.extend(args)

    if opts.verbose:
        g_log.info("running: %s" % " ".join("'%s'" % arg for arg in cmd))
    p = subprocess.Popen(args=cmd, stdin=None, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env, cwd=cwd)
    # BUG FIX: the captured output previously shadowed the `stdout`/`stderr`
    # parameters; use distinct names, consistent with the sibling variants.
    runn_stdout, runn_stderr = p.communicate()
    res = p.returncode

    # If the runN command failed, or it had stderr when we didn't expect it,
    # fail immediately and don't try to parse the output.
    if res != 0:
        g_log.error("command failed with stderr:\n--\n%s\n--" %
                    runn_stderr.strip())
        return None
    elif not ignore_stderr and runn_stderr.strip():
        g_log.error("command had unexpected output on stderr:\n--\n%s\n--" % (
            runn_stderr.strip(),))
        return None

    # Otherwise, parse the timing data from runN.
    # NOTE(review): eval() of subprocess output is trusted only because
    # runN is our own tool.
    try:
        return eval(runn_stdout)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        fatal("failed to parse output: %s\n" % runn_stdout)
def _generate_run_info(self, tag, result_type, run_order, parent_commit):
    """Assemble the run_info dictionary from the Jenkins/Gerrit environment."""
    # Friendly report key -> environment variable supplying its value.
    env_vars = {'Build Number': 'BUILD_NUMBER',
                'Owner': 'GERRIT_CHANGE_OWNER_NAME',
                'Gerrit URL': 'GERRIT_CHANGE_URL',
                'Jenkins URL': 'BUILD_URL'}

    # Record only the variables that are actually set.
    # NOTE: iteritems() is Python 2 only.
    run_info = {key: os.getenv(env_var)
                for key, env_var in env_vars.iteritems()
                if os.getenv(env_var)}

    try:
        commit_message = os.getenv('GERRIT_CHANGE_COMMIT_MESSAGE')
        if commit_message:
            # Gerrit delivers the commit message base64-encoded.
            commit_message = base64.b64decode(commit_message)
    except Exception:
        warning('Unable to decode commit message "{}", skipping'.format(
            commit_message))
    else:
        # NOTE(review): when GERRIT_CHANGE_COMMIT_MESSAGE is unset this
        # stores None under 'Commit Message' — confirm that is intended.
        run_info['Commit Message'] = commit_message

    git_sha = os.getenv('GERRIT_PATCHSET_REVISION')
    if not git_sha:
        fatal("unable to determine git SHA for result, exiting.")

    if run_order:
        run_info['run_order'] = str(run_order)
    else:
        note("run order not provided, will use server-side auto-generated "
             "run order")

    run_info.update({'git_sha': git_sha,
                     't': str(calendar.timegm(time.gmtime())),
                     'tag': tag})

    # Commit-validation runs additionally record the parent commit.
    if result_type == 'cv':
        if not parent_commit:
            parent_commit = self._get_parent_commit()
        run_info.update({'parent_commit': parent_commit})
    return run_info
def _get_parent_commit(self):
    """Query Gerrit for the parent commit of the patchset under test.

    Requires the GERRIT_* variables to be present in the environment;
    reports a fatal error otherwise.
    """
    required_variables = {
        'project': os.environ.get('GERRIT_PROJECT'),
        'branch': os.environ.get('GERRIT_BRANCH'),
        'change_id': os.environ.get('GERRIT_CHANGE_ID'),
        'commit': os.environ.get('GERRIT_PATCHSET_REVISION')
    }

    if all(required_variables.values()):
        url = ('http://review.couchbase.org/changes/{project}~{branch}~'
               '{change_id}/revisions/{commit}/commit'.format(
                   **required_variables))
        note('getting parent commit from {}'.format(url))
        try:
            response = urllib2.urlopen(url).read()
        except Exception:
            # BUG FIX: the '{}' placeholder was never filled in; include
            # the URL in the error message.
            fatal('failed to get parent commit from {}'.format(url))
            raise

        # For some reason Gerrit returns a malformed json response
        # with extra characters before the actual json begins
        # Skip ahead to avoid this causing json deserialisation to fail
        start_index = response.index('{')
        response = response[start_index:]

        try:
            json_response = json.loads(response)
        except Exception:
            fatal('failed to decode Gerrit json response: {}'.format(
                response))
            raise

        parent_commit = json_response['parents'][0]['commit']
        return parent_commit
    else:
        fatal('unable to find required Gerrit environment variables, '
              'exiting')
def _get_parent_commit(self):
    """Query Gerrit for the parent commit of the patchset under test.

    Requires the GERRIT_* variables to be present in the environment;
    reports a fatal error otherwise.
    """
    required_variables = {
        'project': os.environ.get('GERRIT_PROJECT'),
        'branch': os.environ.get('GERRIT_BRANCH'),
        'change_id': os.environ.get('GERRIT_CHANGE_ID'),
        'commit': os.environ.get('GERRIT_PATCHSET_REVISION')}

    if all(required_variables.values()):
        url = ('http://review.couchbase.org/changes/{project}~{branch}~'
               '{change_id}/revisions/{commit}/commit'
               .format(**required_variables))
        note('getting parent commit from {}'.format(url))
        try:
            response = urllib2.urlopen(url).read()
        except Exception:
            # BUG FIX: the '{}' placeholder was never filled in; include
            # the URL in the error message.
            fatal('failed to get parent commit from {}'.format(url))
            raise

        # For some reason Gerrit returns a malformed json response
        # with extra characters before the actual json begins
        # Skip ahead to avoid this causing json deserialisation to fail
        start_index = response.index('{')
        response = response[start_index:]

        try:
            json_response = json.loads(response)
        except Exception:
            fatal('failed to decode Gerrit json response: {}'
                  .format(response))
            raise

        parent_commit = json_response['parents'][0]['commit']
        return parent_commit
    else:
        fatal('unable to find required Gerrit environment variables, '
              'exiting')
def run_test(self, opts):
    """Validate options, configure the test-suite build, run it (possibly
    multiple samples), write the reports, and submit the results.

    NOTE(review): both `opts` and `self.opts` are used throughout;
    presumably they refer to the same options object — confirm at the
    call site.
    """
    # Validate the compilers under test; infer --cxx from --cc if possible.
    if self.opts.cc is not None:
        self.opts.cc = resolve_command_path(self.opts.cc)
        if not lnt.testing.util.compilers.is_valid(self.opts.cc):
            self._fatal('--cc does not point to a valid executable.')
        # If there was no --cxx given, attempt to infer it from the --cc.
        if self.opts.cxx is None:
            self.opts.cxx = \
                lnt.testing.util.compilers.infer_cxx_compiler(self.opts.cc)
            if self.opts.cxx is not None:
                logger.info("Inferred C++ compiler under test as: %r" %
                            (self.opts.cxx,))
            else:
                self._fatal("unable to infer --cxx - set it manually.")
        else:
            self.opts.cxx = resolve_command_path(self.opts.cxx)
        if not os.path.exists(self.opts.cxx):
            self._fatal("invalid --cxx argument %r, does not exist" %
                        (self.opts.cxx))

    # Validate the test-suite source locations.
    if opts.test_suite_root is None:
        self._fatal('--test-suite is required')
    if not os.path.exists(opts.test_suite_root):
        self._fatal("invalid --test-suite argument, does not exist: %r" % (
            opts.test_suite_root))
    opts.test_suite_root = os.path.abspath(opts.test_suite_root)

    if opts.test_suite_externals:
        if not os.path.exists(opts.test_suite_externals):
            self._fatal(
                "invalid --test-externals argument, does not exist: %r" % (
                    opts.test_suite_externals,))
        opts.test_suite_externals = os.path.abspath(
            opts.test_suite_externals)

    # Resolve and sanity-check the build/test tools.
    opts.cmake = resolve_command_path(opts.cmake)
    if not isexecfile(opts.cmake):
        self._fatal("CMake tool not found (looked for %s)" % opts.cmake)
    opts.make = resolve_command_path(opts.make)
    if not isexecfile(opts.make):
        self._fatal("Make tool not found (looked for %s)" % opts.make)
    opts.lit = resolve_command_path(opts.lit)
    if not isexecfile(opts.lit):
        self._fatal("LIT tool not found (looked for %s)" % opts.lit)
    if opts.run_under:
        split = shlex.split(opts.run_under)
        split[0] = resolve_command_path(split[0])
        if not isexecfile(split[0]):
            self._fatal("Run under wrapper not found (looked for %s)" %
                        opts.run_under)

    if opts.single_result:
        # --single-result implies --only-test
        opts.only_test = opts.single_result

    if opts.only_test:
        # --only-test can either point to a particular test or a directory.
        # Therefore, test_suite_root + opts.only_test or
        # test_suite_root + dirname(opts.only_test) must be a directory.
        path = os.path.join(self.opts.test_suite_root, opts.only_test)
        parent_path = os.path.dirname(path)
        if os.path.isdir(path):
            opts.only_test = (opts.only_test, None)
        elif os.path.isdir(parent_path):
            opts.only_test = (os.path.dirname(opts.only_test),
                              os.path.basename(opts.only_test))
        else:
            self._fatal("--only-test argument not understood (must be a " +
                        " test or directory name)")

    if opts.single_result and not opts.only_test[1]:
        self._fatal("--single-result must be given a single test name, "
                    "not a directory name")

    # Flag lists were accumulated with action="append"; flatten to strings.
    opts.cppflags = ' '.join(opts.cppflags)
    opts.cflags = ' '.join(opts.cflags)
    opts.cxxflags = ' '.join(opts.cxxflags)

    if opts.diagnose:
        if not opts.only_test:
            self._fatal("--diagnose requires --only-test")

    self.start_time = timestamp()

    # Work out where to put our build stuff
    if self.opts.timestamp_build:
        ts = self.start_time.replace(' ', '_').replace(':', '-')
        build_dir_name = "test-%s" % ts
    else:
        build_dir_name = "build"
    basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
    self._base_path = basedir

    # Skip the cmake configure step if a cache already exists and
    # --no-configure was given.
    cmakecache = os.path.join(self._base_path, 'CMakeCache.txt')
    self.configured = not self.opts.run_configure and \
        os.path.exists(cmakecache)

    # If we are doing diagnostics, skip the usual run and do them now.
    if opts.diagnose:
        return self.diagnose()

    # configure, so we can extract toolchain information from the cmake
    # output.
    self._configure_if_needed()

    # Verify that we can actually find a compiler before continuing
    cmake_vars = self._extract_cmake_vars_from_cache()
    if "CMAKE_C_COMPILER" not in cmake_vars or \
            not os.path.exists(cmake_vars["CMAKE_C_COMPILER"]):
        self._fatal(
            "Couldn't find C compiler (%s). Maybe you should specify --cc?"
            % cmake_vars.get("CMAKE_C_COMPILER"))

    # We don't support compiling without testing as we can't get compile-
    # time numbers from LIT without running the tests.
    if opts.compile_multisample > opts.exec_multisample:
        logger.info("Increasing number of execution samples to %d" %
                    opts.compile_multisample)
        opts.exec_multisample = opts.compile_multisample

    if opts.auto_name:
        # Construct the nickname from a few key parameters.
        cc_info = self._get_cc_info(cmake_vars)
        cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
        opts.label += "__%s__%s" % \
            (cc_nick, cc_info['cc_target'].split('-')[0])
    logger.info('Using nickname: %r' % opts.label)

    # When we can't detect the clang version we use 0 instead. That
    # is a horrible failure mode because all of our data ends up going
    # to order 0. The user needs to give an order if we can't detect!
    if opts.run_order is None:
        cc_info = self._get_cc_info(cmake_vars)
        if cc_info['inferred_run_order'] == 0:
            fatal("Cannot detect compiler version. Specify --run-order"
                  " to manually define it.")

    # Now do the actual run.
    reports = []
    json_reports = []
    for i in range(max(opts.exec_multisample, opts.compile_multisample)):
        # Compile/execute only for as many samples as each was requested.
        c = i < opts.compile_multisample
        e = i < opts.exec_multisample
        # only gather perf profiles on a single run.
        p = i == 0 and self.opts.use_perf in ('profile', 'all')
        run_report, json_data = self.run(cmake_vars, compile=c, test=e,
                                         profile=p)
        reports.append(run_report)
        json_reports.append(json_data)

    report = self._create_merged_report(reports)

    # Write the report out so it can be read by the submission tool.
    report_path = os.path.join(self._base_path, 'report.json')
    with open(report_path, 'w') as fd:
        fd.write(report.render())

    if opts.output:
        with open(opts.output, 'w') as fd:
            fd.write(report.render())

    # Also emit xUnit XML and CSV renderings of the LIT results.
    xml_report_path = os.path.join(self._base_path,
                                   'test-results.xunit.xml')
    str_template = _lit_json_to_xunit_xml(json_reports)
    with open(xml_report_path, 'w') as fd:
        fd.write(str_template)

    csv_report_path = os.path.join(self._base_path, 'test-results.csv')
    str_template = _lit_json_to_csv(json_reports)
    with open(csv_report_path, 'w') as fd:
        fd.write(str_template)

    return self.submit(report_path, self.opts, 'nts')
def run_test(self, name, args): # FIXME: Add more detailed usage information parser = OptionParser("%s [options] test-suite" % name) group = OptionGroup(parser, "Sandbox options") group.add_option("-S", "--sandbox", dest="sandbox_path", help="Parent directory to build and run tests in", type=str, default=None, metavar="PATH") group.add_option("", "--no-timestamp", dest="timestamp_build", action="store_false", default=True, help="Don't timestamp build directory (for testing)") group.add_option("", "--no-configure", dest="run_configure", action="store_false", default=True, help="Don't run CMake if CMakeCache.txt is present" " (only useful with --no-timestamp") parser.add_option_group(group) group = OptionGroup(parser, "Inputs") group.add_option("", "--test-suite", dest="test_suite_root", type=str, metavar="PATH", default=None, help="Path to the LLVM test-suite sources") group.add_option("", "--test-externals", dest="test_suite_externals", type=str, metavar="PATH", help="Path to the LLVM test-suite externals") group.add_option("", "--cmake-define", dest="cmake_defines", action="append", default=[], help=("Defines to pass to cmake. These do not require the " "-D prefix and can be given multiple times. e.g.: " "--cmake-define A=B => -DA=B")) group.add_option("-C", "--cmake-cache", dest="cmake_cache", action="append", default=[], help=("Use one of the test-suite's cmake configurations." " Ex: Release, Debug")) parser.add_option_group(group) group = OptionGroup(parser, "Test compiler") group.add_option("", "--cc", dest="cc", metavar="CC", type=str, default=None, help="Path to the C compiler to test") group.add_option("", "--cxx", dest="cxx", metavar="CXX", type=str, default=None, help="Path to the C++ compiler to test (inferred from" " --cc where possible") group.add_option("", "--cppflags", type=str, action="append", dest="cppflags", default=[], help="Extra flags to pass the compiler in C or C++ mode. 
" "Can be given multiple times") group.add_option("", "--cflags", type=str, action="append", dest="cflags", default=[], help="Extra CFLAGS to pass to the compiler. Can be " "given multiple times") group.add_option("", "--cxxflags", type=str, action="append", dest="cxxflags", default=[], help="Extra CXXFLAGS to pass to the compiler. Can be " "given multiple times") parser.add_option_group(group) group = OptionGroup(parser, "Test selection") group.add_option("", "--test-size", type='choice', dest="test_size", choices=['small', 'regular', 'large'], default='regular', help="The size of test inputs to use") group.add_option("", "--benchmarking-only", dest="benchmarking_only", action="store_true", default=False, help="Benchmarking-only mode. Disable unit tests and " "other flaky or short-running tests") group.add_option("", "--only-test", dest="only_test", metavar="PATH", type=str, default=None, help="Only run tests under PATH") parser.add_option_group(group) group = OptionGroup(parser, "Test Execution") group.add_option("", "--only-compile", dest="only_compile", help="Don't run the tests, just compile them.", action="store_true", default=False, ) group.add_option("-j", "--threads", dest="threads", help="Number of testing (and optionally build) " "threads", type=int, default=1, metavar="N") group.add_option("", "--build-threads", dest="build_threads", help="Number of compilation threads, defaults to " "--threads", type=int, default=0, metavar="N") group.add_option("", "--use-perf", dest="use_perf", help=("Use Linux perf for high accuracy timing, profile " "information or both"), type='choice', choices=['none', 'time', 'profile', 'all'], default='none') group.add_option("", "--run-under", dest="run_under", help="Wrapper to run tests under ['%default']", type=str, default="") group.add_option("", "--exec-multisample", dest="exec_multisample", help="Accumulate execution test data from multiple runs", type=int, default=1, metavar="N") group.add_option("", 
"--compile-multisample", dest="compile_multisample", help="Accumulate compile test data from multiple runs", type=int, default=1, metavar="N") group.add_option("-d", "--diagnose", dest="diagnose", help="Produce a diagnostic report for a particular " "test, this will not run all the tests. Must be" " used in conjunction with --only-test.", action="store_true", default=False,) group.add_option("", "--pgo", dest="pgo", help="Run the test-suite in training mode first and" " collect PGO data, then rerun with that training " "data.", action="store_true", default=False,) parser.add_option_group(group) group = OptionGroup(parser, "Output Options") group.add_option("", "--no-auto-name", dest="auto_name", help="Don't automatically derive submission name", action="store_false", default=True) group.add_option("", "--run-order", dest="run_order", metavar="STR", help="String to use to identify and order this run", action="store", type=str, default=None) group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH", help=("autosubmit the test result to the given server" " (or local instance) [%default]"), type=str, default=None) group.add_option("", "--commit", dest="commit", help=("whether the autosubmit result should be committed " "[%default]"), type=int, default=True) group.add_option("", "--succinct-compile-output", help="run Make without VERBOSE=1", action="store_true", dest="succinct") group.add_option("-v", "--verbose", dest="verbose", help="show verbose test results", action="store_true", default=False) group.add_option("", "--exclude-stat-from-submission", dest="exclude_stat_from_submission", help="Do not submit the stat of this type [%default]", action='append', choices=KNOWN_SAMPLE_KEYS, type='choice', default=[]) group.add_option("", "--single-result", dest="single_result", help=("only execute this single test and apply " "--single-result-predicate to calculate the " "exit status")) group.add_option("", "--single-result-predicate", 
dest="single_result_predicate", help=("the predicate to apply to calculate the exit " "status (with --single-result)"), default="status") parser.add_option_group(group) group = OptionGroup(parser, "Test tools") group.add_option("", "--use-cmake", dest="cmake", metavar="PATH", type=str, default="cmake", help="Path to CMake [cmake]") group.add_option("", "--use-make", dest="make", metavar="PATH", type=str, default="make", help="Path to Make [make]") group.add_option("", "--use-lit", dest="lit", metavar="PATH", type=str, default="llvm-lit", help="Path to the LIT test runner [llvm-lit]") parser.add_option_group(group) (opts, args) = parser.parse_args(args) self.opts = opts if len(args) == 0: self.nick = platform.uname()[1] elif len(args) == 1: self.nick = args[0] else: parser.error("Expected no positional arguments (got: %r)" % (args,)) if self.opts.sandbox_path is None: parser.error('--sandbox is required') if self.opts.cc is not None: self.opts.cc = resolve_command_path(self.opts.cc) if not lnt.testing.util.compilers.is_valid(self.opts.cc): parser.error('--cc does not point to a valid executable.') # If there was no --cxx given, attempt to infer it from the --cc. 
if self.opts.cxx is None: self.opts.cxx = \ lnt.testing.util.compilers.infer_cxx_compiler(self.opts.cc) if self.opts.cxx is not None: note("Inferred C++ compiler under test as: %r" % (self.opts.cxx,)) else: parser.error("unable to infer --cxx - set it manually.") else: self.opts.cxx = resolve_command_path(self.opts.cxx) if not os.path.exists(self.opts.cxx): parser.error("invalid --cxx argument %r, does not exist" % (self.opts.cxx)) if opts.test_suite_root is None: parser.error('--test-suite is required') if not os.path.exists(opts.test_suite_root): parser.error("invalid --test-suite argument, does not exist: %r" % ( opts.test_suite_root)) if opts.test_suite_externals: if not os.path.exists(opts.test_suite_externals): parser.error( "invalid --test-externals argument, does not exist: %r" % ( opts.test_suite_externals,)) opts.cmake = resolve_command_path(opts.cmake) if not isexecfile(opts.cmake): parser.error("CMake tool not found (looked for %s)" % opts.cmake) opts.make = resolve_command_path(opts.make) if not isexecfile(opts.make): parser.error("Make tool not found (looked for %s)" % opts.make) opts.lit = resolve_command_path(opts.lit) if not isexecfile(opts.lit): parser.error("LIT tool not found (looked for %s)" % opts.lit) if opts.run_under: split = shlex.split(opts.run_under) split[0] = resolve_command_path(split[0]) if not isexecfile(split[0]): parser.error("Run under wrapper not found (looked for %s)" % opts.run_under) if opts.single_result: # --single-result implies --only-test opts.only_test = opts.single_result if opts.only_test: # --only-test can either point to a particular test or a directory. # Therefore, test_suite_root + opts.only_test or # test_suite_root + dirname(opts.only_test) must be a directory. 
path = os.path.join(self.opts.test_suite_root, opts.only_test) parent_path = os.path.dirname(path) if os.path.isdir(path): opts.only_test = (opts.only_test, None) elif os.path.isdir(parent_path): opts.only_test = (os.path.dirname(opts.only_test), os.path.basename(opts.only_test)) else: parser.error("--only-test argument not understood (must be a " + " test or directory name)") if opts.single_result and not opts.only_test[1]: parser.error("--single-result must be given a single test name, not a " + "directory name") opts.cppflags = ' '.join(opts.cppflags) opts.cflags = ' '.join(opts.cflags) opts.cxxflags = ' '.join(opts.cxxflags) if opts.diagnose: if not opts.only_test: parser.error("--diagnose requires --only-test") self.start_time = timestamp() # Work out where to put our build stuff if self.opts.timestamp_build: ts = self.start_time.replace(' ', '_').replace(':', '-') build_dir_name = "test-%s" % ts else: build_dir_name = "build" basedir = os.path.join(self.opts.sandbox_path, build_dir_name) self._base_path = basedir cmakecache = os.path.join(self._base_path, 'CMakeCache.txt') self.configured = not self.opts.run_configure and \ os.path.exists(cmakecache) # If we are doing diagnostics, skip the usual run and do them now. if opts.diagnose: return self.diagnose() # configure, so we can extract toolchain information from the cmake # output. self._configure_if_needed() # Verify that we can actually find a compiler before continuing cmake_vars = self._extract_cmake_vars_from_cache() if "CMAKE_C_COMPILER" not in cmake_vars or \ not os.path.exists(cmake_vars["CMAKE_C_COMPILER"]): parser.error( "Couldn't find C compiler (%s). Maybe you should specify --cc?" % cmake_vars.get("CMAKE_C_COMPILER")) # We don't support compiling without testing as we can't get compile- # time numbers from LIT without running the tests. 
if opts.compile_multisample > opts.exec_multisample: note("Increasing number of execution samples to %d" % opts.compile_multisample) opts.exec_multisample = opts.compile_multisample if opts.auto_name: # Construct the nickname from a few key parameters. cc_info = self._get_cc_info(cmake_vars) cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build']) self.nick += "__%s__%s" % (cc_nick, cc_info['cc_target'].split('-')[0]) note('Using nickname: %r' % self.nick) # When we can't detect the clang version we use 0 instead. That # is a horrible failure mode because all of our data ends up going # to order 0. The user needs to give an order if we can't detect! if opts.run_order is None: cc_info = self._get_cc_info(cmake_vars) if cc_info['inferred_run_order'] == 0: fatal("Cannot detect compiler version. Specify --run-order" " to manually define it.") # Now do the actual run. reports = [] json_reports = [] for i in range(max(opts.exec_multisample, opts.compile_multisample)): c = i < opts.compile_multisample e = i < opts.exec_multisample run_report, json_data = self.run(cmake_vars, compile=c, test=e) reports.append(run_report) json_reports.append(json_data) report = self._create_merged_report(reports) # Write the report out so it can be read by the submission tool. report_path = os.path.join(self._base_path, 'report.json') with open(report_path, 'w') as fd: fd.write(report.render()) xml_report_path = os.path.join(self._base_path, 'test-results.xunit.xml') str_template = _lit_json_to_xunit_xml(json_reports) with open(xml_report_path, 'w') as fd: fd.write(str_template) csv_report_path = os.path.join(self._base_path, 'test-results.csv') str_template = _lit_json_to_csv(json_reports) with open(csv_report_path, 'w') as fd: fd.write(str_template) return self.submit(report_path, self.opts, commit=True)
def test_cc_command(base_name, run_info, variables, input, output, flags,
                    extra_flags, has_output=True, ignore_stderr=False):
    """Measure compiling `input` with the compiler under test.

    Generator yielding (success, test_name, samples) tuples: an optional
    memory-profiling run (via the extracted cc1 command), then the timed
    compile runs, and finally the size of the produced output file when
    `has_output` is true.
    """
    name = '%s/(%s)' % (base_name, ' '.join(flags),)
    input = get_input_path(opts, input)
    output = get_output_path(output)

    cmd = [variables.get('cc')]
    cmd.extend(extra_flags)
    cmd.append(input)
    cmd.extend(flags)

    # Inhibit all warnings, we don't want to count the time to generate them
    # against newer compilers which have added (presumably good) warnings.
    cmd.append('-w')

    # Do a memory profiling run, if requested.
    #
    # FIXME: Doing this as a separate step seems silly. We shouldn't do any
    # extra run just to get the memory statistics.
    if opts.memory_profiling:
        # Find the cc1 command, which we use to do memory profiling. To do
        # this we execute the compiler with '-###' to figure out what it
        # wants to do.
        cc_output = commands.capture(cmd + ['-o', '/dev/null', '-###'],
                                     include_stderr=True).strip()
        cc_commands = []
        for ln in cc_output.split('\n'):
            # Filter out known garbage.
            if (ln == 'Using built-in specs.' or
                    ln.startswith('Configured with:') or
                    ln.startswith('Target:') or
                    ln.startswith('Thread model:') or
                    ln.startswith('InstalledDir:') or
                    ' version ' in ln):
                continue
            cc_commands.append(ln)
        if len(cc_commands) != 1:
            fatal('unable to determine cc1 command: %r' % cc_output)

        cc1_cmd = shlex.split(cc_commands[0])
        for res in get_runN_test_data(name, variables, cc1_cmd,
                                      ignore_stderr=ignore_stderr,
                                      sample_mem=True, only_mem=True):
            yield res

    commands.rm_f(output)
    for res in get_runN_test_data(name, variables, cmd + ['-o', output],
                                  ignore_stderr=ignore_stderr):
        yield res

    # If the command has output, track its size.
    if has_output:
        tname = '%s.size' % (name,)
        success = False
        samples = []
        try:
            stat = os.stat(output)
            success = True

            # For now, the way the software is set up things are going to get
            # confused if we don't report the same number of samples as
            # reported for other variables. So we just report the size N
            # times.
            #
            # FIXME: We should resolve this, eventually.
            for i in range(variables.get('run_count')):
                samples.append(stat.st_size)
        # Fixed legacy Python-2-only 'except OSError,e:' syntax; 'as' works
        # on 2.6+ and matches the rest of the file.
        except OSError as e:
            # A missing output file just means the compile failed; anything
            # else is unexpected and should propagate.
            if e.errno != errno.ENOENT:
                raise
        yield (success, tname, samples)
def test_build(base_name, run_info, variables, project, build_config,
               num_jobs, codesize_util=None):
    """Unpack and patch `project`'s source archive into the sandbox.

    The archive is only re-extracted when the hash of its contents (plus
    the project description) differs from the hash recorded at the last
    unpack, so repeated runs reuse existing sources.
    """
    name = '%s(config=%r,j=%d)' % (base_name, build_config, num_jobs)
    # Check if we need to expand the archive into the sandbox.
    archive_path = get_input_path(opts, project['archive'])
    with open(archive_path) as f:
        # Hash the project description in as well so configuration changes
        # also force a fresh unpack.
        archive_hash = hashlib.md5(f.read() + str(project)).hexdigest()

    # Compute the path to unpack to.
    source_path = get_output_path("..", "Sources", project['name'])

    # Load the hash of the last unpack, in case the archive has been updated.
    last_unpack_hash_path = os.path.join(source_path, "last_unpack_hash.txt")
    if os.path.exists(last_unpack_hash_path):
        with open(last_unpack_hash_path) as f:
            last_unpack_hash = f.read()
    else:
        last_unpack_hash = None

    # Unpack if necessary.
    if last_unpack_hash == archive_hash:
        g_log.info('reusing sources %r (already unpacked)' % name)
    else:
        # Remove any existing content, if necessary.
        try:
            shutil.rmtree(source_path)
        # Fixed legacy 'except OSError, e:' syntax ('as' works on 2.6+).
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Extract the zip file.
        #
        # We shell out to unzip here because zipfile's extractall does not
        # appear to preserve permissions properly.
        commands.mkdir_p(source_path)
        g_log.info('extracting sources for %r' % name)
        if archive_path[-6:] == "tar.gz":
            p = subprocess.Popen(args=['tar', '-xzf', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
        else:
            p = subprocess.Popen(args=['unzip', '-q', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
        stdout, stderr = p.communicate()
        if p.wait() != 0:
            fatal(("unable to extract archive %r at %r\n"
                   "-- stdout --\n%s\n"
                   "-- stderr --\n%s\n") % (archive_path, source_path,
                                            stdout, stderr))
        # NOTE(review): removed a dead duplicated check here — the original
        # repeated 'if p.wait() != 0: fatal' with a bare, uncalled 'fatal'
        # name, which was a no-op (the call above already handles failure).

        # Apply the patch file, if necessary.
        patch_files = project.get('patch_files', [])
        for patch_file in patch_files:
            g_log.info('applying patch file %r for %r' % (patch_file, name))
            patch_file_path = get_input_path(opts, patch_file)
            p = subprocess.Popen(args=['patch', '-i', patch_file_path,
                                       '-p', '1'],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
            stdout, stderr = p.communicate()
            if p.wait() != 0:
                fatal(("unable to apply patch file %r in %r\n"
                       "-- stdout --\n%s\n"
                       "-- stderr --\n%s\n") % (patch_file_path, source_path,
                                                stdout, stderr))

        # Write the hash tag.
        with open(last_unpack_hash_path, "w") as f:
            f.write(archive_hash)
def _configure(self, path, extra_cmake_defs=None, execute=True):
    """Build (and optionally run) the CMake configure command line.

    Collects -D definitions from the parsed options, applies user cmake
    defines and `extra_cmake_defs` on top (these override earlier settings
    even when given with an explicit datatype), then assembles the full
    cmake invocation. When `execute` is true the command is run with
    `path` as the working directory. Returns the command as a list.
    """
    # Fixed mutable default argument ([]) — use None as the sentinel.
    if extra_cmake_defs is None:
        extra_cmake_defs = []
    cmake_cmd = self.opts.cmake

    defs = {}
    if self.opts.cc:
        defs['CMAKE_C_COMPILER'] = self.opts.cc
    if self.opts.cxx:
        defs['CMAKE_CXX_COMPILER'] = self.opts.cxx

    cmake_build_types = ('DEBUG', 'MINSIZEREL', 'RELEASE', 'RELWITHDEBINFO')
    if self.opts.cppflags or self.opts.cflags:
        all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
        defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)
        # Ensure that no flags get added based on build type when the user
        # explicitly specifies flags to use.
        for build_type in cmake_build_types:
            defs['CMAKE_C_FLAGS_' + build_type] = ""
    if self.opts.cppflags or self.opts.cxxflags:
        all_cxx_flags = ' '.join([self.opts.cppflags, self.opts.cxxflags])
        defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(all_cxx_flags)
        # Ensure that no flags get added based on build type when the user
        # explicitly specifies flags to use.
        for build_type in cmake_build_types:
            defs['CMAKE_CXX_FLAGS_' + build_type] = ""
    if self.opts.run_under:
        defs['TEST_SUITE_RUN_UNDER'] = \
            self._unix_quote_args(self.opts.run_under)
    if self.opts.benchmarking_only:
        defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
    if self.opts.only_compile:
        defs['TEST_SUITE_RUN_BENCHMARKS'] = 'Off'
    if self.opts.use_perf in ('time', 'all'):
        defs['TEST_SUITE_USE_PERF'] = 'ON'
    if self.opts.test_suite_externals:
        defs['TEST_SUITE_EXTERNALS_DIR'] = self.opts.test_suite_externals
    if self.opts.pgo and self.trained:
        defs['TEST_SUITE_PROFILE_USE'] = "On"
        defs['TEST_SUITE_PROFILE_GENERATE'] = "Off"
        if 'TEST_SUITE_RUN_TYPE' not in defs:
            defs['TEST_SUITE_RUN_TYPE'] = 'ref'
    for item in tuple(self.opts.cmake_defines) + tuple(extra_cmake_defs):
        k, v = item.split('=', 1)
        # make sure the overriding of the settings above also works
        # when the cmake-define-defined variable has a datatype
        # specified.
        key_no_datatype = k.split(':', 1)[0]
        if key_no_datatype in defs:
            del defs[key_no_datatype]
        defs[k] = v

    # We use 'cmake -LAH -N' later to find out the value of the
    # CMAKE_C_COMPILER and CMAKE_CXX_COMPILER variables.
    # 'cmake -LAH -N' will only return variables in the cache that have
    # a cmake type set. Therefore, explicitly set a 'FILEPATH' type on
    # these variables here, if they were untyped so far.
    if 'CMAKE_C_COMPILER' in defs:
        defs['CMAKE_C_COMPILER:FILEPATH'] = defs['CMAKE_C_COMPILER']
        del defs['CMAKE_C_COMPILER']
    if 'CMAKE_CXX_COMPILER' in defs:
        defs['CMAKE_CXX_COMPILER:FILEPATH'] = defs['CMAKE_CXX_COMPILER']
        del defs['CMAKE_CXX_COMPILER']

    lines = ['Configuring with {']
    for k, v in sorted(defs.items()):
        lines.append(" %s: '%s'" % (k, v))
    lines.append('}')

    if 'TEST_SUITE_REMOTE_HOST' in defs:
        self.remote_run = True

    # Prepare cmake cache if requested:
    cmake_flags = []
    for cache in self.opts.cmake_cache:
        if cache == "":
            continue
        # Shortcut for the common case.
        if not cache.endswith(".cmake") and "/" not in cache:
            cache = os.path.join(self._test_suite_dir(),
                                 "cmake/caches", cache + ".cmake")
        cache = os.path.abspath(cache)
        if not os.path.exists(cache):
            fatal("Could not find CMake cache file: " + cache)
        cmake_flags += ['-C', cache]

    for l in lines:
        logger.info(l)

    # Define compilers before specifying the cache files.
    early_defs = {}
    for key in ['CMAKE_C_COMPILER:FILEPATH',
                'CMAKE_CXX_COMPILER:FILEPATH']:
        value = defs.pop(key, None)
        if value is not None:
            early_defs[key] = value

    cmake_cmd = ([cmake_cmd] +
                 ['-D%s=%s' % (k, v) for k, v in early_defs.items()] +
                 cmake_flags + [self._test_suite_dir()] +
                 ['-D%s=%s' % (k, v) for k, v in defs.items()])

    if execute:
        self._check_call(cmake_cmd, cwd=path)

    return cmake_cmd
cmd = [] preprocess_cmd = None if build_info['style'].startswith('xcode-'): file_path = os.path.join(source_path, build_info['file']) cmd.extend(['xcodebuild']) # Add the arguments to select the build target. if build_info['style'] == 'xcode-project': cmd.extend(('-target', build_info['target'], '-project', file_path)) elif build_info['style'] == 'xcode-workspace': cmd.extend(('-scheme', build_info['scheme'], '-workspace', file_path)) else: fatal("unknown build style in project: %r" % project) # Add the build configuration selection. cmd.extend(('-configuration', build_config)) cmd.append('OBJROOT=%s' % (os.path.join(build_base, 'obj'))) cmd.append('SYMROOT=%s' % (os.path.join(build_base, 'sym'))) cmd.append('DSTROOT=%s' % (os.path.join(build_base, 'dst'))) cmd.append('SHARED_PRECOMPS_DIR=%s' % (os.path.join(build_base, 'pch'))) # Add arguments to force the appropriate compiler. cmd.append('CC=%s' % (opts.cc,)) cmd.append('CPLUSPLUS=%s' % (opts.cxx,)) # We need to force this variable here because Xcode has some completely # broken logic for deriving this variable from the compiler
def _configure(self, path, extra_cmake_defs=None, execute=True):
    """Build (and optionally run) the CMake configure command line.

    Collects -D definitions from the parsed options, applies the user's
    cmake defines and `extra_cmake_defs` on top, then assembles the full
    cmake invocation. When `execute` is true the command is run with
    `path` as the working directory. Returns the command as a list.
    """
    # Fixed mutable default argument ([]) — use None as the sentinel.
    if extra_cmake_defs is None:
        extra_cmake_defs = []
    cmake_cmd = self.opts.cmake

    defs = {
        # FIXME: Support ARCH, SMALL/LARGE etc
        'CMAKE_C_COMPILER': self.opts.cc,
        'CMAKE_CXX_COMPILER': self.opts.cxx,
    }
    if self.opts.cppflags or self.opts.cflags:
        all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
        defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)
    if self.opts.cppflags or self.opts.cxxflags:
        all_cxx_flags = ' '.join([self.opts.cppflags, self.opts.cxxflags])
        defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(all_cxx_flags)
    if self.opts.run_under:
        defs['TEST_SUITE_RUN_UNDER'] = self._unix_quote_args(
            self.opts.run_under)
    if self.opts.benchmarking_only:
        defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
    if self.opts.only_compile:
        defs['TEST_SUITE_RUN_BENCHMARKS'] = 'Off'
    if self.opts.use_perf in ('time', 'all'):
        defs['TEST_SUITE_USE_PERF'] = 'ON'
    if self.opts.test_suite_externals:
        defs['TEST_SUITE_EXTERNALS_DIR'] = self.opts.test_suite_externals
    if self.opts.pgo and self.trained:
        defs['TEST_SUITE_PROFILE_USE'] = "On"
        defs['TEST_SUITE_PROFILE_GENERATE'] = "Off"
        if 'TEST_SUITE_RUN_TYPE' not in defs:
            defs['TEST_SUITE_RUN_TYPE'] = 'ref'
    # User-supplied defines override everything above; extra_cmake_defs
    # (from callers) override the command-line defines in turn.
    if self.opts.cmake_defines:
        for item in self.opts.cmake_defines:
            k, v = item.split('=', 1)
            defs[k] = v
    for item in extra_cmake_defs:
        k, v = item.split('=', 1)
        defs[k] = v

    lines = ['Configuring with {']
    for k, v in sorted(defs.items()):
        lines.append(" %s: '%s'" % (k, v))
    lines.append('}')

    # Prepare cmake cache if requested:
    cmake_flags = []
    for cache in self.opts.cmake_cache:
        # Shortcut for the common case.
        if not cache.endswith(".cmake") and "/" not in cache:
            cache = os.path.join(self._test_suite_dir(),
                                 "cmake/caches", cache + ".cmake")
        if not os.path.exists(cache):
            fatal("Could not find CMake cache file: " + cache)
        cmake_flags += ['-C', cache]

    for l in lines:
        note(l)

    cmake_cmd = [cmake_cmd] + cmake_flags + [self._test_suite_dir()] + \
        ['-D%s=%s' % (k, v) for k, v in defs.items()]

    if execute:
        self._check_call(cmake_cmd, cwd=path)

    return cmake_cmd
def test_build(base_name, run_info, variables, project, build_config,
               num_jobs, codesize_util=None):
    """Unpack and patch `project`'s source archive into the sandbox.

    The archive is only re-extracted when the hash of its contents (plus
    the project description) differs from the hash recorded at the last
    unpack, so repeated runs reuse existing sources.
    """
    name = '%s(config=%r,j=%d)' % (base_name, build_config, num_jobs)
    # Check if we need to expand the archive into the sandbox.
    archive_path = get_input_path(opts, project['archive'])
    with open(archive_path) as f:
        # Hash the project description in as well so configuration changes
        # also force a fresh unpack.
        archive_hash = hashlib.md5(f.read() + str(project)).hexdigest()

    # Compute the path to unpack to.
    source_path = get_output_path("..", "Sources", project['name'])

    # Load the hash of the last unpack, in case the archive has been updated.
    last_unpack_hash_path = os.path.join(source_path, "last_unpack_hash.txt")
    if os.path.exists(last_unpack_hash_path):
        with open(last_unpack_hash_path) as f:
            last_unpack_hash = f.read()
    else:
        last_unpack_hash = None

    # Unpack if necessary.
    if last_unpack_hash == archive_hash:
        g_log.info('reusing sources %r (already unpacked)' % name)
    else:
        # Remove any existing content, if necessary.
        try:
            shutil.rmtree(source_path)
        # Fixed legacy 'except OSError, e:' syntax ('as' works on 2.6+).
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Extract the zip file.
        #
        # We shell out to unzip here because zipfile's extractall does not
        # appear to preserve permissions properly.
        commands.mkdir_p(source_path)
        g_log.info('extracting sources for %r' % name)
        if archive_path[-6:] == "tar.gz":
            p = subprocess.Popen(args=['tar', '-xzf', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
        else:
            p = subprocess.Popen(args=['unzip', '-q', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
        stdout, stderr = p.communicate()
        if p.wait() != 0:
            fatal(("unable to extract archive %r at %r\n"
                   "-- stdout --\n%s\n"
                   "-- stderr --\n%s\n") % (archive_path, source_path,
                                            stdout, stderr))
        # NOTE(review): removed a dead duplicated check here — the original
        # repeated 'if p.wait() != 0: fatal' with a bare, uncalled 'fatal'
        # name, which was a no-op (the call above already handles failure).

        # Apply the patch file, if necessary.
        patch_files = project.get('patch_files', [])
        for patch_file in patch_files:
            g_log.info('applying patch file %r for %r' % (patch_file, name))
            patch_file_path = get_input_path(opts, patch_file)
            p = subprocess.Popen(
                args=['patch', '-i', patch_file_path, '-p', '1'],
                stdin=None,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=source_path)
            stdout, stderr = p.communicate()
            if p.wait() != 0:
                fatal(("unable to apply patch file %r in %r\n"
                       "-- stdout --\n%s\n"
                       "-- stderr --\n%s\n") % (patch_file_path, source_path,
                                                stdout, stderr))

        # Write the hash tag.
        with open(last_unpack_hash_path, "w") as f:
            f.write(archive_hash)
cmd = [] preprocess_cmd = None if build_info['style'].startswith('xcode-'): file_path = os.path.join(source_path, build_info['file']) cmd.extend(['xcodebuild']) # Add the arguments to select the build target. if build_info['style'] == 'xcode-project': cmd.extend( ('-target', build_info['target'], '-project', file_path)) elif build_info['style'] == 'xcode-workspace': cmd.extend( ('-scheme', build_info['scheme'], '-workspace', file_path)) else: fatal("unknown build style in project: %r" % project) # Add the build configuration selection. cmd.extend(('-configuration', build_config)) cmd.append('OBJROOT=%s' % (os.path.join(build_base, 'obj'))) cmd.append('SYMROOT=%s' % (os.path.join(build_base, 'sym'))) cmd.append('DSTROOT=%s' % (os.path.join(build_base, 'dst'))) cmd.append('SHARED_PRECOMPS_DIR=%s' % (os.path.join(build_base, 'pch'))) # Add arguments to force the appropriate compiler. cmd.append('CC=%s' % (opts.cc, )) cmd.append('CPLUSPLUS=%s' % (opts.cxx, )) # We need to force this variable here because Xcode has some completely
def _configure(self, path, extra_cmake_defs=None, execute=True):
    """Build (and optionally run) the CMake configure command line.

    Collects -D definitions from the parsed options, applies user cmake
    defines and `extra_cmake_defs` on top (these override earlier settings
    even when given with an explicit datatype), then assembles the full
    cmake invocation. When `execute` is true the command is run with
    `path` as the working directory. Returns the command as a list.
    """
    # Fixed mutable default argument ([]) — use None as the sentinel.
    if extra_cmake_defs is None:
        extra_cmake_defs = []
    cmake_cmd = self.opts.cmake

    defs = {}
    if self.opts.cc:
        defs['CMAKE_C_COMPILER'] = self.opts.cc
    if self.opts.cxx:
        defs['CMAKE_CXX_COMPILER'] = self.opts.cxx

    cmake_build_types = ('DEBUG', 'MINSIZEREL', 'RELEASE', 'RELWITHDEBINFO')
    if self.opts.cppflags or self.opts.cflags:
        all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
        defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)
        # Ensure that no flags get added based on build type when the user
        # explicitly specifies flags to use.
        for build_type in cmake_build_types:
            defs['CMAKE_C_FLAGS_' + build_type] = ""
    if self.opts.cppflags or self.opts.cxxflags:
        all_cxx_flags = ' '.join([self.opts.cppflags, self.opts.cxxflags])
        defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(all_cxx_flags)
        # Ensure that no flags get added based on build type when the user
        # explicitly specifies flags to use.
        for build_type in cmake_build_types:
            defs['CMAKE_CXX_FLAGS_' + build_type] = ""
    if self.opts.run_under:
        defs['TEST_SUITE_RUN_UNDER'] = \
            self._unix_quote_args(self.opts.run_under)
    if self.opts.benchmarking_only:
        defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
    if self.opts.only_compile:
        defs['TEST_SUITE_RUN_BENCHMARKS'] = 'Off'
    if self.opts.use_perf in ('time', 'all'):
        defs['TEST_SUITE_USE_PERF'] = 'ON'
    if self.opts.test_suite_externals:
        defs['TEST_SUITE_EXTERNALS_DIR'] = self.opts.test_suite_externals
    if self.opts.pgo and self.trained:
        defs['TEST_SUITE_PROFILE_USE'] = "On"
        defs['TEST_SUITE_PROFILE_GENERATE'] = "Off"
        if 'TEST_SUITE_RUN_TYPE' not in defs:
            defs['TEST_SUITE_RUN_TYPE'] = 'ref'
    for item in tuple(self.opts.cmake_defines) + tuple(extra_cmake_defs):
        k, v = item.split('=', 1)
        # make sure the overriding of the settings above also works
        # when the cmake-define-defined variable has a datatype
        # specified.
        key_no_datatype = k.split(':', 1)[0]
        if key_no_datatype in defs:
            del defs[key_no_datatype]
        defs[k] = v

    # We use 'cmake -LAH -N' later to find out the value of the
    # CMAKE_C_COMPILER and CMAKE_CXX_COMPILER variables.
    # 'cmake -LAH -N' will only return variables in the cache that have
    # a cmake type set. Therefore, explicitly set a 'FILEPATH' type on
    # these variables here, if they were untyped so far.
    if 'CMAKE_C_COMPILER' in defs:
        defs['CMAKE_C_COMPILER:FILEPATH'] = defs['CMAKE_C_COMPILER']
        del defs['CMAKE_C_COMPILER']
    if 'CMAKE_CXX_COMPILER' in defs:
        defs['CMAKE_CXX_COMPILER:FILEPATH'] = defs['CMAKE_CXX_COMPILER']
        del defs['CMAKE_CXX_COMPILER']

    lines = ['Configuring with {']
    for k, v in sorted(defs.items()):
        lines.append(" %s: '%s'" % (k, v))
    lines.append('}')

    if 'TEST_SUITE_REMOTE_HOST' in defs:
        self.remote_run = True

    # Prepare cmake cache if requested:
    cmake_flags = []
    for cache in self.opts.cmake_cache:
        if cache == "":
            continue
        # Shortcut for the common case.
        if not cache.endswith(".cmake") and "/" not in cache:
            cache = os.path.join(self._test_suite_dir(),
                                 "cmake/caches", cache + ".cmake")
        cache = os.path.abspath(cache)
        if not os.path.exists(cache):
            fatal("Could not find CMake cache file: " + cache)
        cmake_flags += ['-C', cache]

    for l in lines:
        logger.info(l)

    # Define compilers before specifying the cache files.
    early_defs = {}
    for key in ['CMAKE_C_COMPILER:FILEPATH',
                'CMAKE_CXX_COMPILER:FILEPATH']:
        value = defs.pop(key, None)
        if value is not None:
            early_defs[key] = value

    cmake_cmd = ([cmake_cmd] +
                 ['-D%s=%s' % (k, v) for k, v in early_defs.items()] +
                 cmake_flags + [self._test_suite_dir()] +
                 ['-D%s=%s' % (k, v) for k, v in defs.items()])

    if execute:
        self._check_call(cmake_cmd, cwd=path)

    return cmake_cmd
def test_build(base_name, run_info, variables, project, build_config,
               num_jobs, codesize_util=None):
    """Unpack, patch, build, and sample `project` in the sandbox.

    Generator yielding (success, test_name, samples) tuples for the timed
    build runs and, when `codesize_util` is given and the project declares
    a `binary_path`, for the size of the produced binary. Also validates
    that per-run stdout/stderr logs have consistent sizes and reruns the
    cleanup command unless temps are being saved.
    """
    name = '%s(config=%r,j=%d)' % (base_name, build_config, num_jobs)
    # Check if we need to expand the archive into the sandbox.
    archive_path = get_input_path(opts, project['archive'])
    with open(archive_path) as f:
        # Hash the project description in as well so configuration changes
        # also force a fresh unpack.
        archive_hash = hashlib.md5(f.read() + str(project)).hexdigest()

    # Compute the path to unpack to.
    source_path = get_output_path("..", "Sources", project['name'])

    # Load the hash of the last unpack, in case the archive has been updated.
    last_unpack_hash_path = os.path.join(source_path, "last_unpack_hash.txt")
    if os.path.exists(last_unpack_hash_path):
        with open(last_unpack_hash_path) as f:
            last_unpack_hash = f.read()
    else:
        last_unpack_hash = None

    # Unpack if necessary.
    if last_unpack_hash == archive_hash:
        g_log.info('reusing sources %r (already unpacked)' % name)
    else:
        # Remove any existing content, if necessary.
        try:
            shutil.rmtree(source_path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Extract the zip file.
        #
        # We shell out to unzip here because zipfile's extractall does not
        # appear to preserve permissions properly.
        commands.mkdir_p(source_path)
        g_log.info('extracting sources for %r' % name)

        if archive_path.endswith(".tar.gz") or \
                archive_path.endswith(".tar.bz2") or \
                archive_path.endswith(".tar.lzma"):
            p = subprocess.Popen(args=['tar', '-xf', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path,
                                 universal_newlines=True)
        else:
            p = subprocess.Popen(args=['unzip', '-q', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path,
                                 universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.wait() != 0:
            fatal(("unable to extract archive %r at %r\n"
                   "-- stdout --\n%s\n"
                   "-- stderr --\n%s\n") % (archive_path, source_path,
                                            stdout, stderr))

        # Apply the patch file, if necessary.
        patch_files = project.get('patch_files', [])
        for patch_file in patch_files:
            g_log.info('applying patch file %r for %r' % (patch_file, name))
            patch_file_path = get_input_path(opts, patch_file)
            p = subprocess.Popen(
                args=['patch', '-i', patch_file_path, '-p', '1'],
                stdin=None,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=source_path,
                universal_newlines=True)
            stdout, stderr = p.communicate()
            if p.wait() != 0:
                fatal(("unable to apply patch file %r in %r\n"
                       "-- stdout --\n%s\n"
                       "-- stderr --\n%s\n") % (patch_file_path, source_path,
                                                stdout, stderr))

        # Write the hash tag.
        with open(last_unpack_hash_path, "w") as f:
            f.write(archive_hash)

    # Create an env dict in case the user wants to use it.
    env = dict(os.environ)

    # Form the test build command.
    build_info = project['build_info']

    # Add arguments to ensure output files go into our build directory.
    dir_name = '%s_%s_j%d' % (base_name, build_config, num_jobs)
    output_base = get_output_path(dir_name)
    build_base = os.path.join(output_base, 'build', build_config)

    # Create the build base directory and by extension output base directory.
    commands.mkdir_p(build_base)

    cmd = []
    preprocess_cmd = None

    if build_info['style'].startswith('xcode-'):
        file_path = os.path.join(source_path, build_info['file'])
        cmd.extend(['xcodebuild'])

        # Add the arguments to select the build target.
        if build_info['style'] == 'xcode-project':
            cmd.extend(
                ('-target', build_info['target'], '-project', file_path))
        elif build_info['style'] == 'xcode-workspace':
            cmd.extend(
                ('-scheme', build_info['scheme'], '-workspace', file_path))
            cmd.extend(('-derivedDataPath', build_base))
        else:
            fatal("unknown build style in project: %r" % project)

        # Add the build configuration selection.
        cmd.extend(('-configuration', build_config))

        cmd.append('OBJROOT=%s' % os.path.join(build_base, 'obj'))
        cmd.append('SYMROOT=%s' % os.path.join(build_base, 'sym'))
        cmd.append('DSTROOT=%s' % os.path.join(build_base, 'dst'))
        cmd.append('SHARED_PRECOMPS_DIR=%s' % os.path.join(build_base, 'pch'))

        # Add arguments to force the appropriate compiler.
        cmd.append('CC=%s' % (opts.cc, ))
        cmd.append('CPLUSPLUS=%s' % (opts.cxx, ))

        # We need to force this variable here because Xcode has some
        # completely broken logic for deriving this variable from the
        # compiler name. <rdar://problem/7989147>
        cmd.append('LD=%s' % (opts.ld, ))
        cmd.append('LDPLUSPLUS=%s' % (opts.ldxx, ))

        # Force off the static analyzer, in case it was enabled in any
        # projects (we don't want to obscure what we are trying to time).
        cmd.append('RUN_CLANG_STATIC_ANALYZER=NO')

        # Inhibit all warnings, we don't want to count the time to generate
        # them against newer compilers which have added (presumably good)
        # warnings.
        cmd.append('GCC_WARN_INHIBIT_ALL_WARNINGS=YES')

        # Add additional arguments to force the build scenario we want.
        cmd.extend(('-jobs', str(num_jobs)))

        # If the user specifies any additional options to be included on the
        # command line, append them here.
        cmd.extend(build_info.get('extra_args', []))

        # If the user specifies any extra environment variables, put
        # them in our env dictionary.
        env_format = {'build_base': build_base}
        extra_env = build_info.get('extra_env', {})
        for k in extra_env:
            extra_env[k] = extra_env[k] % env_format
        env.update(extra_env)

        # Create preprocess cmd
        preprocess_cmd = 'rm -rf "%s"' % (build_base, )

    elif build_info['style'] == 'make':
        # Get the subdirectory in Source where our sources exist.
        src_dir = os.path.dirname(os.path.join(source_path,
                                               build_info['file']))
        # Grab our config from build_info. This config is currently only
        # used in the make build style since Xcode, the only other build
        # style as of today, handles changing configuration through the
        # configuration type variables. Make does not do this so we have to
        # use more brute force to get it right.
        config = build_info.get('config', {}).get(build_config, {})

        # Copy our source directory over to build_base.
        # We do this since we assume that we are processing a make project
        # which has already been configured and so that we do not need to
        # worry about make install or anything like that. We can just build
        # the project and use the user supplied path to its location in the
        # build directory.
        copied_src_dir = os.path.join(build_base, os.path.basename(dir_name))
        shutil.copytree(src_dir, copied_src_dir)

        # Create our make command.
        cmd.extend(['make', '-C', copied_src_dir, build_info['target'],
                    "-j", str(num_jobs)])

        # If the user specifies any additional options to be included on the
        # command line, append them here.
        cmd.extend(config.get('extra_args', []))

        # If the user specifies any extra environment variables, put
        # them in our env dictionary.
        # We create a dictionary for build_base so that users can use
        # it optionally in an environment variable via the python
        # format %(build_base)s.
        env_format = {'build_base': build_base}
        extra_env = config.get('extra_env', {})
        for k in extra_env:
            extra_env[k] = extra_env[k] % env_format
        env.update(extra_env)

        # Set build base to copied_src_dir so that if codesize_util
        # is not None, we pass it the correct path.
        build_base = copied_src_dir
        preprocess_cmd = 'rm -rf "%s"/build' % (build_base, )
        g_log.info('preprocess_cmd: %s' % preprocess_cmd)

    else:
        fatal("unknown build style in project: %r" % project)

    # Collect the samples.
    g_log.info('executing full build: %s' % args_to_quoted_string(cmd))
    stdout_path = os.path.join(output_base, "stdout.log")
    stderr_path = os.path.join(output_base, "stderr.log")

    for res in get_runN_test_data(name, variables, cmd,
                                  stdout=stdout_path, stderr=stderr_path,
                                  preprocess_cmd=preprocess_cmd, env=env):
        yield res

    # If we have a binary path, get the text size of our result.
    binary_path = build_info.get('binary_path', None)
    if binary_path is not None and codesize_util is not None:
        tname = "%s.size" % (name, )
        success = False
        samples = []
        try:
            # We use a dictionary here for our formatted processing of
            # binary_path so that if the user needs our build config he can
            # get it via %(build_config)s in his string and if he does not,
            # an error is not thrown.
            format_args = {"build_config": build_config}
            cmd = codesize_util + [
                os.path.join(build_base, binary_path % format_args)
            ]
            if opts.verbose:
                g_log.info('running: %s' % " ".join("'%s'" % arg
                                                    for arg in cmd))
            result = subprocess.check_output(cmd).strip()
            if result != "fail":
                # Renamed from 'bytes' to avoid shadowing the builtin.
                size_in_bytes = int(result)
                success = True

                # For now, the way the software is set up things are going to
                # get confused if we don't report the same number of samples
                # as reported for other variables. So we just report the size
                # N times.
                #
                # FIXME: We should resolve this, eventually.
                for _ in range(variables.get('run_count')):
                    samples.append(size_in_bytes)
            else:
                g_log.warning('Codesize failed.')
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            else:
                g_log.warning('Codesize failed with ENOENT.')
        yield (success, tname, samples)

    # Check that the file sizes of the output log files "make sense", and
    # warn if they do not. That might indicate some kind of non-determinism
    # in the test command, which makes timing less useful.
    stdout_sizes = []
    stderr_sizes = []
    run_count = variables['run_count']
    for i in range(run_count):
        iter_stdout_path = '%s.%d' % (stdout_path, i)
        iter_stderr_path = '%s.%d' % (stderr_path, i)
        if os.path.exists(iter_stdout_path):
            stdout_sizes.append(os.stat(iter_stdout_path).st_size)
        else:
            stdout_sizes.append(None)
        if os.path.exists(iter_stderr_path):
            stderr_sizes.append(os.stat(iter_stderr_path).st_size)
        else:
            stderr_sizes.append(None)

    if len(set(stdout_sizes)) != 1:
        g_log.warning(('test command had stdout files with '
                       'different sizes: %r') % stdout_sizes)
    if len(set(stderr_sizes)) != 1:
        g_log.warning(('test command had stderr files with '
                       'different sizes: %r') % stderr_sizes)

    # Unless cleanup is disabled, rerun the preprocessing command.
    if not opts.save_temps and preprocess_cmd:
        g_log.info('cleaning up temporary results')
        if os.system(preprocess_cmd) != 0:
            g_log.warning("cleanup command returned a non-zero exit status")
def test_cc_command(base_name, run_info, variables, input, output, flags,
                    extra_flags, has_output=True, ignore_stderr=False):
    """Run a single compile test of `input` with the compiler under test.

    Builds `input` with the given `flags` (plus `extra_flags`), yielding
    (success, test_name, samples) tuples from get_runN_test_data for the
    compile itself and, when `has_output` is true, a final '<name>.size'
    entry with the size of the produced output file.

    NOTE: `input` intentionally keeps its (builtin-shadowing) name since it
    is part of the public signature and may be passed by keyword.
    """
    name = '%s/(%s)' % (base_name, ' '.join(flags),)
    input = get_input_path(opts, input)
    output = get_output_path(output)

    cmd = [variables.get('cc')]
    cmd.extend(extra_flags)
    cmd.append(input)
    cmd.extend(flags)

    # Inhibit all warnings, we don't want to count the time to generate them
    # against newer compilers which have added (presumably good) warnings.
    cmd.append('-w')

    # Do a memory profiling run, if requested.
    #
    # FIXME: Doing this as a separate step seems silly. We shouldn't do any
    # extra run just to get the memory statistics.
    if opts.memory_profiling:
        # Find the cc1 command, which we use to do memory profiling. To do
        # this we execute the compiler with '-###' to figure out what it
        # wants to do.
        cc_output = commands.capture(cmd + ['-o', '/dev/null', '-###'],
                                     include_stderr=True).strip()
        cc_commands = []
        for ln in cc_output.split('\n'):
            # Filter out known garbage.
            if (ln == 'Using built-in specs.' or
                    ln.startswith('Configured with:') or
                    ln.startswith('Target:') or
                    ln.startswith('Thread model:') or
                    ln.startswith('InstalledDir:') or
                    ' version ' in ln):
                continue
            cc_commands.append(ln)

        if len(cc_commands) != 1:
            fatal('unable to determine cc1 command: %r' % cc_output)

        cc1_cmd = shlex.split(cc_commands[0])
        for res in get_runN_test_data(name, variables, cc1_cmd,
                                      ignore_stderr=ignore_stderr,
                                      sample_mem=True, only_mem=True):
            yield res

    commands.rm_f(output)
    for res in get_runN_test_data(name, variables, cmd + ['-o', output],
                                  ignore_stderr=ignore_stderr):
        yield res

    # If the command has output, track its size.
    if has_output:
        tname = '%s.size' % (name,)
        success = False
        samples = []
        try:
            stat = os.stat(output)
            success = True

            # For now, the way the software is set up things are going to get
            # confused if we don't report the same number of samples as
            # reported for other variables. So we just report the size N
            # times.
            #
            # FIXME: We should resolve this, eventually.
            samples = [stat.st_size] * variables.get('run_count')
        # Was the Python-2-only form `except OSError, e:`; the `as` form is
        # valid on 2.6+ and 3.x and matches the rest of the file.
        except OSError as e:
            # A missing output file simply means the compile failed; anything
            # else is unexpected and should propagate.
            if e.errno != errno.ENOENT:
                raise
        yield (success, tname, samples)
def _configure(self, path, extra_cmake_defs=None, execute=True):
    """Build (and by default execute) the cmake configuration command line.

    path -- the build directory to configure in (cwd for cmake).
    extra_cmake_defs -- optional extra 'KEY=VALUE' cmake define strings,
        applied together with the user's --cmake-define options.
        (Default changed from a shared mutable `[]` to the `None` sentinel
        to avoid the mutable-default-argument pitfall; behavior for all
        callers is unchanged.)
    execute -- when True, run the command via self._check_call.

    Returns the full cmake command as a list of arguments.
    """
    if extra_cmake_defs is None:
        extra_cmake_defs = []

    cmake_cmd = self.opts.cmake

    # Translate our command-line options into cmake cache definitions.
    defs = {}
    if self.opts.cc:
        defs['CMAKE_C_COMPILER'] = self.opts.cc
    if self.opts.cxx:
        defs['CMAKE_CXX_COMPILER'] = self.opts.cxx
    if self.opts.cppflags or self.opts.cflags:
        all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
        defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)
    if self.opts.cppflags or self.opts.cxxflags:
        all_cxx_flags = ' '.join([self.opts.cppflags, self.opts.cxxflags])
        defs['CMAKE_CXX_FLAGS'] = self._unix_quote_args(all_cxx_flags)
    if self.opts.run_under:
        defs['TEST_SUITE_RUN_UNDER'] = \
            self._unix_quote_args(self.opts.run_under)
    if self.opts.benchmarking_only:
        defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
    if self.opts.only_compile:
        defs['TEST_SUITE_RUN_BENCHMARKS'] = 'Off'
    if self.opts.use_perf in ('time', 'all'):
        defs['TEST_SUITE_USE_PERF'] = 'ON'
    if self.opts.test_suite_externals:
        defs['TEST_SUITE_EXTERNALS_DIR'] = self.opts.test_suite_externals
    if self.opts.pgo and self.trained:
        # Second (post-training) PGO build: consume the profile data.
        defs['TEST_SUITE_PROFILE_USE'] = "On"
        defs['TEST_SUITE_PROFILE_GENERATE'] = "Off"
        if 'TEST_SUITE_RUN_TYPE' not in defs:
            defs['TEST_SUITE_RUN_TYPE'] = 'ref'

    # Problem Size
    if self.opts.test_size != "regular":
        if self.opts.test_size == "large":
            defs["LARGE_PROBLEM_SIZE"] = "On"
        elif self.opts.test_size == "small":
            defs["SMALL_PROBLEM_SIZE"] = "On"

    for item in self.opts.cmake_defines + extra_cmake_defs:
        k, v = item.split('=', 1)
        # make sure the overriding of the settings above also works
        # when the cmake-define-defined variable has a datatype
        # specified.
        key_no_datatype = k.split(':', 1)[0]
        if key_no_datatype in defs:
            del defs[key_no_datatype]
        defs[k] = v

    # We use 'cmake -LAH -N' later to find out the value of the
    # CMAKE_C_COMPILER and CMAKE_CXX_COMPILER variables.
    # 'cmake -LAH -N' will only return variables in the cache that have
    # a cmake type set. Therefore, explicitly set a 'FILEPATH' type on
    # these variables here, if they were untyped so far.
    if 'CMAKE_C_COMPILER' in defs:
        defs['CMAKE_C_COMPILER:FILEPATH'] = defs['CMAKE_C_COMPILER']
        del defs['CMAKE_C_COMPILER']
    if 'CMAKE_CXX_COMPILER' in defs:
        defs['CMAKE_CXX_COMPILER:FILEPATH'] = defs['CMAKE_CXX_COMPILER']
        del defs['CMAKE_CXX_COMPILER']

    lines = ['Configuring with {']
    for k, v in sorted(defs.items()):
        lines.append("  %s: '%s'" % (k, v))
    lines.append('}')

    # Prepare cmake cache if requested:
    cmake_flags = []
    for cache in self.opts.cmake_cache:
        # Shortcut for the common case: resolve a bare cache name against
        # the test suite's cmake/caches directory.
        if not cache.endswith(".cmake") and "/" not in cache:
            cache = os.path.join(self._test_suite_dir(),
                                 "cmake/caches", cache + ".cmake")
        if not os.path.exists(cache):
            fatal("Could not find CMake cache file: " + cache)
        cmake_flags += ['-C', cache]

    for line in lines:
        note(line)

    cmake_cmd = [cmake_cmd] + cmake_flags + [self._test_suite_dir()] + \
        ['-D%s=%s' % (k, v) for k, v in defs.items()]
    if execute:
        self._check_call(cmake_cmd, cwd=path)

    return cmake_cmd
def get_cc_info(path, cc_flags=None):
    """get_cc_info(path) -> { ... }

    Extract various information on the given compiler and return a dictionary
    of the results: normalized name, version, build kind (PROD/DEV), source
    branch/revision/tag when detectable, target triple, and hashes of the
    compiler (and cc1) executables.

    cc_flags -- optional extra flags passed on every interrogation command.
        (Default changed from a shared mutable `[]` to the `None` sentinel;
        behavior is unchanged.)
    """
    if cc_flags is None:
        cc_flags = []

    cc = path

    # Interrogate the compiler.
    cc_version = capture([cc, '-v', '-E'] + cc_flags +
                         ['-x', 'c', '/dev/null', '-###'],
                         include_stderr=True).strip()

    # Determine the assembler version, as found by the compiler.
    cc_as_version = capture([cc, "-c", '-Wa,-v', '-o', '/dev/null'] +
                            cc_flags + ['-x', 'assembler', '/dev/null'],
                            include_stderr=True).strip()
    if "clang: error: unsupported argument '-v'" in cc_as_version:
        cc_as_version = "Clang built in."

    # Determine the linker version, as found by the compiler.
    cc_ld_version = capture(([cc, "-Wl,-v", "-dynamiclib"]),
                            include_stderr=True).strip()

    # Extract the default target .ll (or assembly, for non-LLVM compilers).
    cc_target_assembly = capture([cc, '-S', '-flto', '-o', '-'] + cc_flags +
                                 ['-x', 'c', '/dev/null'],
                                 include_stderr=True).strip()

    # Extract the compiler's response to -dumpmachine as the target.
    cc_target = cc_dumpmachine = capture([cc, '-dumpmachine']).strip()

    # Default the target to the response from dumpmachine.
    cc_target = cc_dumpmachine

    # Parse out the compiler's version line and the path to the "cc1" binary.
    cc1_binary = None
    version_ln = None
    cc_name = cc_version_num = cc_build_string = cc_extra = ""
    for ln in cc_version.split('\n'):
        if ' version ' in ln:
            version_ln = ln
        elif 'cc1' in ln or 'clang-cc' in ln:
            m = re.match(r' "?([^"]*)"?.*"?-E"?.*', ln)
            if not m:
                fatal("unable to determine cc1 binary: %r: %r" % (cc, ln))
            cc1_binary, = m.groups()
        elif "-_Amachine" in ln:
            m = re.match(r'([^ ]*) *-.*', ln)
            if not m:
                fatal("unable to determine cc1 binary: %r: %r" % (cc, ln))
            cc1_binary, = m.groups()
    if cc1_binary is None:
        logger.error("unable to find compiler cc1 binary: %r: %r" %
                     (cc, cc_version))
    if version_ln is None:
        logger.error("unable to find compiler version: %r: %r" %
                     (cc, cc_version))
    else:
        m = re.match(r'(.*) version ([^ ]*) +(\([^(]*\))(.*)', version_ln)
        if m is not None:
            cc_name, cc_version_num, cc_build_string, cc_extra = m.groups()
        else:
            # If that didn't match, try a more basic pattern.
            m = re.match(r'(.*) version ([^ ]*)', version_ln)
            if m is not None:
                cc_name, cc_version_num = m.groups()
            else:
                logger.error("unable to determine compiler version: %r: %r" %
                             (cc, version_ln))
                cc_name = "unknown"

    # Compute normalized compiler name and type. We try to grab source
    # revisions, branches, and tags when possible.
    cc_norm_name = None
    cc_build = None
    cc_src_branch = cc_alt_src_branch = None
    cc_src_revision = cc_alt_src_revision = None
    cc_src_tag = None
    llvm_capable = False
    cc_extra = cc_extra.strip()
    if cc_name == 'icc':
        cc_norm_name = 'icc'
        cc_build = 'PROD'
        cc_src_tag = cc_version_num
    elif cc_name == 'gcc' and (cc_extra == '' or
                               re.match(r' \(dot [0-9]+\)', cc_extra)):
        cc_norm_name = 'gcc'
        m = re.match(r'\(Apple Inc. build ([0-9]*)\)', cc_build_string)
        if m:
            cc_build = 'PROD'
            cc_src_tag, = m.groups()
        else:
            logger.error('unable to determine gcc build version: %r' %
                         cc_build_string)
    elif (cc_name in ('clang', 'LLVM', 'Debian clang', 'Apple clang',
                      'Apple LLVM') and
          (cc_extra == '' or 'based on LLVM' in cc_extra or
           (cc_extra.startswith('(') and cc_extra.endswith(')')))):
        llvm_capable = True
        if cc_name == 'Apple clang' or cc_name == 'Apple LLVM':
            cc_norm_name = 'apple_clang'
        else:
            cc_norm_name = 'clang'

        m = re.match(r'\(([^ ]*)( ([0-9]+))?\)', cc_build_string)
        if m:
            cc_src_branch, _, cc_src_revision = m.groups()

            # With a CMake build, the branch is not emitted.
            if cc_src_branch and not cc_src_revision and \
                    cc_src_branch.isdigit():
                cc_src_revision = cc_src_branch
                cc_src_branch = ""

            # These show up with git-svn.
            if cc_src_branch == '$URL$':
                cc_src_branch = ""
        else:
            # Otherwise, see if we can match a branch and a tag name. That
            # could be a git hash.
            m = re.match(r'\((.+) ([^ ]+)\)', cc_build_string)
            if m:
                cc_src_branch, cc_src_revision = m.groups()
            else:
                logger.error('unable to determine '
                             'Clang development build info: %r' %
                             ((cc_name, cc_build_string, cc_extra),))
                cc_src_branch = ""

        m = re.search('clang-([0-9.]*)', cc_src_branch)
        if m:
            cc_build = 'PROD'
            cc_src_tag, = m.groups()

            # We sometimes use a tag of 9999 to indicate a dev build.
            if cc_src_tag == '9999':
                cc_build = 'DEV'
        else:
            cc_build = 'DEV'

        # Newer Clang's can report separate versions for LLVM and Clang. Parse
        # the cc_extra text so we can get the maximum SVN version.
        if cc_extra.startswith('(') and cc_extra.endswith(')'):
            m = re.match(r'\((.+) ([^ ]+)\)', cc_extra)
            if m:
                cc_alt_src_branch, cc_alt_src_revision = m.groups()

                # With a CMake build, the branch is not emitted.
                # BUGFIX: this previously tested cc_src_branch /
                # cc_src_revision (a copy-paste from the primary branch
                # above) while assigning the cc_alt_* fields; it must
                # inspect the alternate pair it just parsed.
                if cc_alt_src_branch and not cc_alt_src_revision and \
                        cc_alt_src_branch.isdigit():
                    cc_alt_src_revision = cc_alt_src_branch
                    cc_alt_src_branch = ""
            else:
                logger.error('unable to determine '
                             'Clang development build info: %r' %
                             ((cc_name, cc_build_string, cc_extra),))
    elif cc_name == 'gcc' and 'LLVM build' in cc_extra:
        llvm_capable = True
        cc_norm_name = 'llvm-gcc'
        m = re.match(r' \(LLVM build ([0-9.]+)\)', cc_extra)
        if m:
            llvm_build, = m.groups()
            if llvm_build:
                cc_src_tag = llvm_build.strip()
            cc_build = 'PROD'
        else:
            cc_build = 'DEV'
    else:
        logger.error("unable to determine compiler name: %r" %
                     ((cc_name, cc_build_string),))

    if cc_build is None:
        logger.error("unable to determine compiler build: %r" % cc_version)

    # If LLVM capable, fetch the llvm target instead.
    if llvm_capable:
        m = re.search('target triple = "(.*)"', cc_target_assembly)
        if m:
            cc_target, = m.groups()
        else:
            logger.error("unable to determine LLVM compiler target: %r: %r" %
                         (cc, cc_target_assembly))

    # Hash the executables so run orders can distinguish rebuilt compilers.
    # Use context managers so the file handles are closed promptly (the
    # previous open(...).read() leaked the handle until GC).
    cc_exec_hash = hashlib.sha1()
    with open(cc, 'rb') as f:
        cc_exec_hash.update(f.read())

    info = {
        'cc_build': cc_build,
        'cc_name': cc_norm_name,
        'cc_version_number': cc_version_num,
        'cc_dumpmachine': cc_dumpmachine,
        'cc_target': cc_target,
        'cc_version': cc_version,
        'cc_exec_hash': cc_exec_hash.hexdigest(),
        'cc_as_version': cc_as_version,
        'cc_ld_version': cc_ld_version,
        'cc_target_assembly': cc_target_assembly,
    }
    if cc1_binary is not None and os.path.exists(cc1_binary):
        cc1_exec_hash = hashlib.sha1()
        with open(cc1_binary, 'rb') as f:
            cc1_exec_hash.update(f.read())
        info['cc1_exec_hash'] = cc1_exec_hash.hexdigest()
    if cc_src_tag is not None:
        info['cc_src_tag'] = cc_src_tag
    if cc_src_revision is not None:
        info['cc_src_revision'] = cc_src_revision
    if cc_src_branch:
        info['cc_src_branch'] = cc_src_branch
    if cc_alt_src_revision is not None:
        info['cc_alt_src_revision'] = cc_alt_src_revision
    if cc_alt_src_branch is not None:
        info['cc_alt_src_branch'] = cc_alt_src_branch

    # Infer the run order from the other things we have computed.
    info['inferred_run_order'] = get_inferred_run_order(info)

    return info
def run_test(self, opts):
    """Validate options, configure the cmake-based test suite, run the
    requested number of compile/execute samples, and write the merged
    report (JSON, xUnit XML and CSV) before submitting it.

    Returns the submission result from self.submit().
    """
    # --- Option validation / normalization -------------------------------
    if opts.cc is not None:
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            self._fatal('--cc does not point to a valid executable.')

        # If there was no --cxx given, attempt to infer it from the --cc.
        if opts.cxx is None:
            opts.cxx = \
                lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                logger.info("Inferred C++ compiler under test as: %r" %
                            (opts.cxx, ))
            else:
                self._fatal("unable to infer --cxx - set it manually.")
        else:
            opts.cxx = resolve_command_path(opts.cxx)

        if not os.path.exists(opts.cxx):
            self._fatal("invalid --cxx argument %r, does not exist" %
                        (opts.cxx))

    if opts.test_suite_root is None:
        self._fatal('--test-suite is required')
    if not os.path.exists(opts.test_suite_root):
        self._fatal("invalid --test-suite argument, does not exist: %r" %
                    (opts.test_suite_root))
    opts.test_suite_root = os.path.abspath(opts.test_suite_root)

    if opts.test_suite_externals:
        if not os.path.exists(opts.test_suite_externals):
            self._fatal(
                "invalid --test-externals argument, does not exist: %r" %
                (opts.test_suite_externals, ))
        opts.test_suite_externals = os.path.abspath(
            opts.test_suite_externals)

    # All of the tools we invoke must exist and be executable.
    opts.cmake = resolve_command_path(opts.cmake)
    if not isexecfile(opts.cmake):
        self._fatal("CMake tool not found (looked for %s)" % opts.cmake)
    opts.make = resolve_command_path(opts.make)
    if not isexecfile(opts.make):
        self._fatal("Make tool not found (looked for %s)" % opts.make)
    opts.lit = resolve_command_path(opts.lit)
    if not isexecfile(opts.lit):
        self._fatal("LIT tool not found (looked for %s)" % opts.lit)
    if opts.run_under:
        split = shlex.split(opts.run_under)
        split[0] = resolve_command_path(split[0])
        if not isexecfile(split[0]):
            self._fatal("Run under wrapper not found (looked for %s)" %
                        opts.run_under)

    if opts.single_result:
        # --single-result implies --only-test
        opts.only_test = opts.single_result

    if opts.only_test:
        # --only-test can either point to a particular test or a directory.
        # Therefore, test_suite_root + opts.only_test or
        # test_suite_root + dirname(opts.only_test) must be a directory.
        path = os.path.join(opts.test_suite_root, opts.only_test)
        parent_path = os.path.dirname(path)

        if os.path.isdir(path):
            opts.only_test = (opts.only_test, None)
        elif os.path.isdir(parent_path):
            opts.only_test = (os.path.dirname(opts.only_test),
                              os.path.basename(opts.only_test))
        else:
            self._fatal("--only-test argument not understood (must be a " +
                        " test or directory name)")

    if opts.single_result and not opts.only_test[1]:
        self._fatal("--single-result must be given a single test name, "
                    "not a directory name")

    # Flag lists are joined into single strings for cmake consumption.
    opts.cppflags = ' '.join(opts.cppflags)
    opts.cflags = ' '.join(opts.cflags)
    opts.cxxflags = ' '.join(opts.cxxflags)

    if opts.diagnose:
        if not opts.only_test:
            self._fatal("--diagnose requires --only-test")

    self.start_time = timestamp()

    # Work out where to put our build stuff
    if opts.timestamp_build:
        ts = self.start_time.replace(' ', '_').replace(':', '-')
        build_dir_name = "test-%s" % ts
    else:
        build_dir_name = "build"
    basedir = os.path.join(opts.sandbox_path, build_dir_name)
    self._base_path = basedir

    # An existing CMakeCache.txt means a previous configure can be reused
    # (unless the user forced reconfiguration).
    cmakecache = os.path.join(self._base_path, 'CMakeCache.txt')
    self.configured = not opts.run_configure and \
        os.path.exists(cmakecache)

    # If we are doing diagnostics, skip the usual run and do them now.
    if opts.diagnose:
        return self.diagnose()

    # configure, so we can extract toolchain information from the cmake
    # output.
    self._configure_if_needed()

    # Verify that we can actually find a compiler before continuing
    cmake_vars = self._extract_cmake_vars_from_cache()
    if "CMAKE_C_COMPILER" not in cmake_vars or \
            not os.path.exists(cmake_vars["CMAKE_C_COMPILER"]):
        self._fatal(
            "Couldn't find C compiler (%s). Maybe you should specify --cc?"
            % cmake_vars.get("CMAKE_C_COMPILER"))

    # We don't support compiling without testing as we can't get compile-
    # time numbers from LIT without running the tests.
    if opts.compile_multisample > opts.exec_multisample:
        logger.info("Increasing number of execution samples to %d" %
                    opts.compile_multisample)
        opts.exec_multisample = opts.compile_multisample

    if opts.auto_name:
        # Construct the nickname from a few key parameters.
        cc_info = self._get_cc_info(cmake_vars)
        cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
        opts.label += "__%s__%s" %\
            (cc_nick, cc_info['cc_target'].split('-')[0])
    logger.info('Using nickname: %r' % opts.label)

    # When we can't detect the clang version we use 0 instead. That
    # is a horrible failure mode because all of our data ends up going
    # to order 0. The user needs to give an order if we can't detect!
    if opts.run_order is None:
        cc_info = self._get_cc_info(cmake_vars)
        if cc_info['inferred_run_order'] == 0:
            # Use self._fatal for consistency with every other error path
            # in this method (previously called the module-level fatal()).
            self._fatal("Cannot detect compiler version. Specify --run-order"
                        " to manually define it.")

    # Now do the actual run.
    reports = []
    json_reports = []
    for i in range(max(opts.exec_multisample, opts.compile_multisample)):
        c = i < opts.compile_multisample
        e = i < opts.exec_multisample
        # only gather perf profiles on a single run.
        p = i == 0 and opts.use_perf in ('profile', 'all')
        run_report, json_data = self.run(cmake_vars, compile=c, test=e,
                                         profile=p)
        reports.append(run_report)
        json_reports.append(json_data)

    report = self._create_merged_report(reports)

    # Write the report out so it can be read by the submission tool.
    report_path = os.path.join(self._base_path, 'report.json')
    with open(report_path, 'w') as fd:
        fd.write(report.render())

    if opts.output:
        with open(opts.output, 'w') as fd:
            fd.write(report.render())

    xml_report_path = os.path.join(self._base_path,
                                   'test-results.xunit.xml')
    str_template = _lit_json_to_xunit_xml(json_reports)
    with open(xml_report_path, 'w') as fd:
        fd.write(str_template)

    csv_report_path = os.path.join(self._base_path, 'test-results.csv')
    str_template = _lit_json_to_csv(json_reports)
    with open(csv_report_path, 'w') as fd:
        fd.write(str_template)

    return self.submit(report_path, opts, 'nts')