def version(tool, nlines=1):
    """Return the version string reported by `TOOL --version`.

    :param tool: name of the executable to query.
    :param nlines: number of leading output lines that carry version text.

    Return 'N/A' when TOOL cannot be found on PATH. For 'gcc', the target
    it was configured for (via -dumpmachine) is appended in brackets.
    """
    # A missing tool is legitimate here (e.g. gnatemu when running through
    # a probe); if the tool is actually needed later, test failures will
    # surface the problem anyway.
    if not which(tool):
        return 'N/A'

    # --version lines often carry more than the version number, typically
    # a copyright notice. Heuristic: drop everything from the first comma.
    def strip_after_comma(line):
        comma = line.find(',')
        return line if comma == -1 else line[:comma]

    raw_lines = Run([tool, '--version']).out.split('\n')
    version_info = '\n'.join(
        strip_after_comma(line) for line in raw_lines[:nlines])

    if tool == 'gcc':
        gcc_target = Run([tool, '-dumpmachine']).out.strip()
        version_info += ' [%s]' % gcc_target

    return version_info
def __init__(self, command_line, save_output=False, save_input=False):
    """Constructor.

    :param command_line: list of strings representing the command line to
        be spawned.
    :type command_line: list[str]
    :param save_output: Save all output generated during the session for
        later retrieval (see method get_session_logs).
    :type save_output: bool
    :param save_input: Save all input generated during the session for
        later retrieval (see method get_session_logs).
    :type save_input: bool
    """
    self.save_output = save_output
    self.save_input = save_input

    # Coerce every command-line element to a string if needed.
    command_line = [str(arg) for arg in command_line]
    if not command_line:
        raise ExpectError('__init__',
                          'expect a non empty list as argument')

    # Resolve the program through PATH.
    command_line[0] = which(command_line[0])

    # Store the command line used
    logger.debug('spawn %s' % ' '.join(command_line))
    self.command_line = command_line

    # Spawn the process.
    (self.input,
     self.output,
     self.error,
     self.pid,
     self.handler) = _term.non_blocking_spawn(tuple(command_line))

    # Working buffer, consumed as expect matches are processed.
    self.buffer = ""

    # Never-flushed copy, maintained when input/output saving is on.
    self.saved_buffer = ""

    # Process liveness flag.
    self.process_is_dead = False

    # Last successful expect result.
    self.last_match = None

    # Command exit status; stays None until the command has exited.
    self.status = None
def spark_install_path():
    """Return the root directory of the SPARK install.

    gnatprove is expected at <root>/bin/gnatprove, so the install root
    sits two path components above the resolved executable.
    """
    bin_dir = os.path.dirname(fileutils.which("gnatprove"))
    return os.path.dirname(bin_dir)
def xcov(args, out=None, err=None, inp=None, register_failure=True, auto_config_args=True, auto_target_args=True): """ Run xcov with arguments ARGS, timeout control, valgrind control if available and enabled, output directed to OUT and failure registration if register_failure is True. Return the process status descriptor. ARGS may be a list or a whitespace separated string. See xcov_suite_args for the meaning of AUTO_*_ARGS arguments. """ # Make ARGS a list from whatever it is, to allow unified processing. # Then fetch the requested command, always first: args = to_list(args) covcmd = args[0] covargs = args[1:] if thistest.options.trace_dir is not None: # Bootstrap - run xcov under xcov if covcmd == 'coverage': thistest.current_test_index += 1 args = [ 'run', '-t', 'i686-pc-linux-gnu', '-o', os.path.join(thistest.options.trace_dir, str(thistest.current_test_index) + '.trace'), which(XCOV), '-eargs' ] + args covargs = xcov_suite_args(covcmd, covargs, auto_config_args, auto_target_args) + covargs # Determine which program we are actually going launch. This is # "gnatcov <cmd>" unless we are to execute some designated program # for this: covpgm = thistest.suite_covpgm_for(covcmd) covpgm = ([covpgm] if covpgm is not None else maybe_valgrind([XCOV]) + [covcmd]) # Execute, check status, raise on error and return otherwise. # # The gprvar options are only needed for the "libsupport" part of our # projects. They are pointless wrt coverage run or analysis activities # so we don't include them here. p = cmdrun(cmd=covpgm + covargs, inp=inp, out=out, err=err, register_failure=register_failure) if thistest.options.enable_valgrind == 'memcheck': memcheck_log = contents_of(MEMCHECK_LOG) thistest.fail_if( memcheck_log, 'MEMCHECK log not empty' '\nFROM "%s":' '\n%s' % (' '.join(covpgm + covargs), memcheck_log)) return p
def __init__(self, url, dest, branch='master', rev=None,
             force_checkout=True):
    """Initialize a Git working environment.

    :param url: the remote git url
    :type url: str
    :param dest: the local git repository path
    :type dest: str
    :param branch: the branch
    :type branch: str
    :param rev: the revision used
    :type rev: str | None
    :param force_checkout: do a checkout of the given `rev` or `branch`
        even if the repository already exists, it overwrite existing files
    :type force_checkout: bool

    :raise: Git_Error
    """
    self.url = unixpath(url)
    self.dest = unixpath(dest)
    self.branch = branch
    self.rev = rev
    # Name of the configured remote matching `url`; resolved below.
    self.remote = None
    self.git = which('git', default=None)
    if not self.git:
        raise Git_Error('git not found')

    try:
        # If the dest directory does not exist or is empty, do a git clone
        if not os.path.exists(self.dest) or not os.listdir(self.dest):
            self.clone()
            return
        remotes = self.remote_info()
    except Git_Error:
        if force_checkout:
            # dest exists but is not a usable repository: re-init it in
            # place and query the remotes again.
            self.init()
            remotes = self.remote_info()
        else:
            # NOTE(review): sys.exc_traceback is a Python 2 artifact
            # (removed in Python 3) -- confirm the intended interpreter.
            self.__error(
                "%s not empty and force_checkout is not True" % self.dest,
                traceback=sys.exc_traceback)

    # Find which configured remote, if any, points at our url.
    configured_remote = [r[0] for r in remotes if r[1] == self.url]
    if configured_remote:
        self.remote = configured_remote[0]
    elif not configured_remote:
        # This elif is equivalent to a plain else: no remote matched.
        error_msg = "Remote for %s not found. " % self.url
        if not remotes:
            error_msg += "No configured remotes"
        else:
            error_msg += "Configured remotes are:\n"
            error_msg += '\n'.join(set((r[1] for r in remotes)))
        if force_checkout:
            vcslogger.debug(error_msg)
            self.init()
            # NOTE(review): self.remote is still None on this path, so
            # the branch checkout below would target "None/<branch>" --
            # verify this recovery path.
        else:
            self.__error(error_msg)

    if force_checkout:
        try:
            if rev is not None:
                self.checkout(rev, force=True)
            else:
                self.checkout("%s/%s" % (self.remote, branch), force=True)
        except Git_Error:
            # ??? the ref to checkout is maybe not already fetched
            # force an update in that case
            self.update(rev)
def main():
    """Run the testsuite and generate reports"""
    # Parse the command lines options
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option('--diffs', dest='diffs', action='store_true',
                 default=False, help='show diffs on stdout')
    m.add_option("--old-result-dir", type="string", default=None,
                 help="Old result dir (to generate the report)")
    m.add_option('-b', '--build-dir', dest='build_dir',
                 help='separate PolyORB build directory')
    m.add_option('--testsuite-src-dir', dest='testsuite_src_dir',
                 help='path to polyorb testsuite sources')
    m.add_option('--coverage', dest='coverage', action='store_true',
                 default=False, help='generate coverage information')
    m.parse_args()

    # Various files needed or created by the testsuite
    results_file = m.options.output_dir + '/results'
    report_file = m.options.output_dir + '/report'

    # Start from a clean output dir unless we are only rerunning the
    # previously failed tests.
    if not m.options.failed_only:
        rm(m.options.output_dir, True)
        mkdir(m.options.output_dir)

    # Add current directory in PYTHONPATH (to find test_utils.py)
    env = Env()
    env.add_search_path('PYTHONPATH', os.path.join(os.getcwd(), 'tests'))
    fixed_support_dir = os.path.join(os.getcwd(), 'fixed_support_dir')
    env.add_search_path('FIXED_SUPPORT_DIR', fixed_support_dir)
    env.add_path(os.path.join(fixed_support_dir))
    env.add_path('.')  # many tests expect '.' in the PATH

    # Avoid extra debug traces
    os.environ['POLYORB_LOG_DEFAULT'] = 'error'

    # Generate the discs list for test.opt parsing
    # Always add 'ALL'
    common_discs = Env().discriminants

    # Be backward compatible with the old IDL tests
    # Set the polyorb discriminant and export the IDLCOMP
    # environment variable.
    common_discs.append('PolyORB')
    common_discs.append('PolyORB_IAC')
    os.environ['IDLCOMP'] = 'iac'

    # Retrieve also the polyorb specific discriminants
    p = Run([
        which('bash'),
        which('polyorb-config').replace('\\', '/'),
        '--config'
    ])

    # First find the support application perso.
    match = re.search('Application *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['app_%s' % k for k in match.group(1).split()]

    # Then the supported protocols
    match = re.search('Protocol *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['proto_%s' % k for k in match.group(1).split()]

    # Then the supported services
    match = re.search('Services *: (.+)', p.out)
    if match is not None:
        common_discs += ['serv_%s' % k for k in match.group(1).split()]

    # Do we have ssl support ?
    if re.search('SSL *support *: *yes', p.out):
        common_discs.append('ssl_support')

    # Persist the discriminant list for later use by the report tooling.
    with open(m.options.output_dir + '/discs', 'w') as f_disk:
        f_disk.write(", ".join(common_discs))

    # Expand ~ and ~user contructions for user PATH
    if m.options.build_dir is None:
        m.options.build_dir = os.path.join(os.getcwd(), os.pardir)
    else:
        m.options.build_dir = os.path.expanduser(m.options.build_dir)

    if m.options.testsuite_src_dir is None:
        m.options.testsuite_src_dir = os.path.join(os.getcwd())
    else:
        m.options.testsuite_src_dir = os.path.expanduser(
            m.options.testsuite_src_dir)

    # Compute the test list
    if m.args:
        test_glob = m.args[0]
    else:
        test_glob = None
    test_list = filter_list('./tests/*/*/*/test.py', test_glob)
    if os.path.isdir('regtests'):
        test_list.extend(filter_list('./regtests/*/test.*', test_glob))

    collect_result = generate_collect_result(
        m.options.output_dir, results_file, m.options.diffs)
    run_testcase = generate_run_testcase(
        'tests/run-test.py', common_discs, m.options)

    # Dump the environment so each test process can restore it.
    os.environ['TEST_CONFIG'] = os.path.join(os.getcwd(), 'env.dump')
    env.options = m.options
    env.log_dir = os.path.join(os.getcwd(), 'log')
    env.store(os.environ['TEST_CONFIG'])

    if len(test_list) == 0:
        logger.error("No matching test found")
        return

    MainLoop(test_list, run_testcase, collect_result,
             m.options.mainloop_jobs)

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_result_dir).txt_image(report_file)