def getVersion(self, executable):
    process = subprocess.Popen([executable, '-help'],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = process.communicate()
    if stderr:
        sys.exit(Util.decodeToString(stderr))
    if process.returncode:
        sys.exit('CPAchecker returned exit code {0}'.format(process.returncode))
    stdout = Util.decodeToString(stdout)
    version = ' '.join(stdout.splitlines()[0].split()[1:])  # first word is 'CPAchecker'

    # CPAchecker might be within an SVN repository.
    # Determine the revision and add it to the version.
    cpaShDir = os.path.dirname(os.path.realpath(executable))
    cpacheckerDir = os.path.join(cpaShDir, os.path.pardir)
    try:
        svnProcess = subprocess.Popen(['svnversion', cpacheckerDir],
                                      env={'LANG': 'C'},
                                      stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = svnProcess.communicate()
        stdout = Util.decodeToString(stdout).strip()
        if not (svnProcess.returncode or stderr or (stdout == 'exported')):
            return version + ' ' + stdout
    except OSError:
        pass

    # CPAchecker might be within a git-svn repository.
    try:
        gitProcess = subprocess.Popen(['git', 'svn', 'find-rev', 'HEAD'],
                                      env={'LANG': 'C'}, cwd=cpacheckerDir,
                                      stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = gitProcess.communicate()
        stdout = Util.decodeToString(stdout).strip()
        if not (gitProcess.returncode or stderr) and stdout:
            return version + ' ' + stdout \
                + ('M' if self._isGitRepositoryDirty(cpacheckerDir) else '')

        # CPAchecker might be within a git repository.
        gitProcess = subprocess.Popen(['git', 'log', '-1', '--pretty=format:%h', '--abbrev-commit'],
                                      env={'LANG': 'C'}, cwd=cpacheckerDir,
                                      stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = gitProcess.communicate()
        stdout = Util.decodeToString(stdout).strip()
        if not (gitProcess.returncode or stderr) and stdout:
            return version + ' ' + stdout \
                + ('+' if self._isGitRepositoryDirty(cpacheckerDir) else '')
    except OSError:
        pass

    return version
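
# The _isGitRepositoryDirty() helper used above is not part of this excerpt.
# A minimal sketch of what it could look like, assuming 'git status --porcelain'
# (one output line per modified or untracked file) is an acceptable dirtiness
# check; the actual implementation elsewhere in this file may differ.
def _isGitRepositoryDirty(self, directory):
    gitProcess = subprocess.Popen(['git', 'status', '--porcelain'],
                                  env={'LANG': 'C'}, cwd=directory,
                                  stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = gitProcess.communicate()
    if gitProcess.returncode or stderr:
        return False  # assumption: treat a failed git call as 'not dirty'
    return bool(stdout.strip())  # any status output means uncommitted changes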
def main(argv=None):
    if argv is None:
        argv = sys.argv

    parser = argparse.ArgumentParser(description=
        """Run benchmarks with a verification tool.
        Documented example files for the benchmark definitions
        can be found as 'doc/examples/benchmark*.xml'.
        Use the table-generator.py script to create nice tables
        from the output of this script.""")

    parser.add_argument("files", nargs='+', metavar="FILE",
                      help="XML file with benchmark definition")
    parser.add_argument("-d", "--debug",
                      action="store_true",
                      help="Enable debug output")
    parser.add_argument("-r", "--rundefinition", dest="selectedRunDefinitions",
                      action="append",
                      help="Run only the specified RUN_DEFINITION from the benchmark definition file. "
                           + "This option can be specified several times.",
                      metavar="RUN_DEFINITION")
    parser.add_argument("-t", "--test", dest="selectedRunDefinitions",
                      action="append",
                      help="Same as -r/--rundefinition (deprecated)",
                      metavar="TEST")
    parser.add_argument("-s", "--sourcefiles", dest="selectedSourcefileSets",
                      action="append",
                      help="Run only the files from the sourcefiles tag with SOURCE as name. "
                           + "This option can be specified several times.",
                      metavar="SOURCES")
    parser.add_argument("-n", "--name", dest="name", default=None,
                      help="Set name of benchmark execution to NAME",
                      metavar="NAME")
    parser.add_argument("-o", "--outputpath", dest="output_path", type=str,
                      default="./test/results/",
                      help="Output prefix for the generated results. "
                           + "If the path is a folder, files are put into it, "
                           + "otherwise it is used as a prefix for the resulting files.")
    parser.add_argument("-T", "--timelimit", dest="timelimit", default=None,
                      help="Time limit in seconds for each run (-1 to disable)",
                      metavar="SECONDS")
    parser.add_argument("-M", "--memorylimit", dest="memorylimit", default=None,
                      help="Memory limit in MB (-1 to disable)",
                      metavar="MB")
    parser.add_argument("-N", "--numOfThreads", dest="numOfThreads", default=None, type=int,
                      help="Run n benchmarks in parallel",
                      metavar="n")
    parser.add_argument("-x", "--moduloAndRest", dest="moduloAndRest",
                      default=(1, 0), nargs=2, type=int,
                      help="Run only a subset of run definitions for which (i %% a == b) holds, "
                           + "with i being the index of the run definition in the benchmark definition file "
                           + "(starting with 1).",
                      metavar=("a", "b"))
    parser.add_argument("-c", "--limitCores", dest="corelimit", type=int, default=None,
                      metavar="N",
                      help="Limit each run of the tool to N CPU cores (-1 to disable).")
    parser.add_argument("--commit", dest="commit", action="store_true",
                      help="If the output path is a git repository without local changes, "
                           + "add and commit the result files.")
    parser.add_argument("--message", dest="commitMessage", type=str,
                      default="Results for benchmark run",
                      help="Commit message if --commit is used.")
    parser.add_argument("--cloud", dest="cloud", action="store_true",
                      help="Use cloud to execute benchmarks.")
    parser.add_argument("--cloudMaster", dest="cloudMaster",
                      metavar="HOST",
                      help="Sets the master host of the cloud to be used.")
    parser.add_argument("--cloudPriority", dest="cloudPriority",
                      metavar="PRIORITY",
                      help="Sets the priority for this benchmark used in the cloud. "
                           + "Possible values are IDLE, LOW, HIGH, URGENT.")
    parser.add_argument("--cloudCPUModel", dest="cloudCPUModel", type=str, default=None,
                      metavar="CPU_MODEL",
                      help="Only execute runs on CPU models that contain the given string.")
    parser.add_argument("--maxLogfileSize", dest="maxLogfileSize", type=int, default=20,
                      metavar="SIZE",
                      help="Shrink logfiles to SIZE in MB if they are too big "
                           + "(-1 to disable, default value: 20 MB).")

    global config, OUTPUT_PATH
    config = parser.parse_args(argv[1:])
    if os.path.isdir(config.output_path):
        OUTPUT_PATH = os.path.normpath(config.output_path) + os.sep
    else:
        OUTPUT_PATH = config.output_path

    if config.debug:
        logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                            level=logging.DEBUG)
    else:
        logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")

    for arg in config.files:
        if not os.path.isfile(arg):
            parser.error("File {0} does not exist.".format(repr(arg)))

    if not config.cloud:
        try:
            processes = subprocess.Popen(['ps', '-eo', 'cmd'],
                                         stdout=subprocess.PIPE).communicate()[0]
            # '> 1' because this process itself matches the pattern, too
            if len(re.findall(r"python.*benchmark\.py", Util.decodeToString(processes))) > 1:
                logging.warning("Already running instance of this script detected. "
                                + "Please make sure to not interfere with somebody else's benchmarks.")
        except OSError:
            pass  # this does not work on Windows

    returnCode = 0
    for arg in config.files:
        if STOPPED_BY_INTERRUPT:
            break
        logging.debug("Benchmark {0} is started.".format(repr(arg)))
        rc = executeBenchmark(arg)
        returnCode = returnCode or rc
        logging.debug("Benchmark {0} is done.".format(repr(arg)))

    logging.debug("I think my job is done. Have a nice day!")
    return returnCode
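
# A minimal entry-point sketch (not part of the original excerpt), assuming
# STOPPED_BY_INTERRUPT is the module-level flag that main() checks between
# benchmarks: the hypothetical SIGINT handler only sets the flag, so Ctrl+C
# lets the current benchmark finish instead of killing the script mid-run.
# The real startup code may differ.
import signal

def _stopOnInterrupt(signum, frame):  # hypothetical handler name
    global STOPPED_BY_INTERRUPT
    STOPPED_BY_INTERRUPT = True

if __name__ == "__main__":
    signal.signal(signal.SIGINT, _stopOnInterrupt)
    sys.exit(main())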