Code example #1
    def expand_filename_pattern(self, pattern, base_dir, sourcefile=None):
        """
        The function expand_filename_pattern expands a filename pattern to a sorted list
        of filenames. The pattern can contain variables and wildcards.
        If base_dir is given and pattern is not absolute, base_dir and pattern are joined.
        """

        # replace variables like ${benchmark_path};
        # by converting to a list and back we can reuse the function 'substitute_vars()'
        expandedPattern = substitute_vars([pattern], self, sourcefile)
        assert len(expandedPattern) == 1
        expandedPattern = expandedPattern[0]

        if expandedPattern != pattern:
            logging.debug("Expanded variables in expression %r to %r.",
                          pattern, expandedPattern)

        fileList = util.expand_filename_pattern(expandedPattern, base_dir)

        # sort alphabetically
        fileList.sort()

        if not fileList:
            logging.warning("No files found matching %r.", pattern)

        return fileList
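
For orientation, here is a stand-alone sketch of roughly what the util.expand_filename_pattern(pattern, base_dir) call used above does, built only on the standard library (an approximation for illustration; benchexec's actual helper may handle more cases):

import glob
import os

def expand_pattern(pattern, base_dir):
    # stand-in for benchexec's util.expand_filename_pattern: expand ~ and
    # environment variables, join a relative pattern with base_dir,
    # and resolve shell-style wildcards via glob
    pattern = os.path.expandvars(os.path.expanduser(pattern))
    if base_dir and not os.path.isabs(pattern):
        pattern = os.path.join(base_dir, pattern)
    return glob.glob(pattern)

print(sorted(expand_pattern("*.prp", "/tmp/properties")))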
Code example #2
def print_task_cmdline(tool, executable, task_def_file):
    """Print command lines resulting for tasks from the given task-definition file."""
    no_limits = CURRENT_BASETOOL.ResourceLimits()

    task = yaml.safe_load(task_def_file)
    input_files = model.handle_files_from_task_definition(
        task.get("input_files"), task_def_file.name
    )

    def print_cmdline(task_description, property_file):
        task_description = task_def_file.name + " " + task_description
        with log_if_unsupported("task from " + task_description):
            cmdline = model.cmdline_for_run(
                tool,
                executable,
                [],
                input_files,
                task_def_file.name,
                property_file,
                copy.deepcopy(task.get("options")),
                no_limits,
            )
            print_list("Command line for " + task_description, cmdline)

    print_cmdline("without property file", None)

    for prop in task.get("properties", []):
        property_file = prop.get("property_file")
        if property_file:
            property_file = util.expand_filename_pattern(
                property_file, os.path.dirname(task_def_file.name)
            )[0]
            print_cmdline("with property " + property_file, property_file)
Code example #3
File: model.py Project: tautschnig/benchexec
    def expand_filename_pattern(self, pattern, base_dir, sourcefile=None):
        """
        The function expand_filename_pattern expands a filename pattern to a sorted list
        of filenames. The pattern can contain variables and wildcards.
        If base_dir is given and pattern is not absolute, base_dir and pattern are joined.
        """

        # replace variables like ${benchmark_path};
        # by converting to a list and back we can reuse the function 'substitute_vars()'
        expandedPattern = substitute_vars([pattern], self, sourcefile)
        assert len(expandedPattern) == 1
        expandedPattern = expandedPattern[0]

        if expandedPattern != pattern:
            logging.debug("Expanded variables in expression %r to %r.", pattern, expandedPattern)

        fileList = util.expand_filename_pattern(expandedPattern, base_dir)

        # sort alphabetically
        fileList.sort()

        if not fileList:
            logging.warning("No files found matching %r.", pattern)

        return fileList
Code example #4
File: smack.py Project: wanderseeme/benchexec
 def program_files(self, executable):
     """
     Returns a list of files or directories that are necessary to run the tool.
     """
     installDir = os.path.dirname(executable)
     return [executable] + util.flatten(
         util.expand_filename_pattern(path, installDir)
         for path in REQUIRED_PATHS)
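
Many program_files implementations in this collection share one idiom: expand each pattern in REQUIRED_PATHS relative to the install directory, then flatten the per-pattern match lists into a single list. util.flatten appears to act like itertools.chain.from_iterable; a stand-alone sketch with illustrative patterns:

import glob
import itertools
import os

REQUIRED_PATHS = ["bin/*", "lib/*.jar", "share/config"]  # illustrative values

def program_files(executable):
    install_dir = os.path.dirname(executable)
    # one list of glob matches per pattern, flattened into a single list
    matches = (glob.glob(os.path.join(install_dir, p)) for p in REQUIRED_PATHS)
    return [executable] + list(itertools.chain.from_iterable(matches))

print(program_files("/opt/mytool/mytool"))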
Code example #5
File: predatorhp.py Project: michkot/benchexec
 def program_files(self, executable):
     """ List of files/directories necessary to build and run the tool. """
     executableDir = os.path.dirname(executable)
     dependencies = [
         "predator-repo",
         "build-all.sh"
         ]
     return [executable] + util.flatten(
         util.expand_filename_pattern(dep, executableDir) for dep in dependencies)
Code example #6
File: template.py Project: stieglma/benchexec
 def program_files(self, executable):
     """
     OPTIONAL, this method is only necessary for situations when the benchmark environment
     needs to know all files belonging to a tool
     (to transport them to a cloud service, for example).
     Returns a list of files or directories that are necessary to run the tool,
     relative to the current directory.
     @return a list of paths as strings
     """
     installDir = os.path.dirname(executable)
     return [executable] + util.flatten(util.expand_filename_pattern(path, installDir) for path in self.REQUIRED_PATHS)
Code example #7
File: symbiotic.py Project: rishabhnambiar/benchexec
    def program_files(self, executable):
        installDir = joinpath(dirname(executable), '..')
        if self._version_newer_than('5.0.0'):
            paths = self.REQUIRED_PATHS_5_0_0
        elif self._version_newer_than('4.0.1'):
            paths = self.REQUIRED_PATHS_4_0_1
        else:
            paths = OldSymbiotic.REQUIRED_PATHS

        return [executable] + util.flatten(
            util.expand_filename_pattern(path, installDir) for path in paths)
Code example #8
 def program_files(self, executable):
     """
     OPTIONAL, this method is only necessary for situations when the benchmark environment
     needs to know all files belonging to a tool
     (to transport them to a cloud service, for example).
     Returns a list of files or directories that are necessary to run the tool,
     relative to the current directory.
     @return a list of paths as strings
     """
     installDir = os.path.dirname(executable)
     return [executable] + util.flatten(
         util.expand_filename_pattern(path, installDir)
         for path in self.REQUIRED_PATHS)
Code example #9
 def expand_patterns_from_tag(tag):
     result = []
     patterns = task_def.get(tag, [])
     if isinstance(patterns, str) or not isinstance(patterns, collections.abc.Iterable):
         # accept single string in addition to list of strings
         patterns = [patterns]
     for pattern in patterns:
         expanded = util.expand_filename_pattern(
             str(pattern), os.path.dirname(task_def_file))
         if not expanded:
             raise BenchExecException(
                 "Pattern '{}' in task-definition file {} did not match any paths."
                 .format(pattern, task_def_file))
         expanded.sort()
         result.extend(expanded)
     return result
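
The isinstance check at the top normalizes the tag's value so that a single string, a bare scalar, and a list of patterns are all handled uniformly. The same idea in isolation, with no benchexec imports:

def normalize_patterns(patterns):
    # a single string (or any non-iterable scalar) becomes a one-element list
    if isinstance(patterns, str) or not hasattr(patterns, "__iter__"):
        patterns = [patterns]
    return [str(p) for p in patterns]

assert normalize_patterns("a.c") == ["a.c"]
assert normalize_patterns(["a.c", "b.yml"]) == ["a.c", "b.yml"]
assert normalize_patterns(42) == ["42"]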
Code example #10
File: template.py Project: thaprau/benchexec
 def _program_files_from_executable(executable,
                                    required_paths,
                                    parent_dir=False):
     """
     Get a list of program files by expanding a list of path patterns
     and interpreting it as relative to the executable.
     This method can be used as helper for implementing the method program_files().
     Contrary to the default implementation of program_files(), this method does not explicitly
     add the executable to the list of returned files, it assumes that required_paths
     contains a path that covers the executable.
     @param executable: the path to the executable of the tool (typically the result of executable())
     @param required_paths: a list of required path patterns
     @param parent_dir: whether required_paths are relative to the directory of executable or the parent directory
     @return a list of paths as strings, suitable for result of program_files()
     """
     base_dir = os.path.dirname(executable)
     if parent_dir:
         base_dir = os.path.join(base_dir, os.path.pardir)
     return util.flatten(
         util.expand_filename_pattern(path, base_dir)
         for path in required_paths)
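
A hypothetical tool-info class could build its program_files() on this helper; parent_dir=True fits layouts where the executable sits in a bin/ subdirectory. A sketch only, assuming the helper is available as a method on the base class as the template.py source suggests (class name and patterns are made up):

import benchexec.tools.template

class MyTool(benchexec.tools.template.BaseTool):  # hypothetical subclass
    REQUIRED_PATHS = ["bin", "lib", "config/*.xml"]  # made-up patterns

    def program_files(self, executable):
        # "bin" covers the executable itself, as the docstring above requires;
        # parent_dir=True makes the patterns relative to the install root
        return self._program_files_from_executable(
            executable, self.REQUIRED_PATHS, parent_dir=True)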
Code example #11
File: ultimate.py Project: viktormalik/benchexec
 def program_files(self, executable):
     install_dir = os.path.dirname(executable)
     paths = self.REQUIRED_PATHS_SVCOMP17 if self._is_svcomp17_version(
         executable) else self.REQUIRED_PATHS
     return [executable] + util.flatten(
         util.expand_filename_pattern(path, install_dir) for path in paths)
Code example #12
File: cpa-witness-exec.py Project: FArian/tbf
 def program_files(self, executable):
     installDir = os.path.join(os.path.dirname(executable), os.path.pardir)
     return util.flatten(
         util.expand_filename_pattern(path, installDir)
         for path in self.REQUIRED_PATHS)
Code example #13
File: blast.py Project: hguenther/benchexec
 def program_files(self, executable):
     installDir = os.path.join(os.path.dirname(executable), os.path.pardir)
     return util.flatten(util.expand_filename_pattern(path, installDir) for path in REQUIRED_PATHS)
Code example #14
    def __init__(self, identifier, sourcefiles, fileOptions, runSet, propertyfile=None,
                 required_files_patterns=[], required_files=[],
                 expected_results={}):
        assert identifier
        self.identifier = identifier  # used for name of logfile, substitution, result-category
        self.sourcefiles = sourcefiles
        self.runSet = runSet
        self.specific_options = fileOptions # options that are specific for this run
        self.log_file = runSet.log_folder + os.path.basename(self.identifier) + ".log"
        self.result_files_folder = os.path.join(runSet.result_files_folder, os.path.basename(self.identifier))
        self.expected_results = expected_results or {} # filled externally

        self.required_files = set(required_files)
        rel_sourcefile = os.path.relpath(self.identifier, runSet.benchmark.base_dir)
        for pattern in required_files_patterns:
            this_required_files = runSet.expand_filename_pattern(pattern, runSet.benchmark.base_dir, rel_sourcefile)
            if not this_required_files:
                logging.warning(
                    'Pattern %s in requiredfiles tag did not match any file for task %s.',
                    pattern, self.identifier)
            self.required_files.update(this_required_files)

        # let's reduce memory consumption: if the two lists are equal, do not keep the second one
        self.options = runSet.options + fileOptions if fileOptions else runSet.options # all options to be used when executing this run
        substitutedOptions = substitute_vars(self.options, runSet, self.identifier)
        if substitutedOptions != self.options:
            self.options = substitutedOptions # for less memory again

        self.propertyfile = propertyfile or runSet.propertyfile
        self.properties = [] # filled externally

        def log_property_file_once(msg):
            if self.propertyfile not in _logged_missing_property_files:
                _logged_missing_property_files.add(self.propertyfile)
                logging.warning(msg)

        # replace run-specific stuff in the propertyfile and add it to the set of required files
        if self.propertyfile is None:
            log_property_file_once('No propertyfile specified. Score computation will ignore the results.')
        else:
            # we check two cases: direct filename or user-defined substitution, one of them must name an existing file
            # TODO: do we need the second case? it is equivalent to the previously used option "-spec ${sourcefile_path}/ALL.prp"
            expandedPropertyFiles = util.expand_filename_pattern(self.propertyfile, self.runSet.benchmark.base_dir)
            substitutedPropertyfiles = substitute_vars([self.propertyfile], runSet, self.identifier)
            assert len(substitutedPropertyfiles) == 1

            if expandedPropertyFiles:
                if len(expandedPropertyFiles) > 1:
                    log_property_file_once('Pattern {0} for sourcefile {1} in propertyfile tag matches more than one file. Only {2} will be used.'
                                           .format(self.propertyfile, self.identifier, expandedPropertyFiles[0]))
                self.propertyfile = expandedPropertyFiles[0]
            elif substitutedPropertyfiles and os.path.isfile(substitutedPropertyfiles[0]):
                self.propertyfile = substitutedPropertyfiles[0]
            else:
                log_property_file_once('Pattern {0} for sourcefile {1} in propertyfile tag did not match any file. It will be ignored.'
                                       .format(self.propertyfile, self.identifier))
                self.propertyfile = None

        if self.propertyfile:
            self.required_files.add(self.propertyfile)

        self.required_files = list(self.required_files)

        # Copy columns for having own objects in run
        # (we need this for storing the results in them).
        self.columns = [Column(c.text, c.title, c.number_of_digits) for c in self.runSet.benchmark.columns]

        # here we store the optional result values, e.g. memory usage, energy, host name
        # keys need to be strings, if first character is "@" the value is marked as hidden (e.g., debug info)
        self.values = {}

        # dummy values, for output in case of interrupt
        self.status = ""
        self.cputime = None
        self.walltime = None
        self.category = result.CATEGORY_UNKNOWN
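
The property-file handling in this constructor tries two resolution strategies in order: glob expansion of the pattern relative to the benchmark's base directory, then variable substitution checked against the filesystem. Reduced to a stand-alone sketch (substitute is a placeholder for substitute_vars):

import glob
import os

def resolve_property_file(pattern, base_dir, substitute):
    expanded = sorted(glob.glob(os.path.join(base_dir, pattern)))
    if expanded:
        return expanded[0]   # first match wins; extra matches trigger a warning above
    substituted = substitute(pattern)
    if os.path.isfile(substituted):
        return substituted   # user-defined substitution named an existing file
    return None              # pattern is ignored, as in the original

print(resolve_property_file("ALL.prp", "/tmp", lambda p: p))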
Code example #15
File: smack.py Project: stieglma/benchexec
 def program_files(self, executable):
     """
     Returns a list of files or directories that are necessary to run the tool.
     """
     installDir = os.path.dirname(executable)
     return [executable] + util.flatten(util.expand_filename_pattern(path, installDir) for path in REQUIRED_PATHS)
Code example #16
    def create_run_from_task_definition(
            self, task_def_file, options, propertyfile, required_files_pattern):
        """Create a Run from a task definition in yaml format"""
        task_def = load_task_definition_file(task_def_file)

        def expand_patterns_from_tag(tag):
            result = []
            patterns = task_def.get(tag, [])
            if isinstance(patterns, str) or not isinstance(patterns, collections.abc.Iterable):
                # accept single string in addition to list of strings
                patterns = [patterns]
            for pattern in patterns:
                expanded = util.expand_filename_pattern(
                    str(pattern), os.path.dirname(task_def_file))
                if not expanded:
                    raise BenchExecException(
                        "Pattern '{}' in task-definition file {} did not match any paths."
                        .format(pattern, task_def_file))
                expanded.sort()
                result.extend(expanded)
            return result

        input_files = expand_patterns_from_tag("input_files")
        if not input_files:
            raise BenchExecException(
                "Task-definition file {} does not define any input files.".format(task_def_file))
        required_files = expand_patterns_from_tag("required_files")

        run = Run(
            task_def_file,
            input_files,
            options,
            self,
            propertyfile,
            required_files_pattern,
            required_files)

        # run.propertyfile of Run is fully determined only after Run is created,
        # thus we handle it and the expected results here.
        if not run.propertyfile:
            return run

        # TODO: support "property_name" attribute in yaml
        prop = result.Property.create(run.propertyfile, allow_unknown=True)
        run.properties = [prop]

        for prop_dict in task_def.get("properties", []):
            if not isinstance(prop_dict, dict) or "property_file" not in prop_dict:
                raise BenchExecException(
                    "Missing property file for property in task-definition file {}."
                    .format(task_def_file))
            expanded = util.expand_filename_pattern(
                prop_dict["property_file"], os.path.dirname(task_def_file))
            if len(expanded) != 1:
                raise BenchExecException(
                    "Property pattern '{}' in task-definition file {} does not refer to exactly one file."
                    .format(prop_dict["property_file"], task_def_file))

            # TODO We could reduce I/O by checking absolute paths and using os.path.samestat
            # with cached stat calls.
            if prop.filename == expanded[0] or os.path.samefile(prop.filename, expanded[0]):
                expected_result = prop_dict.get("expected_verdict")
                if expected_result is not None and not isinstance(expected_result, bool):
                    raise BenchExecException(
                        "Invalid expected result '{}' for property {} in task-definition file {}."
                        .format(expected_result, prop_dict["property_file"], task_def_file))
                run.expected_results[prop.filename] = \
                    result.ExpectedResult(expected_result, prop_dict.get("subproperty"))

        if not run.expected_results:
            logging.debug(
                "Ignoring run '%s' because it does not have the property from %s.",
                run.identifier, run.propertyfile)
            return None
        elif len(run.expected_results) > 1:
            raise BenchExecException(
                "Property '{}' specified multiple times in task-definition file {}."
                .format(prop.filename, task_def_file))
        else:
            return run
Code example #17
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns and the tool
        from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(benchmark_file)[:-4] # remove ending ".xml"
        if config.name:
            self.name += "."+config.name

        self.start_time = start_time
        self.instance = time.strftime("%Y-%m-%d_%H%M", self.start_time)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit('Benchmark file {} is invalid: {}'.format(benchmark_file, e))
        if 'benchmark' != rootTag.tag:
            sys.exit("Benchmark file {} is invalid: "
                "It's root element is not named 'benchmark'.".format(benchmark_file))

        # get tool
        tool_name = rootTag.get('tool')
        if not tool_name:
            sys.exit('A tool needs to be specified in the benchmark definition file.')
        (self.tool_module, self.tool) = load_tool_info(tool_name)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None
        self.display_name = rootTag.get('displayName')

        logging.debug("The tool to be benchmarked is %s.", self.tool_name)

        def parse_memory_limit(value):
            try:
                value = int(value)
                logging.warning(
                    'Value "%s" for memory limit interpreted as MB for backwards compatibility, '
                    'specify a unit to make this unambiguous.',
                    value)
                return value * _BYTE_FACTOR * _BYTE_FACTOR
            except ValueError:
                return util.parse_memory_value(value)

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1": # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit('Invalid value for {} limit: {}'.format(name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit('{} limit "{}" is invalid, it needs to be a positive number '
                         '(or -1 on the command line for disabling it).'.format(name, value))

        self.rlimits = {}
        keys = list(rootTag.keys())
        handle_limit_value("Time", TIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Wall time", WALLTIMELIMIT, config.walltimelimit, util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit, parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        'Hard timelimit %d is smaller than timelimit %d, ignoring the former.',
                        hardtimelimit, self.rlimits[TIMELIMIT])
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        # get number of threads, default value is 1
        self.num_of_threads = int(rootTag.get("threads")) if ("threads" in keys) else 1
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rootTag, PROPERTY_TAG))

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        globalSourcefilesTags = rootTag.findall("tasks") + rootTag.findall("sourcefiles")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall('requiredfiles'):
            required_files = util.expand_filename_pattern(required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning('Pattern %s in requiredfiles tag did not match any file.',
                                required_files_tag.text)
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"), self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [
                os.path.normpath(p.text) for p in result_files_tags if p.text]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit("Invalid relative result-files pattern '{}'.".format(pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i, rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))

        if not self.run_sets:
            for (i, rundefinitionTag) in enumerate(rootTag.findall("test")):
                self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))
            if self.run_sets:
                logging.warning("Benchmark file %s uses deprecated <test> tags. "
                                "Please rename them to <rundefinition>.",
                                benchmark_file)
            else:
                logging.warning("Benchmark file %s specifies no runs to execute "
                                "(no <rundefinition> tags found).",
                                benchmark_file)

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning("No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning("The selection %s does not match any run definitions of %s.",
                                config.selected_run_definitions,
                                [runSet.real_name for runSet in self.run_sets])
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(util.wildcard_match(run_set.real_name, selected) for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, '
                        'skipping it.',
                        selected)
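
The hard/soft time-limit resolution in the middle of this constructor is easy to miss. Extracted into a stand-alone function (plain string keys stand in for the TIMELIMIT/SOFTTIMELIMIT/HARDTIMELIMIT constants):

def resolve_time_limits(rlimits):
    hard = rlimits.pop("hardtimelimit", None)
    if hard is not None:
        soft = rlimits.get("timelimit")
        if soft is None:
            rlimits["timelimit"] = hard
        elif hard > soft:
            rlimits["softtimelimit"] = soft  # the original limit becomes the soft limit
            rlimits["timelimit"] = hard
        # hard < soft is ignored (the original logs a warning)
    return rlimits

assert resolve_time_limits({"timelimit": 60, "hardtimelimit": 900}) == {
    "timelimit": 900, "softtimelimit": 60}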
Code example #18
File: model.py Project: tautschnig/benchexec
    def __init__(self, sourcefiles, fileOptions, runSet, propertyfile=None, required_files_patterns=[]):
        assert sourcefiles
        self.identifier = sourcefiles[0]  # used for name of logfile, substitution, result-category
        self.sourcefiles = util.get_files(sourcefiles)  # expand directories to get their sub-files
        self.runSet = runSet
        self.specific_options = fileOptions  # options that are specific for this run
        self.log_file = runSet.log_folder + os.path.basename(self.identifier) + ".log"
        self.result_files_folder = os.path.join(runSet.result_files_folder, os.path.basename(self.identifier))

        self.required_files = set()
        rel_sourcefile = os.path.relpath(self.identifier, runSet.benchmark.base_dir)
        for pattern in required_files_patterns:
            this_required_files = runSet.expand_filename_pattern(pattern, runSet.benchmark.base_dir, rel_sourcefile)
            if not this_required_files:
                logging.warning(
                    "Pattern %s in requiredfiles tag did not match any file for task %s.", pattern, self.identifier
                )
            self.required_files.update(this_required_files)

        # let's reduce memory consumption: if the two lists are equal, do not keep the second one
        self.options = (
            runSet.options + fileOptions if fileOptions else runSet.options
        )  # all options to be used when executing this run
        substitutedOptions = substitute_vars(self.options, runSet, self.identifier)
        if substitutedOptions != self.options:
            self.options = substitutedOptions  # for less memory again

        self.propertyfile = propertyfile or runSet.propertyfile

        def log_property_file_once(msg):
            if self.propertyfile not in _logged_missing_property_files:
                _logged_missing_property_files.add(self.propertyfile)
                logging.warning(msg)

        # replace run-specific stuff in the propertyfile and add it to the set of required files
        if self.propertyfile is None:
            log_property_file_once("No propertyfile specified. Score computation will ignore the results.")
        else:
            # we check two cases: direct filename or user-defined substitution, one of them must name an existing file
            # TODO: do we need the second case? it is equivalent to the previously used option "-spec ${sourcefile_path}/ALL.prp"
            expandedPropertyFiles = util.expand_filename_pattern(self.propertyfile, self.runSet.benchmark.base_dir)
            substitutedPropertyfiles = substitute_vars([self.propertyfile], runSet, self.identifier)
            assert len(substitutedPropertyfiles) == 1

            if expandedPropertyFiles:
                if len(expandedPropertyFiles) > 1:
                    log_property_file_once(
                        "Pattern {0} for sourcefile {1} in propertyfile tag matches more than one file. Only {2} will be used.".format(
                            self.propertyfile, self.identifier, expandedPropertyFiles[0]
                        )
                    )
                self.propertyfile = expandedPropertyFiles[0]
            elif substitutedPropertyfiles and os.path.isfile(substitutedPropertyfiles[0]):
                self.propertyfile = substitutedPropertyfiles[0]
            else:
                log_property_file_once(
                    "Pattern {0} for sourcefile {1} in propertyfile tag did not match any file. It will be ignored.".format(
                        self.propertyfile, self.identifier
                    )
                )
                self.propertyfile = None

        if self.propertyfile:
            self.required_files.add(self.propertyfile)
            self.properties = result.properties_of_file(self.propertyfile)
        else:
            self.properties = []

        self.required_files = list(self.required_files)

        # Copy columns for having own objects in run
        # (we need this for storing the results in them).
        self.columns = [Column(c.text, c.title, c.number_of_digits) for c in self.runSet.benchmark.columns]

        # here we store the optional result values, e.g. memory usage, energy, host name
        # keys need to be strings, if first character is "@" the value is marked as hidden (e.g., debug info)
        self.values = collections.OrderedDict()

        # dummy values, for output in case of interrupt
        self.status = ""
        self.cputime = None
        self.walltime = None
        self.category = result.CATEGORY_UNKNOWN
Code example #19
 def program_files(self, executable):
     install_dir = os.path.dirname(executable)
     paths = self.REQUIRED_PATHS_SVCOMP17 if self._is_svcomp17_version(executable) else self.REQUIRED_PATHS
     return [executable] + util.flatten(util.expand_filename_pattern(path, install_dir) for path in paths)
Code example #20
File: model.py Project: tautschnig/benchexec
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns and the tool
        from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(benchmark_file)[:-4]  # remove ending ".xml"
        if config.name:
            self.name += "." + config.name

        self.start_time = start_time
        self.instance = time.strftime("%Y-%m-%d_%H%M", self.start_time)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit("Benchmark file {} is invalid: {}".format(benchmark_file, e))
        if "benchmark" != rootTag.tag:
            sys.exit(
                "Benchmark file {} is invalid: " "It's root element is not named 'benchmark'.".format(benchmark_file)
            )

        # get tool
        tool_name = rootTag.get("tool")
        if not tool_name:
            sys.exit("A tool needs to be specified in the benchmark definition file.")
        (self.tool_module, self.tool) = load_tool_info(tool_name)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None

        logging.debug("The tool to be benchmarked is %s.", self.tool_name)

        def parse_memory_limit(value):
            try:
                value = int(value)
                logging.warning(
                    'Value "%s" for memory limit interpreted as MB for backwards compatibility, '
                    "specify a unit to make this unambiguous.",
                    value,
                )
                return value * _BYTE_FACTOR * _BYTE_FACTOR
            except ValueError:
                return util.parse_memory_value(value)

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1":  # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit("Invalid value for {} limit: {}".format(name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit(
                        '{} limit "{}" is invalid, it needs to be a positive number '
                        "(or -1 on the command line for disabling it).".format(name, value)
                    )

        self.rlimits = {}
        keys = list(rootTag.keys())
        handle_limit_value("Time", TIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit, parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        "Hard timelimit %d is smaller than timelimit %d, ignoring the former.",
                        hardtimelimit,
                        self.rlimits[TIMELIMIT],
                    )
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        # get number of threads, default value is 1
        self.num_of_threads = int(rootTag.get("threads")) if ("threads" in keys) else 1
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rootTag, PROPERTY_TAG))

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        globalSourcefilesTags = rootTag.findall("tasks") + rootTag.findall("sourcefiles")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall("requiredfiles"):
            required_files = util.expand_filename_pattern(required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning("Pattern %s in requiredfiles tag did not match any file.", required_files_tag.text)
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"), self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [os.path.normpath(p.text) for p in result_files_tags if p.text]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit("Invalid relative result-files pattern '{}'.".format(pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i, rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(RunSet(rundefinitionTag, self, i + 1, globalSourcefilesTags))

        if not self.run_sets:
            for (i, rundefinitionTag) in enumerate(rootTag.findall("test")):
                self.run_sets.append(RunSet(rundefinitionTag, self, i + 1, globalSourcefilesTags))
            if self.run_sets:
                logging.warning(
                    "Benchmark file %s uses deprecated <test> tags. " "Please rename them to <rundefinition>.",
                    benchmark_file,
                )
            else:
                logging.warning(
                    "Benchmark file %s specifies no runs to execute " "(no <rundefinition> tags found).", benchmark_file
                )

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning("No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning(
                    "The selection %s does not match any run definitions of %s.",
                    config.selected_run_definitions,
                    [runSet.real_name for runSet in self.run_sets],
                )
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(util.wildcard_match(run_set.real_name, selected) for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, ' "skipping it.", selected
                    )
Code example #21
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns and the tool
        from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(
            benchmark_file)[:-4]  # remove ending ".xml"
        if config.name:
            self.name += "." + config.name

        self.description = None
        if config.description_file is not None:
            try:
                self.description = util.read_file(config.description_file)
            except (OSError, UnicodeDecodeError) as e:
                raise BenchExecException(
                    "File '{}' given for description could not be read: {}".
                    format(config.description_file, e))

        self.start_time = start_time
        self.instance = start_time.strftime(util.TIMESTAMP_FILENAME_FORMAT)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit("Benchmark file {} is invalid: {}".format(
                benchmark_file, e))
        if "benchmark" != rootTag.tag:
            sys.exit("Benchmark file {} is invalid: "
                     "It's root element is not named 'benchmark'.".format(
                         benchmark_file))

        # get tool
        tool_name = rootTag.get("tool")
        if not tool_name:
            sys.exit(
                "A tool needs to be specified in the benchmark definition file."
            )
        (self.tool_module, self.tool) = load_tool_info(tool_name, config)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None
        self.display_name = rootTag.get("displayName")

        def parse_memory_limit(value):
            # In a future BenchExec version, we could treat unit-less limits as bytes
            try:
                value = int(value)
            except ValueError:
                return util.parse_memory_value(value)
            else:
                raise ValueError(
                    "Memory limit must have a unit suffix, e.g., '{} MB'".
                    format(value))

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1":  # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit("Invalid value for {} limit: {}".format(
                        name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit(
                        '{} limit "{}" is invalid, it needs to be a positive number '
                        "(or -1 on the command line for disabling it).".format(
                            name, value))

        self.rlimits = {}
        handle_limit_value("Time", TIMELIMIT, config.timelimit,
                           util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit,
                           util.parse_timespan_value)
        handle_limit_value("Wall time", WALLTIMELIMIT, config.walltimelimit,
                           util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit,
                           parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        "Hard timelimit %d is smaller than timelimit %d, ignoring the former.",
                        hardtimelimit,
                        self.rlimits[TIMELIMIT],
                    )
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        self.num_of_threads = int(rootTag.get("threads", 1))
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertytag = get_propertytag(rootTag)

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        if rootTag.findall("sourcefiles"):
            sys.exit(
                "Benchmark file {} has unsupported old format. "
                "Rename <sourcefiles> tags to <tasks>.".format(benchmark_file))
        globalSourcefilesTags = rootTag.findall("tasks")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall("requiredfiles"):
            required_files = util.expand_filename_pattern(
                required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning(
                    "Pattern %s in requiredfiles tag did not match any file.",
                    required_files_tag.text,
                )
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"),
                                         self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [
                os.path.normpath(p.text) for p in result_files_tags if p.text
            ]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit(
                        "Invalid relative result-files pattern '{}'.".format(
                            pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i,
             rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(
                RunSet(rundefinitionTag, self, i + 1, globalSourcefilesTags))

        if not self.run_sets:
            logging.warning(
                "Benchmark file %s specifies no runs to execute "
                "(no <rundefinition> tags found).",
                benchmark_file,
            )

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning(
                "No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning(
                    "The selection %s does not match any run definitions of %s.",
                    config.selected_run_definitions,
                    [runSet.real_name for runSet in self.run_sets],
                )
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(
                        util.wildcard_match(run_set.real_name, selected)
                        for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, '
                        "skipping it.",
                        selected,
                    )
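
Note the behavioral difference between this constructor and the ones in examples #17 and #20: the older parse_memory_limit interprets a unit-less value as megabytes for backwards compatibility, while this version rejects unit-less values outright. A minimal unit-aware parser in the spirit of util.parse_memory_value (assumed semantics with SI factors; the real implementation may differ):

_FACTORS = {"B": 1, "kB": 10**3, "MB": 10**6, "GB": 10**9, "TB": 10**12}

def parse_memory_value(value):
    value = value.strip()
    # try two-letter suffixes first so "B" does not shadow "MB" and friends
    for suffix in sorted(_FACTORS, key=len, reverse=True):
        if value.endswith(suffix):
            return int(value[:-len(suffix)].strip()) * _FACTORS[suffix]
    raise ValueError("memory limit needs a unit suffix, e.g. '200 MB': {!r}".format(value))

assert parse_memory_value("200 MB") == 200 * 10**6
assert parse_memory_value("2GB") == 2 * 10**9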