Example #1
    def extract_runs_from_xml(self, sourcefilesTagList, global_required_files_pattern):
        '''
        This function builds a list of SourcefileSets (each containing filenames with options).
        The files and their options are taken from the list of sourcefilesTags.
        '''
        base_dir = self.benchmark.base_dir
        # runs are structured as sourcefile sets, one set represents one sourcefiles tag
        blocks = []

        for index, sourcefilesTag in enumerate(sourcefilesTagList):
            sourcefileSetName = sourcefilesTag.get("name")
            matchName = sourcefileSetName or str(index)
            if self.benchmark.config.selected_sourcefile_sets \
                and not any(util.wildcard_match(matchName, sourcefile_set) for sourcefile_set in self.benchmark.config.selected_sourcefile_sets):
                continue

            required_files_pattern = global_required_files_pattern.union(
                set(tag.text for tag in sourcefilesTag.findall('requiredfiles')))

            # get lists of filenames
            task_def_files = self.get_task_def_files_from_xml(sourcefilesTag, base_dir)

            # get file-specific options for filenames
            fileOptions = util.get_list_from_xml(sourcefilesTag)
            propertyfile = util.text_or_none(util.get_single_child_from_xml(sourcefilesTag, PROPERTY_TAG))

            # Some runs need more than one sourcefile.
            # The first sourcefile is a normal 'include' file; we use its name as the
            # identifier for the logfile and result-category. All other files are 'append'ed.
            appendFileTags = sourcefilesTag.findall("append")

            currentRuns = []
            for identifier in task_def_files:
                if identifier.endswith('.yml'):
                    if appendFileTags:
                        raise BenchExecException(
                            "Cannot combine <append> and task-definition files in the same <tasks> tag.")
                    run = self.create_run_from_task_definition(
                        identifier, fileOptions, propertyfile, required_files_pattern)
                else:
                    run = self.create_run_for_input_file(
                        identifier, fileOptions, propertyfile, required_files_pattern, appendFileTags)
                if run:
                    currentRuns.append(run)

            # add runs for cases without source files
            for run in sourcefilesTag.findall("withoutfile"):
                currentRuns.append(Run(run.text, [], fileOptions, self, propertyfile, required_files_pattern))

            blocks.append(SourcefileSet(sourcefileSetName, index, currentRuns))

        if self.benchmark.config.selected_sourcefile_sets:
            for selected in self.benchmark.config.selected_sourcefile_sets:
                if not any(util.wildcard_match(sourcefile_set.real_name, selected) for sourcefile_set in blocks):
                    logging.warning(
                        'The selected tasks "%s" are not present in the input file, '
                        'skipping them.',
                        selected)
        return blocks
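
The selection in Example #1 (and the variants below) hinges on util.wildcard_match, which compares a set's name, or its index for unnamed sets, against the user-supplied selectors. A minimal standalone sketch of that behavior, assuming shell-style glob semantics via Python's fnmatch (the real implementation lives in benchexec.util):

import fnmatch

def wildcard_match(word, wildcard):
    # Assumption: selectors behave like shell globs; a None word never matches.
    return word is not None and fnmatch.fnmatch(word, wildcard)

selected_sets = ["ReachSafety-*", "2"]  # hypothetical selection of task sets
set_names = ["ReachSafety-Arrays", "MemSafety-Heap", None]
for index, name in enumerate(set_names):
    match_name = name or str(index)  # unnamed sets are matched by their index
    print(match_name, any(wildcard_match(match_name, s) for s in selected_sets))
# ReachSafety-Arrays True
# MemSafety-Heap False
# 2 True
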
Example #2
    def extract_runs_from_xml(self, sourcefilesTagList,
                              global_required_files_pattern):
        '''
        This function builds a list of SourcefileSets (each containing filenames with options).
        The files and their options are taken from the list of sourcefilesTags.
        '''
        # runs are structured as sourcefile sets, one set represents one sourcefiles tag
        blocks = []

        for index, sourcefilesTag in enumerate(sourcefilesTagList):
            sourcefileSetName = sourcefilesTag.get("name")
            matchName = sourcefileSetName or str(index)
            if self.benchmark.config.selected_sourcefile_sets \
                and not any(util.wildcard_match(matchName, sourcefile_set) for sourcefile_set in self.benchmark.config.selected_sourcefile_sets):
                continue

            required_files_pattern = set(
                tag.text for tag in sourcefilesTag.findall('requiredfiles'))

            # get lists of filenames
            tasks = self.get_tasks_from_xml(sourcefilesTag,
                                            self.benchmark.base_dir)

            # get file-specific options for filenames
            fileOptions = util.get_list_from_xml(sourcefilesTag)
            propertyfile = util.text_or_none(
                util.get_single_child_from_xml(sourcefilesTag, PROPERTY_TAG))

            currentRuns = []
            for identifier, sourcefiles in tasks:
                currentRuns.append(
                    Run(
                        identifier, sourcefiles, fileOptions, self,
                        propertyfile,
                        global_required_files_pattern.union(
                            required_files_pattern)))

            blocks.append(SourcefileSet(sourcefileSetName, index, currentRuns))

        if self.benchmark.config.selected_sourcefile_sets:
            for selected in self.benchmark.config.selected_sourcefile_sets:
                if not any(
                        util.wildcard_match(sourcefile_set.real_name, selected)
                        for sourcefile_set in blocks):
                    logging.warning(
                        'The selected tasks "%s" are not present in the input file, '
                        'skipping them.', selected)
        return blocks
Example #3
    def extract_runs_from_xml(self, sourcefilesTagList, global_required_files_pattern):
        """
        This function builds a list of SourcefileSets (each containing filenames with options).
        The files and their options are taken from the list of sourcefilesTags.
        """
        # runs are structured as sourcefile sets, one set represents one sourcefiles tag
        blocks = []

        for index, sourcefilesTag in enumerate(sourcefilesTagList):
            sourcefileSetName = sourcefilesTag.get("name")
            matchName = sourcefileSetName or str(index)
            if self.benchmark.config.selected_sourcefile_sets and not any(
                util.wildcard_match(matchName, sourcefile_set)
                for sourcefile_set in self.benchmark.config.selected_sourcefile_sets
            ):
                continue

            required_files_pattern = set(tag.text for tag in sourcefilesTag.findall("requiredfiles"))

            # get lists of filenames
            sourcefiles = self.get_sourcefiles_from_xml(sourcefilesTag, self.benchmark.base_dir)

            # get file-specific options for filenames
            fileOptions = util.get_list_from_xml(sourcefilesTag)
            propertyfile = util.text_or_none(util.get_single_child_from_xml(sourcefilesTag, PROPERTY_TAG))

            currentRuns = []
            for sourcefile in sourcefiles:
                currentRuns.append(
                    Run(
                        sourcefile,
                        fileOptions,
                        self,
                        propertyfile,
                        global_required_files_pattern.union(required_files_pattern),
                    )
                )

            blocks.append(SourcefileSet(sourcefileSetName, index, currentRuns))

        if self.benchmark.config.selected_sourcefile_sets:
            for selected in self.benchmark.config.selected_sourcefile_sets:
                if not any(util.wildcard_match(sourcefile_set.real_name, selected) for sourcefile_set in blocks):
                    logging.warning(
                        'The selected tasks "%s" are not present in the input file, ' "skipping them.", selected
                    )
        return blocks
Example #4
    def should_be_executed(self):
        return not self.benchmark.config.selected_run_definitions \
            or any(util.wildcard_match(self.real_name, run_definition) for run_definition in self.benchmark.config.selected_run_definitions)
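
For illustration, a self-contained sketch of how this predicate filters run definitions. benchmark.config is mocked with SimpleNamespace, and fnmatch stands in for util.wildcard_match (an assumption about its glob semantics):

import fnmatch
from types import SimpleNamespace

benchmark = SimpleNamespace(
    config=SimpleNamespace(selected_run_definitions=["linux-*"]))

def should_be_executed(real_name):
    # Mirrors the predicate above: an empty selection means "run everything".
    return not benchmark.config.selected_run_definitions or any(
        fnmatch.fnmatch(real_name, run_definition)
        for run_definition in benchmark.config.selected_run_definitions)

for name in ["linux-4.2", "windows", "linux-5.10"]:
    print(name, should_be_executed(name))
# linux-4.2 True, windows False, linux-5.10 True
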
Example #5
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns and the tool
        from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(benchmark_file)[:-4] # remove ending ".xml"
        if config.name:
            self.name += "."+config.name

        self.start_time = start_time
        self.instance = time.strftime("%Y-%m-%d_%H%M", self.start_time)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit('Benchmark file {} is invalid: {}'.format(benchmark_file, e))
        if 'benchmark' != rootTag.tag:
            sys.exit("Benchmark file {} is invalid: "
                "It's root element is not named 'benchmark'.".format(benchmark_file))

        # get tool
        tool_name = rootTag.get('tool')
        if not tool_name:
            sys.exit('A tool needs to be specified in the benchmark definition file.')
        (self.tool_module, self.tool) = load_tool_info(tool_name)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None
        self.display_name = rootTag.get('displayName')

        logging.debug("The tool to be benchmarked is %s.", self.tool_name)

        def parse_memory_limit(value):
            try:
                value = int(value)
                logging.warning(
                    'Value "%s" for memory limit interpreted as MB for backwards compatibility, '
                    'specify a unit to make this unambiguous.',
                    value)
                return value * _BYTE_FACTOR * _BYTE_FACTOR
            except ValueError:
                return util.parse_memory_value(value)

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1": # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit('Invalid value for {} limit: {}'.format(name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit('{} limit "{}" is invalid, it needs to be a positive number '
                         '(or -1 on the command line for disabling it).'.format(name, value))

        self.rlimits = {}
        keys = list(rootTag.keys())
        handle_limit_value("Time", TIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Wall time", WALLTIMELIMIT, config.walltimelimit, util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit, parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        'Hard timelimit %d is smaller than timelimit %d, ignoring the former.',
                        hardtimelimit, self.rlimits[TIMELIMIT])
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        # get number of threads, default value is 1
        self.num_of_threads = int(rootTag.get("threads")) if ("threads" in keys) else 1
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rootTag, PROPERTY_TAG))

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        globalSourcefilesTags = rootTag.findall("tasks") + rootTag.findall("sourcefiles")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall('requiredfiles'):
            required_files = util.expand_filename_pattern(required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning('Pattern %s in requiredfiles tag did not match any file.',
                                required_files_tag.text)
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"), self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [
                os.path.normpath(p.text) for p in result_files_tags if p.text]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit("Invalid relative result-files pattern '{}'.".format(pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i, rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))

        if not self.run_sets:
            for (i, rundefinitionTag) in enumerate(rootTag.findall("test")):
                self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))
            if self.run_sets:
                logging.warning("Benchmark file %s uses deprecated <test> tags. "
                                "Please rename them to <rundefinition>.",
                                benchmark_file)
            else:
                logging.warning("Benchmark file %s specifies no runs to execute "
                                "(no <rundefinition> tags found).",
                                benchmark_file)

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning("No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning("The selection %s does not match any run definitions of %s.",
                                config.selected_run_definitions,
                                [runSet.real_name for runSet in self.run_sets])
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(util.wildcard_match(run_set.real_name, selected) for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, '
                        'skipping it.',
                        selected)
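
The parse_memory_limit helper in Example #5 treats a bare number as megabytes for backwards compatibility. A standalone sketch of just that branch, assuming _BYTE_FACTOR = 1000 (SI units) and leaving the unit-suffix path to util.parse_memory_value:

_BYTE_FACTOR = 1000  # assumption: SI units, so 1 MB = 10**6 bytes

def parse_memory_limit(value):
    try:
        value = int(value)
    except ValueError:
        # values with a unit suffix ("2 GB") would go to util.parse_memory_value
        raise NotImplementedError("unit-suffix parsing not sketched here")
    # a bare integer is interpreted as MB (with a warning in the original)
    return value * _BYTE_FACTOR * _BYTE_FACTOR

print(parse_memory_limit("2000"))  # 2000000000, i.e. 2000 MB in bytes
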
Example #6
    def should_be_executed(self):
        return not self.benchmark.config.selected_run_definitions or any(
            util.wildcard_match(self.real_name, run_definition)
            for run_definition in self.benchmark.config.selected_run_definitions
        )
Example #7
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns and the tool
        from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(benchmark_file)[:-4]  # remove ending ".xml"
        if config.name:
            self.name += "." + config.name

        self.start_time = start_time
        self.instance = time.strftime("%Y-%m-%d_%H%M", self.start_time)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit("Benchmark file {} is invalid: {}".format(benchmark_file, e))
        if "benchmark" != rootTag.tag:
            sys.exit(
                "Benchmark file {} is invalid: " "It's root element is not named 'benchmark'.".format(benchmark_file)
            )

        # get tool
        tool_name = rootTag.get("tool")
        if not tool_name:
            sys.exit("A tool needs to be specified in the benchmark definition file.")
        (self.tool_module, self.tool) = load_tool_info(tool_name)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None

        logging.debug("The tool to be benchmarked is %s.", self.tool_name)

        def parse_memory_limit(value):
            try:
                value = int(value)
                logging.warning(
                    'Value "%s" for memory limit interpreted as MB for backwards compatibility, '
                    "specify a unit to make this unambiguous.",
                    value,
                )
                return value * _BYTE_FACTOR * _BYTE_FACTOR
            except ValueError:
                return util.parse_memory_value(value)

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1":  # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit("Invalid value for {} limit: {}".format(name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit(
                        '{} limit "{}" is invalid, it needs to be a positive number '
                        "(or -1 on the command line for disabling it).".format(name, value)
                    )

        self.rlimits = {}
        keys = list(rootTag.keys())
        handle_limit_value("Time", TIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit, parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        "Hard timelimit %d is smaller than timelimit %d, ignoring the former.",
                        hardtimelimit,
                        self.rlimits[TIMELIMIT],
                    )
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        # get number of threads, default value is 1
        self.num_of_threads = int(rootTag.get("threads")) if ("threads" in keys) else 1
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rootTag, PROPERTY_TAG))

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        globalSourcefilesTags = rootTag.findall("tasks") + rootTag.findall("sourcefiles")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall("requiredfiles"):
            required_files = util.expand_filename_pattern(required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning("Pattern %s in requiredfiles tag did not match any file.", required_files_tag.text)
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"), self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [os.path.normpath(p.text) for p in result_files_tags if p.text]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit("Invalid relative result-files pattern '{}'.".format(pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i, rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(RunSet(rundefinitionTag, self, i + 1, globalSourcefilesTags))

        if not self.run_sets:
            for (i, rundefinitionTag) in enumerate(rootTag.findall("test")):
                self.run_sets.append(RunSet(rundefinitionTag, self, i + 1, globalSourcefilesTags))
            if self.run_sets:
                logging.warning(
                    "Benchmark file %s uses deprecated <test> tags. " "Please rename them to <rundefinition>.",
                    benchmark_file,
                )
            else:
                logging.warning(
                    "Benchmark file %s specifies no runs to execute " "(no <rundefinition> tags found).", benchmark_file
                )

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning("No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning(
                    "The selection %s does not match any run definitions of %s.",
                    config.selected_run_definitions,
                    [runSet.real_name for runSet in self.run_sets],
                )
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(util.wildcard_match(run_set.real_name, selected) for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, ' "skipping it.", selected
                    )
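
handle_limit_value in each version gives command-line values precedence over the XML attribute, with -1 meaning "no limit". The precedence rule in isolation, as a sketch with parsing and validation stripped out:

def resolve_limit(xml_value, cmdline_value):
    if cmdline_value is not None:
        if cmdline_value.strip() == "-1":  # infinity: drop the limit entirely
            return None
        return cmdline_value
    return xml_value

print(resolve_limit("900 s", None))     # '900 s'  (XML value kept)
print(resolve_limit("900 s", "600 s"))  # '600 s'  (command line wins)
print(resolve_limit("900 s", "-1"))     # None     (limit disabled)
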
Example #8
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns and the tool
        from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(
            benchmark_file)[:-4]  # remove ending ".xml"
        if config.name:
            self.name += "." + config.name

        self.description = None
        if config.description_file is not None:
            try:
                self.description = util.read_file(config.description_file)
            except (OSError, UnicodeDecodeError) as e:
                raise BenchExecException(
                    "File '{}' given for description could not be read: {}".
                    format(config.description_file, e))

        self.start_time = start_time
        self.instance = start_time.strftime(util.TIMESTAMP_FILENAME_FORMAT)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit("Benchmark file {} is invalid: {}".format(
                benchmark_file, e))
        if "benchmark" != rootTag.tag:
            sys.exit("Benchmark file {} is invalid: "
                     "It's root element is not named 'benchmark'.".format(
                         benchmark_file))

        # get tool
        tool_name = rootTag.get("tool")
        if not tool_name:
            sys.exit(
                "A tool needs to be specified in the benchmark definition file."
            )
        (self.tool_module, self.tool) = load_tool_info(tool_name, config)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None
        self.display_name = rootTag.get("displayName")

        def parse_memory_limit(value):
            # In a future BenchExec version, we could treat unit-less limits as bytes
            try:
                value = int(value)
            except ValueError:
                return util.parse_memory_value(value)
            else:
                raise ValueError(
                    "Memory limit must have a unit suffix, e.g., '{} MB'".
                    format(value))

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1":  # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit("Invalid value for {} limit: {}".format(
                        name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit(
                        '{} limit "{}" is invalid, it needs to be a positive number '
                        "(or -1 on the command line for disabling it).".format(
                            name, value))

        self.rlimits = {}
        handle_limit_value("Time", TIMELIMIT, config.timelimit,
                           util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit,
                           util.parse_timespan_value)
        handle_limit_value("Wall time", WALLTIMELIMIT, config.walltimelimit,
                           util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit,
                           parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        "Hard timelimit %d is smaller than timelimit %d, ignoring the former.",
                        hardtimelimit,
                        self.rlimits[TIMELIMIT],
                    )
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        self.num_of_threads = int(rootTag.get("threads", 1))
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertytag = get_propertytag(rootTag)

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        if rootTag.findall("sourcefiles"):
            sys.exit(
                "Benchmark file {} has unsupported old format. "
                "Rename <sourcefiles> tags to <tasks>.".format(benchmark_file))
        globalSourcefilesTags = rootTag.findall("tasks")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall("requiredfiles"):
            required_files = util.expand_filename_pattern(
                required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning(
                    "Pattern %s in requiredfiles tag did not match any file.",
                    required_files_tag.text,
                )
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"),
                                         self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [
                os.path.normpath(p.text) for p in result_files_tags if p.text
            ]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit(
                        "Invalid relative result-files pattern '{}'.".format(
                            pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i,
             rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(
                RunSet(rundefinitionTag, self, i + 1, globalSourcefilesTags))

        if not self.run_sets:
            logging.warning(
                "Benchmark file %s specifies no runs to execute "
                "(no <rundefinition> tags found).",
                benchmark_file,
            )

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning(
                "No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning(
                    "The selection %s does not match any run definitions of %s.",
                    config.selected_run_definitions,
                    [runSet.real_name for runSet in self.run_sets],
                )
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(
                        util.wildcard_match(run_set.real_name, selected)
                        for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, '
                        "skipping it.",
                        selected,
                    )
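
Across Examples #5, #7, and #8, a hard time limit larger than the time limit is split into a soft and a hard limit. A self-contained sketch of that resolution; the string keys are illustrative stand-ins for the module-level constants:

TIMELIMIT = "timelimit"
SOFTTIMELIMIT = "softtimelimit"
HARDTIMELIMIT = "hardtimelimit"

def resolve_time_limits(rlimits):
    if HARDTIMELIMIT in rlimits:
        hardtimelimit = rlimits.pop(HARDTIMELIMIT)
        if TIMELIMIT in rlimits:
            if hardtimelimit < rlimits[TIMELIMIT]:
                pass  # smaller hard limit is ignored (warned about in the original)
            elif hardtimelimit > rlimits[TIMELIMIT]:
                rlimits[SOFTTIMELIMIT] = rlimits[TIMELIMIT]
                rlimits[TIMELIMIT] = hardtimelimit
        else:
            rlimits[TIMELIMIT] = hardtimelimit
    return rlimits

print(resolve_time_limits({TIMELIMIT: 900, HARDTIMELIMIT: 960}))
# {'timelimit': 960, 'softtimelimit': 900}
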