Example #1
    def extract_runs_from_xml(self, sourcefilesTagList, global_required_files_pattern):
        '''
        This function builds a list of SourcefileSets (containing filename with options).
        The files and their options are taken from the list of sourcefilesTags.
        '''
        # runs are structured as sourcefile sets, one set represents one sourcefiles tag
        blocks = []

        for index, sourcefilesTag in enumerate(sourcefilesTagList):
            sourcefileSetName = sourcefilesTag.get("name")
            matchName = sourcefileSetName or str(index)
            if self.benchmark.config.selected_sourcefile_sets \
                and matchName not in self.benchmark.config.selected_sourcefile_sets:
                    continue

            required_files_pattern = set(tag.text for tag in sourcefilesTag.findall('requiredfiles'))

            # get lists of filenames
            sourcefiles = self.get_sourcefiles_from_xml(sourcefilesTag, self.benchmark.base_dir)

            # get file-specific options for filenames
            fileOptions = util.get_list_from_xml(sourcefilesTag)
            propertyfile = util.text_or_none(util.get_single_child_from_xml(sourcefilesTag, PROPERTY_TAG))

            currentRuns = []
            for sourcefile in sourcefiles:
                currentRuns.append(Run(sourcefile, fileOptions, self, propertyfile,
                                       global_required_files_pattern.union(required_files_pattern)))

            blocks.append(SourcefileSet(sourcefileSetName, index, currentRuns))
        return blocks
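A hedged sketch of the input element shape. The extract_runs_from_xml variants in this listing each turn one <tasks>/<sourcefiles> element into a SourcefileSet; only the "name" attribute and the <requiredfiles> child appear literally in the code, while the <include>, <option>, and <propertyfile> child tag names are assumptions inferred from the helper calls (get_sourcefiles_from_xml, util.get_list_from_xml, PROPERTY_TAG).

import xml.etree.ElementTree as ElementTree

# Assumed element layout, for illustration only; the tag names noted above as
# assumptions are not taken from the snippet.
sourcefilesTag = ElementTree.fromstring("""
<sourcefiles name="ReachSafety">
    <include>tasks/*.c</include>
    <requiredfiles>config/*.properties</requiredfiles>
    <propertyfile>properties/unreach-call.prp</propertyfile>
    <option name="--heap">2000M</option>
</sourcefiles>
""")

# These are the accesses the code above performs on each such element:
print(sourcefilesTag.get("name"))                                     # ReachSafety
print([tag.text for tag in sourcefilesTag.findall("requiredfiles")])  # ['config/*.properties']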
Example #2
    def extract_runs_from_xml(self, sourcefilesTagList, global_required_files_pattern):
        '''
        This function builds a list of SourcefileSets (containing filename with options).
        The files and their options are taken from the list of sourcefilesTags.
        '''
        base_dir = self.benchmark.base_dir
        # runs are structured as sourcefile sets, one set represents one sourcefiles tag
        blocks = []

        for index, sourcefilesTag in enumerate(sourcefilesTagList):
            sourcefileSetName = sourcefilesTag.get("name")
            matchName = sourcefileSetName or str(index)
            if self.benchmark.config.selected_sourcefile_sets \
                and not any(util.wildcard_match(matchName, sourcefile_set) for sourcefile_set in self.benchmark.config.selected_sourcefile_sets):
                    continue

            required_files_pattern = global_required_files_pattern.union(
                set(tag.text for tag in sourcefilesTag.findall('requiredfiles')))

            # get lists of filenames
            task_def_files = self.get_task_def_files_from_xml(sourcefilesTag, base_dir)

            # get file-specific options for filenames
            fileOptions = util.get_list_from_xml(sourcefilesTag)
            propertyfile = util.text_or_none(util.get_single_child_from_xml(sourcefilesTag, PROPERTY_TAG))

            # some runs need more than one sourcefile,
            # the first sourcefile is a normal 'include'-file, we use its name as identifier
            # for logfile and result-category; all other files are 'append'ed.
            appendFileTags = sourcefilesTag.findall("append")

            currentRuns = []
            for identifier in task_def_files:
                if identifier.endswith('.yml'):
                    if appendFileTags:
                        raise BenchExecException(
                            "Cannot combine <append> and task-definition files in the same <tasks> tag.")
                    run = self.create_run_from_task_definition(
                        identifier, fileOptions, propertyfile, required_files_pattern)
                else:
                    run = self.create_run_for_input_file(
                        identifier, fileOptions, propertyfile, required_files_pattern, appendFileTags)
                if run:
                    currentRuns.append(run)

            # add runs for cases without source files
            for run in sourcefilesTag.findall("withoutfile"):
                currentRuns.append(Run(run.text, [], fileOptions, self, propertyfile, required_files_pattern))

            blocks.append(SourcefileSet(sourcefileSetName, index, currentRuns))

        if self.benchmark.config.selected_sourcefile_sets:
            for selected in self.benchmark.config.selected_sourcefile_sets:
                if not any(util.wildcard_match(sourcefile_set.real_name, selected) for sourcefile_set in blocks):
                    logging.warning(
                        'The selected tasks "%s" are not present in the input file, '
                        'skipping them.',
                        selected)
        return blocks
Example #3
    def __init__(self, rundefinitionTag, benchmark, index, globalSourcefilesTags=[]):
        """
        The constructor of RunSet reads run-set name and the source files from rundefinitionTag.
        Source files can be included or excluded, and imported from a list of
        names in another file. Wildcards and variables are expanded.
        @param rundefinitionTag: a rundefinitionTag from the XML file
        """

        self.benchmark = benchmark

        # get name of run set, name is optional, the result can be "None"
        self.real_name = rundefinitionTag.get("name")

        # index is the number of the run set
        self.index = index

        self.log_folder = benchmark.log_folder
        self.result_files_folder = benchmark.result_files_folder
        if self.real_name:
            self.log_folder += self.real_name + "."
            self.result_files_folder = os.path.join(self.result_files_folder, self.real_name)

        # get all run-set-specific options from rundefinitionTag
        self.options = benchmark.options + util.get_list_from_xml(rundefinitionTag)
        self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rundefinitionTag, PROPERTY_TAG)) or benchmark.propertyfile

        # get run-set specific required files
        required_files_pattern = set(tag.text for tag in rundefinitionTag.findall('requiredfiles'))

        # get all runs, a run contains one sourcefile with options
        self.blocks = self.extract_runs_from_xml(
            globalSourcefilesTags + rundefinitionTag.findall("tasks") + rundefinitionTag.findall("sourcefiles"),
            required_files_pattern)
        self.runs = [run for block in self.blocks for run in block.runs]

        names = [self.real_name]
        if len(self.blocks) == 1:
            # there is exactly one source-file set to run, append its name to run-set name
            names.append(self.blocks[0].real_name)
        self.name = '.'.join(filter(None, names))
        self.full_name = self.benchmark.name + (("." + self.name) if self.name else "")

        # Currently we store logfiles as "basename.log",
        # so we cannot distinguish sourcefiles in different folder with same basename.
        # For a 'local benchmark' this causes overriding of logfiles after reading them,
        # so the result is correct, only the logfile is gone.
        # For 'cloud-mode' the logfile is overridden before reading it,
        # so the result will be wrong and every measured value will be missing.
        if self.should_be_executed():
            sourcefilesSet = set()
            for run in self.runs:
                base = os.path.basename(run.identifier)
                if base in sourcefilesSet:
                    logging.warning("Input file with name '%s' appears twice in runset. "
                                    "This could cause problems with equal logfile-names.",
                                    base)
                else:
                    sourcefilesSet.add(base)
            del sourcefilesSet
Example #5
def get_propertytag(parent):
    tag = util.get_single_child_from_xml(parent, "propertyfile")
    if tag is None:
        return None
    expected_verdict = tag.get("expectedverdict")
    if (expected_verdict is not None and expected_verdict
            not in _EXPECTED_RESULT_FILTER_VALUES.values()
            and not re.match("false(.*)", expected_verdict)):
        raise BenchExecException(
            "Invalid value '{}' for expectedverdict of <propertyfile> in tag <{}>: "
            "Only 'true', 'false', 'false(<subproperty>)' and 'unknown' "
            "are allowed!".format(expected_verdict, parent.tag))
    return tag
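A hedged usage sketch for get_propertytag. The accepted expectedverdict values are taken from the error message inside the function itself; the surrounding XML layout is an assumption for illustration.

import xml.etree.ElementTree as ElementTree

# A <tasks> element carrying a <propertyfile> child with an expectedverdict attribute
# (assumed layout, illustration only).
parent = ElementTree.fromstring(
    '<tasks>'
    '<propertyfile expectedverdict="false(unreach-call)">unreach-call.prp</propertyfile>'
    '</tasks>')

# get_propertytag(parent) would return the <propertyfile> element here, because
# "false(unreach-call)" matches the false(<subproperty>) pattern checked above.
# A value such as "maybe" would raise BenchExecException, and a <tasks> element
# without a <propertyfile> child yields None.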
Example #6
    def extract_runs_from_xml(self, sourcefilesTagList,
                              global_required_files_pattern):
        '''
        This function builds a list of SourcefileSets (containing filename with options).
        The files and their options are taken from the list of sourcefilesTags.
        '''
        # runs are structured as sourcefile sets, one set represents one sourcefiles tag
        blocks = []

        for index, sourcefilesTag in enumerate(sourcefilesTagList):
            sourcefileSetName = sourcefilesTag.get("name")
            matchName = sourcefileSetName or str(index)
            if self.benchmark.config.selected_sourcefile_sets \
                and not any(util.wildcard_match(matchName, sourcefile_set) for sourcefile_set in self.benchmark.config.selected_sourcefile_sets):
                continue

            required_files_pattern = set(
                tag.text for tag in sourcefilesTag.findall('requiredfiles'))

            # get lists of filenames
            tasks = self.get_tasks_from_xml(sourcefilesTag,
                                            self.benchmark.base_dir)

            # get file-specific options for filenames
            fileOptions = util.get_list_from_xml(sourcefilesTag)
            propertyfile = util.text_or_none(
                util.get_single_child_from_xml(sourcefilesTag, PROPERTY_TAG))

            currentRuns = []
            for identifier, sourcefiles in tasks:
                currentRuns.append(
                    Run(
                        identifier, sourcefiles, fileOptions, self,
                        propertyfile,
                        global_required_files_pattern.union(
                            required_files_pattern)))

            blocks.append(SourcefileSet(sourcefileSetName, index, currentRuns))

        if self.benchmark.config.selected_sourcefile_sets:
            for selected in self.benchmark.config.selected_sourcefile_sets:
                if not any(
                        util.wildcard_match(sourcefile_set.real_name, selected)
                        for sourcefile_set in blocks):
                    logging.warning(
                        'The selected tasks "%s" are not present in the input file, '
                        'skipping them.', selected)
        return blocks
Example #7
    def extract_runs_from_xml(self, sourcefilesTagList, global_required_files_pattern):
        """
        This function builds a list of SourcefileSets (containing filename with options).
        The files and their options are taken from the list of sourcefilesTags.
        """
        # runs are structured as sourcefile sets, one set represents one sourcefiles tag
        blocks = []

        for index, sourcefilesTag in enumerate(sourcefilesTagList):
            sourcefileSetName = sourcefilesTag.get("name")
            matchName = sourcefileSetName or str(index)
            if self.benchmark.config.selected_sourcefile_sets and not any(
                util.wildcard_match(matchName, sourcefile_set)
                for sourcefile_set in self.benchmark.config.selected_sourcefile_sets
            ):
                continue

            required_files_pattern = set(tag.text for tag in sourcefilesTag.findall("requiredfiles"))

            # get lists of filenames
            sourcefiles = self.get_sourcefiles_from_xml(sourcefilesTag, self.benchmark.base_dir)

            # get file-specific options for filenames
            fileOptions = util.get_list_from_xml(sourcefilesTag)
            propertyfile = util.text_or_none(util.get_single_child_from_xml(sourcefilesTag, PROPERTY_TAG))

            currentRuns = []
            for sourcefile in sourcefiles:
                currentRuns.append(
                    Run(
                        sourcefile,
                        fileOptions,
                        self,
                        propertyfile,
                        global_required_files_pattern.union(required_files_pattern),
                    )
                )

            blocks.append(SourcefileSet(sourcefileSetName, index, currentRuns))

        if self.benchmark.config.selected_sourcefile_sets:
            for selected in self.benchmark.config.selected_sourcefile_sets:
                if not any(util.wildcard_match(sourcefile_set.real_name, selected) for sourcefile_set in blocks):
                    logging.warning(
                        'The selected tasks "%s" are not present in the input file, ' "skipping them.", selected
                    )
        return blocks
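A small standalone sketch of the selection filter used in these extract_runs_from_xml variants: a sourcefile set is kept if any user-selected pattern matches its name (or its index, when it has no name). util.wildcard_match is assumed to behave like fnmatch here; the set names below are hypothetical and only illustrate the rule.

import fnmatch

def is_selected(match_name, selected_patterns):
    # An empty selection keeps every set; otherwise at least one pattern must match.
    return not selected_patterns or any(
        fnmatch.fnmatch(match_name, pattern) for pattern in selected_patterns)

assert is_selected("ReachSafety", [])            # nothing selected: keep all sets
assert is_selected("ReachSafety", ["Reach*"])    # wildcard pattern matches the name
assert not is_selected("MemSafety", ["Reach*"])  # unmatched sets are skipped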
Example #8
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns and the tool
        from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(benchmark_file)[:-4] # remove ending ".xml"
        if config.name:
            self.name += "."+config.name

        self.start_time = start_time
        self.instance = time.strftime("%Y-%m-%d_%H%M", self.start_time)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit('Benchmark file {} is invalid: {}'.format(benchmark_file, e))
        if 'benchmark' != rootTag.tag:
            sys.exit("Benchmark file {} is invalid: "
                "It's root element is not named 'benchmark'.".format(benchmark_file))

        # get tool
        tool_name = rootTag.get('tool')
        if not tool_name:
            sys.exit('A tool needs to be specified in the benchmark definition file.')
        (self.tool_module, self.tool) = load_tool_info(tool_name)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None
        self.display_name = rootTag.get('displayName')

        logging.debug("The tool to be benchmarked is %s.", self.tool_name)

        def parse_memory_limit(value):
            try:
                value = int(value)
                logging.warning(
                    'Value "%s" for memory limit interpreted as MB for backwards compatibility, '
                    'specify a unit to make this unambiguous.',
                    value)
                return value * _BYTE_FACTOR * _BYTE_FACTOR
            except ValueError:
                return util.parse_memory_value(value)

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1": # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit('Invalid value for {} limit: {}'.format(name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit('{} limit "{}" is invalid, it needs to be a positive number '
                         '(or -1 on the command line for disabling it).'.format(name, value))

        self.rlimits = {}
        keys = list(rootTag.keys())
        handle_limit_value("Time", TIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Wall time", WALLTIMELIMIT, config.walltimelimit, util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit, parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        'Hard timelimit %d is smaller than timelimit %d, ignoring the former.',
                        hardtimelimit, self.rlimits[TIMELIMIT])
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        # get number of threads, default value is 1
        self.num_of_threads = int(rootTag.get("threads")) if ("threads" in keys) else 1
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rootTag, PROPERTY_TAG))

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        globalSourcefilesTags = rootTag.findall("tasks") + rootTag.findall("sourcefiles")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall('requiredfiles'):
            required_files = util.expand_filename_pattern(required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning('Pattern %s in requiredfiles tag did not match any file.',
                                required_files_tag.text)
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"), self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [
                os.path.normpath(p.text) for p in result_files_tags if p.text]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit("Invalid relative result-files pattern '{}'.".format(pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i, rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))

        if not self.run_sets:
            for (i, rundefinitionTag) in enumerate(rootTag.findall("test")):
                self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))
            if self.run_sets:
                logging.warning("Benchmark file %s uses deprecated <test> tags. "
                                "Please rename them to <rundefinition>.",
                                benchmark_file)
            else:
                logging.warning("Benchmark file %s specifies no runs to execute "
                                "(no <rundefinition> tags found).",
                                benchmark_file)

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning("No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning("The selection %s does not match any run definitions of %s.",
                                config.selected_run_definitions,
                                [runSet.real_name for runSet in self.run_sets])
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(util.wildcard_match(run_set.real_name, selected) for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, '
                        'skipping it.',
                        selected)
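A standalone sketch (not part of model.py) of the override rule implemented by handle_limit_value above: a command-line value replaces the XML attribute, and "-1" on the command line disables the limit entirely. resolve_limit and its arguments are hypothetical names used only for this illustration; the real code additionally validates that the parsed limit is positive.

def resolve_limit(xml_value, cmdline_value, parse_fn=int):
    value = xml_value
    if cmdline_value is not None:
        # the command line wins; "-1" means "no limit at all"
        value = None if cmdline_value.strip() == "-1" else cmdline_value
    return parse_fn(value) if value is not None else None

assert resolve_limit("900", None) == 900      # limit taken from the XML attribute
assert resolve_limit("900", "1800") == 1800   # command-line value overrides the XML value
assert resolve_limit("900", "-1") is None     # -1 on the command line disables the limit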
Example #9
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns and the tool
        from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(benchmark_file)[:-4]  # remove ending ".xml"
        if config.name:
            self.name += "." + config.name

        self.start_time = start_time
        self.instance = time.strftime("%Y-%m-%d_%H%M", self.start_time)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit("Benchmark file {} is invalid: {}".format(benchmark_file, e))
        if "benchmark" != rootTag.tag:
            sys.exit(
                "Benchmark file {} is invalid: " "It's root element is not named 'benchmark'.".format(benchmark_file)
            )

        # get tool
        tool_name = rootTag.get("tool")
        if not tool_name:
            sys.exit("A tool needs to be specified in the benchmark definition file.")
        (self.tool_module, self.tool) = load_tool_info(tool_name)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None

        logging.debug("The tool to be benchmarked is %s.", self.tool_name)

        def parse_memory_limit(value):
            try:
                value = int(value)
                logging.warning(
                    'Value "%s" for memory limit interpreted as MB for backwards compatibility, '
                    "specify a unit to make this unambiguous.",
                    value,
                )
                return value * _BYTE_FACTOR * _BYTE_FACTOR
            except ValueError:
                return util.parse_memory_value(value)

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1":  # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit("Invalid value for {} limit: {}".format(name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit(
                        '{} limit "{}" is invalid, it needs to be a positive number '
                        "(or -1 on the command line for disabling it).".format(name, value)
                    )

        self.rlimits = {}
        keys = list(rootTag.keys())
        handle_limit_value("Time", TIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit, parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        "Hard timelimit %d is smaller than timelimit %d, ignoring the former.",
                        hardtimelimit,
                        self.rlimits[TIMELIMIT],
                    )
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        # get number of threads, default value is 1
        self.num_of_threads = int(rootTag.get("threads")) if ("threads" in keys) else 1
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rootTag, PROPERTY_TAG))

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        globalSourcefilesTags = rootTag.findall("tasks") + rootTag.findall("sourcefiles")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall("requiredfiles"):
            required_files = util.expand_filename_pattern(required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning("Pattern %s in requiredfiles tag did not match any file.", required_files_tag.text)
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"), self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [os.path.normpath(p.text) for p in result_files_tags if p.text]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit("Invalid relative result-files pattern '{}'.".format(pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i, rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(RunSet(rundefinitionTag, self, i + 1, globalSourcefilesTags))

        if not self.run_sets:
            for (i, rundefinitionTag) in enumerate(rootTag.findall("test")):
                self.run_sets.append(RunSet(rundefinitionTag, self, i + 1, globalSourcefilesTags))
            if self.run_sets:
                logging.warning(
                    "Benchmark file %s uses deprecated <test> tags. " "Please rename them to <rundefinition>.",
                    benchmark_file,
                )
            else:
                logging.warning(
                    "Benchmark file %s specifies no runs to execute " "(no <rundefinition> tags found).", benchmark_file
                )

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning("No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning(
                    "The selection %s does not match any run definitions of %s.",
                    config.selected_run_definitions,
                    [runSet.real_name for runSet in self.run_sets],
                )
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(util.wildcard_match(run_set.real_name, selected) for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, ' "skipping it.", selected
                    )