Code example #1
def is_turbo_boost_enabled():
    """
    Check whether Turbo Boost (scaling CPU frequency beyond nominal frequency)
    is active on this system.
    @return: A bool, or None if Turbo Boost is not supported.
    """
    try:
        if os.path.exists(_TURBO_BOOST_FILE):
            boost_enabled = int(util.read_file(_TURBO_BOOST_FILE))
            if not (0 <= boost_enabled <= 1):
                raise ValueError(
                    "Invalid value {} for turbo boost activation".format(
                        boost_enabled))
            return boost_enabled != 0
        if os.path.exists(_TURBO_BOOST_FILE_PSTATE):
            boost_disabled = int(util.read_file(_TURBO_BOOST_FILE_PSTATE))
            if not (0 <= boost_disabled <= 1):
                raise ValueError(
                    "Invalid value {} for turbo boost activation".format(
                        boost_disabled))
            return boost_disabled != 1
    except ValueError as e:
        sys.exit(
            "Could not read turbo-boost information from kernel: {0}".format(
                e))
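
The two file constants referenced above are not defined in this listing. A minimal sketch, assuming the usual Linux sysfs layout (acpi-cpufreq exposes a "boost" flag, while intel_pstate exposes an inverted "no_turbo" flag); treat the paths as assumptions:

import os
import sys
from benchexec import util

_TURBO_BOOST_FILE = "/sys/devices/system/cpu/cpufreq/boost"  # assumed: "1" means boost enabled
_TURBO_BOOST_FILE_PSTATE = "/sys/devices/system/cpu/intel_pstate/no_turbo"  # assumed: "1" means boost disabled

# Example call: returns True/False, or None if neither file exists.
# print(is_turbo_boost_enabled())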
Code example #2
    def generate_tables_and_compare_csv(self, args, table_prefix, result_prefix=None,
                                        diff_prefix=None, result_diff_prefix=None,
                                        expected_counts=None):
        output, csv_file, csv_diff_file = \
            self.generate_tables_and_check_produced_files(args, table_prefix, diff_prefix)

        generated = util.read_file(csv_file)
        expected_file = [here, 'expected', (result_prefix or table_prefix) + '.csv']
        if OVERWRITE_MODE:
            util.write_file(generated, *expected_file)
        else:
            self.assertMultiLineEqual(generated, util.read_file(*expected_file))

        if diff_prefix:
            generated_diff = util.read_file(csv_diff_file)
            expected_diff_file = [here, 'expected', (result_diff_prefix or diff_prefix) + '.csv']
            if OVERWRITE_MODE:
                util.write_file(generated_diff, *expected_diff_file)
            else:
                self.assertMultiLineEqual(generated_diff, util.read_file(*expected_diff_file))

        if expected_counts:
            # output of table-generator should end with statistics about regressions
            counts = output[output.find('REGRESSIONS'):].strip()
            self.assertMultiLineEqual(expected_counts, counts)
        else:
            self.assertNotIn('REGRESSIONS', output)
            self.assertNotIn('STATS', output)
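
The regression-statistics check at the end works by slicing everything from the first "REGRESSIONS" marker onward. A hedged illustration with made-up table-generator output:

# Hypothetical output, only to show what the slice keeps.
output = "Table written to output/results.table.html\nREGRESSIONS 2\nSTATS\n3 1 0\n"
counts = output[output.find("REGRESSIONS"):].strip()
print(counts)  # prints the lines from "REGRESSIONS 2" through "3 1 0"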
Code example #3
    def generate_tables_and_compare_content(self, args, table_prefix, result_prefix=None,
                                        diff_prefix=None, result_diff_prefix=None,
                                        expected_counts=None):
        def expected_file_name(ending):
            return [here, 'expected', (result_prefix or table_prefix) + '.' + ending]
        def expected_diff_file_name(ending):
            return [here, 'expected', (result_diff_prefix or diff_prefix) + '.' + ending]

        output, html_file, html_diff_file, csv_file, csv_diff_file = \
            self.generate_tables_and_check_produced_files(args, table_prefix, diff_prefix)

        generated_csv = util.read_file(csv_file)
        self.assert_file_content_equals(generated_csv, expected_file_name('csv'))

        generated_html = self.read_table_from_html(html_file)
        self.assert_file_content_equals(generated_html, expected_file_name('html'))

        if diff_prefix:
            generated_csv_diff = util.read_file(csv_diff_file)
            self.assert_file_content_equals(generated_csv_diff, expected_diff_file_name('csv'))

            generated_html_diff = self.read_table_from_html(html_diff_file)
            self.assert_file_content_equals(generated_html_diff, expected_diff_file_name('html'))

        if expected_counts:
            # output of table-generator should end with statistics about regressions
            counts = output[output.find('REGRESSIONS'):].strip()
            self.assertMultiLineEqual(expected_counts, counts)
        else:
            self.assertNotIn('REGRESSIONS', output)
            self.assertNotIn('STATS', output)
Code example #4
    def generate_tables_and_compare_content(
        self,
        args,
        table_prefix,
        result_prefix=None,
        diff_prefix=None,
        result_diff_prefix=None,
        expected_counts=None,
    ):
        def expected_file_name(ending):
            return [here, "expected", (result_prefix or table_prefix) + "." + ending]

        def expected_diff_file_name(ending):
            return [
                here,
                "expected",
                (result_diff_prefix or diff_prefix) + "." + ending,
            ]

        (
            output,
            html_file,
            html_diff_file,
            csv_file,
            csv_diff_file,
        ) = self.generate_tables_and_check_produced_files(
            args + ["--static-table"], table_prefix, diff_prefix
        )

        generated_csv = util.read_file(csv_file)
        self.assert_file_content_equals(generated_csv, expected_file_name("csv"))

        generated_html = self.read_table_from_html(html_file)
        self.assert_file_content_equals(generated_html, expected_file_name("html"))

        if diff_prefix:
            generated_csv_diff = util.read_file(csv_diff_file)
            self.assert_file_content_equals(
                generated_csv_diff, expected_diff_file_name("csv")
            )

            generated_html_diff = self.read_table_from_html(html_diff_file)
            self.assert_file_content_equals(
                generated_html_diff, expected_diff_file_name("html")
            )

        if expected_counts:
            # output of table-generator should end with statistics about regressions
            counts = output[output.find("REGRESSIONS") :].strip()
            self.assertMultiLineEqual(expected_counts, counts)
        else:
            self.assertNotIn("REGRESSIONS", output)
            self.assertNotIn("STATS", output)
Code example #5
File: resources.py, Project: letonchanh/benchexec
def get_cpu_cores_per_run(coreLimit, num_of_threads, my_cgroups):
    """
    Calculate an assignment of the available CPU cores to a number
    of parallel benchmark executions such that each run gets its own cores
    without overlapping of cores between runs.
    In case the machine has hyper-threading, this method tries to avoid
    putting two different runs on the same physical core
    (but it does not guarantee this if the number of parallel runs is too high to avoid it).
    In case the machine has multiple CPUs, this method avoids
    splitting a run across multiple CPUs if the number of cores per run
    is lower than the number of cores per CPU
    (splitting a run over multiple CPUs provides worse performance).
    It will also try to split the runs evenly across all available CPUs.

    A few theoretically-possible cases are not implemented,
    for example assigning three 10-core runs on a machine
    with two 16-core CPUs (this would have unfair core assignment
    and thus undesirable performance characteristics anyway).

    The list of available cores is read from the cgroup file system,
    such that the assigned cores are a subset of the cores
    that the current process is allowed to use.
    This script currently does not support situations
    where the available cores are asymmetrically split over CPUs,
    e.g. 3 cores on one CPU and 5 on another.

    @param coreLimit: the number of cores for each run
    @param num_of_threads: the number of parallel benchmark executions
    @return a list of lists, where each inner list contains the cores for one run
    """
    try:
        # read list of available CPU cores
        allCpus = util.parse_int_list(my_cgroups.get_value(cgroups.CPUSET, 'cpus'))
        logging.debug("List of available CPU cores is %s.", allCpus)

        # read mapping of core to CPU ("physical package")
        physical_packages = [int(util.read_file('/sys/devices/system/cpu/cpu{0}/topology/physical_package_id'.format(core))) for core in allCpus]
        cores_of_package = collections.defaultdict(list)
        for core, package in zip(allCpus, physical_packages):
            cores_of_package[package].append(core)
        logging.debug("Physical packages of cores are %s.", cores_of_package)

        # read hyper-threading information (sibling cores sharing the same physical core)
        siblings_of_core = {}
        for core in allCpus:
            siblings = util.parse_int_list(util.read_file('/sys/devices/system/cpu/cpu{0}/topology/thread_siblings_list'.format(core)))
            siblings_of_core[core] = siblings
        logging.debug("Siblings of cores are %s.", siblings_of_core)
    except ValueError as e:
        sys.exit("Could not read CPU information from kernel: {0}".format(e))

    return _get_cpu_cores_per_run0(coreLimit, num_of_threads, allCpus, cores_of_package, siblings_of_core)
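
The docstring above describes the core-assignment contract. A simplified, hypothetical sketch of that contract; it ignores hyper-threading and multi-CPU balancing, which the real function handles:

def naive_core_assignment(all_cpus, core_limit, num_of_threads):
    # Give each run `core_limit` cores with no overlap between runs.
    if core_limit * num_of_threads > len(all_cpus):
        raise ValueError("Not enough CPU cores for the requested configuration")
    return [
        all_cpus[i * core_limit : (i + 1) * core_limit]
        for i in range(num_of_threads)
    ]

# Example: 8 available cores, 3 runs with 2 cores each -> [[0, 1], [2, 3], [4, 5]]
print(naive_core_assignment(list(range(8)), core_limit=2, num_of_threads=3))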
Code example #6
 def read_table_from_html(self, file):
     content = util.read_file(file)
     # only keep table
     content = content[
         content.index('<table id="dataTable">') : content.index("</table>") + 8
     ]
     return content
Code example #7
 def get_value(self, subsystem, option):
     """
     Read the given value from the given subsystem.
     Do not include the subsystem name in the option name.
     Only call this method if the given subsystem is available.
     """
     assert subsystem in self, "Subsystem {} is missing".format(subsystem)
     return util.read_file(self.per_subsystem[subsystem], subsystem + "." + option)
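
The option name is joined with the subsystem prefix to form the cgroup v1 file name. A standalone sketch of that naming convention, with an assumed mount point:

import os

def read_cgroup_option(mount_point, subsystem, option):
    # cgroup v1 files are named "<subsystem>.<option>" below the controller mount point.
    path = os.path.join(mount_point, subsystem + "." + option)
    with open(path) as f:
        return f.read().strip()

# e.g. read_cgroup_option("/sys/fs/cgroup/cpuset", "cpuset", "cpus") -> "0-7"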
Code example #8
File: cgroups.py, Project: jhensel/benchexec
 def get_value(self, subsystem, option):
     """
     Read the given value from the given subsystem.
     Do not include the subsystem name in the option name.
     Only call this method if the given subsystem is available.
     """
     assert subsystem in self, 'Subsystem {} is missing'.format(subsystem)
     return util.read_file(self.per_subsystem[subsystem], subsystem + '.' + option)
Code example #9
File: cgroups.py, Project: nianzelee/benchexec
 def get_value(self, subsystem, option):
     """
     Read the given value from the given subsystem.
     Do not include the subsystem name in the option name.
     Only call this method if the given subsystem is available.
     """
     assert subsystem in self, f"Subsystem {subsystem} is missing"
     return util.read_file(self.per_subsystem[subsystem],
                           f"{subsystem}.{option}")
Code example #10
 def read_table_from_html(self, file):
     content = util.read_file(file)
     # only keep table
     content = content[
         content.index("const data = {") + 13 : content.index("\n};") + 2
     ]
     # Pretty-print JSON for better diffs
     content = json.dumps(json.loads(content), indent=" ", sort_keys=True)
     return content
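
The slice boundaries above rely on "const data = " being 13 characters long and on keeping the trailing newline and closing brace, so the slice is a complete JSON object. A hedged illustration on a synthetic snippet:

import json

snippet = '<script>\nconst data = {\n "rows": []\n};\n</script>'
table = snippet[snippet.index("const data = {") + 13 : snippet.index("\n};") + 2]
print(json.loads(table))  # {'rows': []}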
Code example #11
 def test_output_stdout(self):
     output, _, _, _, _ = self.generate_tables_and_check_produced_files(
         [result_file('test.2015-03-03_1613.results.predicateAnalysis.xml'),
          '-f', 'csv', '-q'],
         table_prefix='test.2015-03-03_1613.results.predicateAnalysis',
         formats=[],
         output_path='-',
         )
     expected = util.read_file(here, 'expected', 'test.2015-03-03_1613.results.predicateAnalysis' + '.csv')
     self.assertMultiLineEqual(output.strip(), expected)
Code example #12
File: systeminfo.py, Project: FArian/tbf
def is_turbo_boost_enabled():
    """
    Check whether Turbo Boost (scaling CPU frequency beyond nominal frequency)
    is active on this system.
    @return: A bool, or None if Turbo Boost is not supported.
    """
    try:
        if os.path.exists(_TURBO_BOOST_FILE):
            boost_enabled = int(util.read_file(_TURBO_BOOST_FILE))
            if not (0 <= boost_enabled <= 1):
                raise ValueError('Invalid value {} for turbo boost activation'.format(boost_enabled))
            return boost_enabled != 0
        if os.path.exists(_TURBO_BOOST_FILE_PSTATE):
            boost_disabled = int(util.read_file(_TURBO_BOOST_FILE_PSTATE))
            if not (0 <= boost_disabled <= 1):
                raise ValueError('Invalid value {} for turbo boost activation'.format(boost_disabled))
            return boost_disabled != 1
    except ValueError as e:
        sys.exit("Could not read turbo-boost information from kernel: {0}".format(e))
Code example #13
File: systeminfo.py, Project: FArian/tbf
 def __init__(self, cores=None):
     """
     Create an instance that monitors the given list of cores (or all CPUs).
     """
     self.cpu_throttle_count = {}
     cpu_pattern = '[{0}]'.format(','.join(map(str, cores))) if cores else '*'
     for file in glob.glob('/sys/devices/system/cpu/cpu{}/thermal_throttle/*_throttle_count'.format(cpu_pattern)):
         try:
             self.cpu_throttle_count[file] = int(util.read_file(file))
         except Exception as e:
             logging.warning('Cannot read throttling count of CPU from kernel: %s', e)
Code example #14
 def read_table_from_html(self, file):
     content = util.read_file(file)
     # only keep table
     content = content[content.index("const data = {") +
                       13:content.index("\n};") + 2]
     # Pretty-print JSON for better diffs
     content = json.dumps(json.loads(content), indent=" ", sort_keys=True)
     content = content.replace(
         '\n "version": "{}"\n'.format(benchexec.__version__),
         '\n "version": "(test)"\n',
     )
     return content
Code example #15
File: systeminfo.py, Project: FArian/tbf
 def has_throttled(self):
     """
     Check whether any of the CPU cores monitored by this instance has
     throttled since this instance was created.
     @return a boolean value
     """
     for file, value in self.cpu_throttle_count.items():
         try:
             new_value = int(util.read_file(file))
             if new_value > value:
                 return True
         except Exception as e:
             logging.warning('Cannot read throttling count of CPU from kernel: %s', e)
     return False
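
A hedged usage sketch combining the constructor from example #13 with this check; the import path and class name are assumptions based on the file names shown in this listing:

import logging
from benchexec.systeminfo import CPUThrottleCheck  # assumed import path and class name

throttle_check = CPUThrottleCheck(cores=[0, 1])
# ... run the measured benchmark here ...
if throttle_check.has_throttled():
    logging.warning("CPU throttling occurred during the run; measurements may be unreliable.")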
Code example #16
 def __init__(self, cores=None):
     """
     Create an instance that monitors the given list of cores (or all CPUs).
     """
     self.cpu_throttle_count = {}
     cpu_pattern = "[{0}]".format(",".join(map(str,
                                               cores))) if cores else "*"
     for file in glob.glob(
             "/sys/devices/system/cpu/cpu{}/thermal_throttle/*_throttle_count"
             .format(cpu_pattern)):
         try:
             self.cpu_throttle_count[file] = int(util.read_file(file))
         except Exception as e:
             logging.warning(
                 "Cannot read throttling count of CPU from kernel: %s", e)
Code example #17
File: systeminfo.py, Project: ultimate-pa/benchexec
 def has_throttled(self):
     """
     Check whether any of the CPU cores monitored by this instance has
     throttled since this instance was created.
     @return a boolean value
     """
     for file, value in self.cpu_throttle_count.items():
         try:
             new_value = int(util.read_file(file))
             if new_value > value:
                 return True
         except Exception as e:
             logging.warning(
                 "Cannot read throttling count of CPU from kernel: %s", e)
     return False
Code example #18
File: systeminfo.py, Project: ultimate-pa/benchexec
 def __init__(self, cores=None):
     """
     Create an instance that monitors the given list of cores (or all CPUs).
     """
     self.cpu_throttle_count = {}
     cores = [str(core) for core in cores] if cores else ["*"]
     for core in cores:
         for file in glob.iglob(
                 f"/sys/devices/system/cpu/cpu{core}/thermal_throttle/*_throttle_count"
         ):
             try:
                 self.cpu_throttle_count[file] = int(util.read_file(file))
             except Exception as e:
                 logging.warning(
                     "Cannot read throttling count of CPU from kernel: %s",
                     e)
Code example #19
 def test_output_stdout(self):
     output, _, _, _, _ = self.generate_tables_and_check_produced_files(
         [
             result_file("test.2015-03-03_1613.results.predicateAnalysis.xml"),
             "-f",
             "csv",
             "-q",
         ],
         table_prefix="test.2015-03-03_1613.results.predicateAnalysis",
         formats=[],
         output_path="-",
     )
     expected = util.read_file(
         here, "expected", "test.2015-03-03_1613.results.predicateAnalysis" + ".csv"
     )
     self.assertMultiLineEqual(output.strip(), expected)
Code example #20
def get_cores_of_same_package_as(core):
    return util.parse_int_list(
        util.read_file(
            f"/sys/devices/system/cpu/cpu{core}/topology/core_siblings_list"))
Code example #21
def get_cpu_package_for_core(core):
    """Get the number of the physical package (socket) a core belongs to."""
    return int(
        util.read_file(
            f"/sys/devices/system/cpu/cpu{core}/topology/physical_package_id"))
Code example #22
def get_cpu_cores_per_run(coreLimit,
                          num_of_threads,
                          use_hyperthreading,
                          my_cgroups,
                          coreSet=None):
    """
    Calculate an assignment of the available CPU cores to a number
    of parallel benchmark executions such that each run gets its own cores
    without overlapping of cores between runs.
    In case the machine has hyper-threading, this method tries to avoid
    putting two different runs on the same physical core
    (but it does not guarantee this if the number of parallel runs is too high to avoid it).
    In case the machine has multiple CPUs, this method avoids
    splitting a run across multiple CPUs if the number of cores per run
    is lower than the number of cores per CPU
    (splitting a run over multiple CPUs provides worse performance).
    It will also try to split the runs evenly across all available CPUs.

    A few theoretically-possible cases are not implemented,
    for example assigning three 10-core runs on a machine
    with two 16-core CPUs (this would have unfair core assignment
    and thus undesirable performance characteristics anyway).

    The list of available cores is read from the cgroup file system,
    such that the assigned cores are a subset of the cores
    that the current process is allowed to use.
    This script currently does not support situations
    where the available cores are asymmetrically split over CPUs,
    e.g. 3 cores on one CPU and 5 on another.

    @param coreLimit: the number of cores for each run
    @param num_of_threads: the number of parallel benchmark executions
    @param coreSet: the list of CPU core identifiers provided by the user; None makes BenchExec use all available cores
    @return a list of lists, where each inner list contains the cores for one run
    """
    try:
        # read list of available CPU cores
        allCpus = util.parse_int_list(
            my_cgroups.get_value(cgroups.CPUSET, "cpus"))

        # Filter CPU cores according to the list of identifiers provided by a user
        if coreSet:
            invalid_cores = sorted(set(coreSet).difference(set(allCpus)))
            if len(invalid_cores) > 0:
                raise ValueError(
                    "The following provided CPU cores are not available: " +
                    ", ".join(map(str, invalid_cores)))
            allCpus = [core for core in allCpus if core in coreSet]

        logging.debug("List of available CPU cores is %s.", allCpus)

        # read mapping of core to memory region
        cores_of_memory_region = collections.defaultdict(list)
        for core in allCpus:
            coreDir = f"/sys/devices/system/cpu/cpu{core}/"
            memory_regions = _get_memory_banks_listed_in_dir(coreDir)
            if memory_regions:
                cores_of_memory_region[memory_regions[0]].append(core)
            else:
                # If some cores do not have NUMA information, skip using it completely
                logging.warning(
                    "Kernel does not have NUMA support. Use benchexec at your own risk."
                )
                cores_of_memory_region = {}
                break
        logging.debug("Memory regions of cores are %s.",
                      cores_of_memory_region)

        # read mapping of core to CPU ("physical package")
        cores_of_package = collections.defaultdict(list)
        for core in allCpus:
            package = get_cpu_package_for_core(core)
            cores_of_package[package].append(core)
        logging.debug("Physical packages of cores are %s.", cores_of_package)

        # select the more fine grained division among memory regions and physical package
        if len(cores_of_memory_region) >= len(cores_of_package):
            cores_of_unit = cores_of_memory_region
            logging.debug(
                "Using memory regions as the basis for cpu core division")
        else:
            cores_of_unit = cores_of_package
            logging.debug(
                "Using physical packages as the basis for cpu core division")

        # read hyper-threading information (sibling cores sharing the same physical core)
        siblings_of_core = {}
        for core in allCpus:
            siblings = util.parse_int_list(
                util.read_file(
                    f"/sys/devices/system/cpu/cpu{core}/topology/thread_siblings_list"
                ))
            siblings_of_core[core] = siblings
        logging.debug("Siblings of cores are %s.", siblings_of_core)
    except ValueError as e:
        sys.exit(f"Could not read CPU information from kernel: {e}")
    return _get_cpu_cores_per_run0(
        coreLimit,
        num_of_threads,
        use_hyperthreading,
        allCpus,
        cores_of_unit,
        siblings_of_core,
    )
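
The coreSet validation near the top of the try block can be illustrated in isolation. A standalone sketch with hypothetical core numbers:

def filter_cores(all_cpus, core_set):
    # Reject user-specified cores that are not actually available, then keep the rest.
    invalid = sorted(set(core_set) - set(all_cpus))
    if invalid:
        raise ValueError(
            "The following provided CPU cores are not available: "
            + ", ".join(map(str, invalid))
        )
    return [core for core in all_cpus if core in core_set]

print(filter_cores(list(range(8)), [0, 2, 4]))  # [0, 2, 4]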
Code example #23
File: __init__.py, Project: mllg/benchexec
 def assert_file_content_equals(self, content, file):
     if OVERWRITE_MODE:
         util.write_file(content, *file)
     else:
         self.assertMultiLineEqual(content, util.read_file(*file))
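
This golden-file helper either regenerates the expected files (OVERWRITE_MODE) or compares against them. A minimal sketch of how such a toggle is commonly wired up; the environment-variable name is an assumption, not taken from this listing:

import os

# Assumed wiring; the real flag may be defined differently in the test module.
OVERWRITE_MODE = os.environ.get("OVERWRITE_EXPECTED_FILES", "") == "1"

With the variable set, a test run refreshes the expected files instead of failing on differences.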
Code example #24
def mock_load_task_def_file(f):
    content = util.read_file(os.path.join(test_dir, f))
    return yaml.safe_load(content)
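
A hedged usage sketch of how such a mock is typically substituted for the real loader with unittest.mock; the patch target below is hypothetical:

from unittest import mock

# "mymodule.load_task_def_file" is a hypothetical patch target for illustration only.
with mock.patch("mymodule.load_task_def_file", side_effect=mock_load_task_def_file):
    ...  # exercise the code under test, which now reads task definitions from test_dir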
Code example #25
File: integration_test.py, Project: seahorn/benchexec
 def read_table_from_html(self, file):
     content = util.read_file(file)
     # only keep table
     content = content[content.index('<table id="dataTable">'):content.index('</table>') + 8]
     return content
Code example #26
File: integration_test.py, Project: seahorn/benchexec
 def assert_file_content_equals(self, content, file):
     if OVERWRITE_MODE:
         util.write_file(content, *file)
     else:
         self.assertMultiLineEqual(content, util.read_file(*file))
Code example #27
def get_cores_of_same_package_as(core):
    return util.parse_int_list(util.read_file('/sys/devices/system/cpu/cpu{0}/topology/core_siblings_list'.format(core)))
Code example #28
def get_cpu_package_for_core(core):
    """Get the number of the physical package (socket) a core belongs to."""
    return int(util.read_file('/sys/devices/system/cpu/cpu{0}/topology/physical_package_id'.format(core)))
Code example #29
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns, and the tool
        from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(
            benchmark_file)[:-4]  # remove ending ".xml"
        if config.name:
            self.name += "." + config.name

        self.description = None
        if config.description_file is not None:
            try:
                self.description = util.read_file(config.description_file)
            except (OSError, UnicodeDecodeError) as e:
                raise BenchExecException(
                    "File '{}' given for description could not be read: {}".
                    format(config.description_file, e))

        self.start_time = start_time
        self.instance = start_time.strftime(util.TIMESTAMP_FILENAME_FORMAT)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit("Benchmark file {} is invalid: {}".format(
                benchmark_file, e))
        if "benchmark" != rootTag.tag:
            sys.exit("Benchmark file {} is invalid: "
                     "It's root element is not named 'benchmark'.".format(
                         benchmark_file))

        # get tool
        tool_name = rootTag.get("tool")
        if not tool_name:
            sys.exit(
                "A tool needs to be specified in the benchmark definition file."
            )
        (self.tool_module, self.tool) = load_tool_info(tool_name, config)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None
        self.display_name = rootTag.get("displayName")

        def parse_memory_limit(value):
            # In a future BenchExec version, we could treat unit-less limits as bytes
            try:
                value = int(value)
            except ValueError:
                return util.parse_memory_value(value)
            else:
                raise ValueError(
                    "Memory limit must have a unit suffix, e.g., '{} MB'".
                    format(value))

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1":  # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit("Invalid value for {} limit: {}".format(
                        name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit(
                        '{} limit "{}" is invalid, it needs to be a positive number '
                        "(or -1 on the command line for disabling it).".format(
                            name, value))

        self.rlimits = {}
        handle_limit_value("Time", TIMELIMIT, config.timelimit,
                           util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit,
                           util.parse_timespan_value)
        handle_limit_value("Wall time", WALLTIMELIMIT, config.walltimelimit,
                           util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit,
                           parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        "Hard timelimit %d is smaller than timelimit %d, ignoring the former.",
                        hardtimelimit,
                        self.rlimits[TIMELIMIT],
                    )
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        self.num_of_threads = int(rootTag.get("threads", 1))
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertytag = get_propertytag(rootTag)

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        if rootTag.findall("sourcefiles"):
            sys.exit(
                "Benchmark file {} has unsupported old format. "
                "Rename <sourcefiles> tags to <tasks>.".format(benchmark_file))
        globalSourcefilesTags = rootTag.findall("tasks")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall("requiredfiles"):
            required_files = util.expand_filename_pattern(
                required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning(
                    "Pattern %s in requiredfiles tag did not match any file.",
                    required_files_tag.text,
                )
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"),
                                         self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [
                os.path.normpath(p.text) for p in result_files_tags if p.text
            ]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit(
                        "Invalid relative result-files pattern '{}'.".format(
                            pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i,
             rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(
                RunSet(rundefinitionTag, self, i + 1, globalSourcefilesTags))

        if not self.run_sets:
            logging.warning(
                "Benchmark file %s specifies no runs to execute "
                "(no <rundefinition> tags found).",
                benchmark_file,
            )

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning(
                "No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning(
                    "The selection %s does not match any run definitions of %s.",
                    config.selected_run_definitions,
                    [runSet.real_name for runSet in self.run_sets],
                )
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(
                        util.wildcard_match(run_set.real_name, selected)
                        for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, '
                        "skipping it.",
                        selected,
                    )
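
The constructor expects a benchmark XML whose root element is <benchmark> with a tool attribute and at least one <rundefinition> containing <tasks>. A minimal, hypothetical example of that shape; the attribute values and the <tasks> content are made up for illustration:

import xml.etree.ElementTree as ElementTree

EXAMPLE_XML = """\
<benchmark tool="cpachecker" threads="1">
  <rundefinition name="predicateAnalysis">
    <tasks name="example-tasks">
      <include>tasks/*.yml</include>
    </tasks>
  </rundefinition>
</benchmark>
"""

root = ElementTree.fromstring(EXAMPLE_XML)
assert root.tag == "benchmark" and root.get("tool") == "cpachecker"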
Code example #30
File: systeminfo.py, Project: ultimate-pa/benchexec
    def __init__(self):
        """
        Collect information about the host system (OS, CPU, and memory).
        """
        # get info about OS
        self.hostname = platform.node()
        self.os = platform.platform(aliased=True)

        # get info about CPU
        cpuInfo = {}
        self.cpu_max_frequency = "unknown"
        cpuInfoFilename = "/proc/cpuinfo"
        self.cpu_number_of_cores = "unknown"
        if os.path.isfile(cpuInfoFilename) and os.access(
                cpuInfoFilename, os.R_OK):
            cpuInfoFile = open(cpuInfoFilename, "rt")
            cpuInfoLines = [
                tuple(line.split(":")) for line in cpuInfoFile.read().replace(
                    "\n\n", "\n").replace("\t", "").strip("\n").split("\n")
            ]
            cpuInfo = dict(cpuInfoLines)
            cpuInfoFile.close()
            self.cpu_number_of_cores = str(
                len([line for line in cpuInfoLines if line[0] == "processor"]))
        self.cpu_model = (cpuInfo.get("model name", "unknown").strip().replace(
            "(R)", "").replace("(TM)", "").replace("(tm)", ""))
        if "cpu MHz" in cpuInfo:
            freq_hz = Decimal(
                cpuInfo["cpu MHz"]) * 1000 * 1000  # convert to Hz
            self.cpu_max_frequency = int((freq_hz).to_integral_value())

        # modern cpus may not work with full speed the whole day
        # read the number from cpufreq and overwrite cpu_max_frequency from above
        freqInfoFilename = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
        try:
            cpu_max_frequency = util.read_file(freqInfoFilename)
            self.cpu_max_frequency = int(
                cpu_max_frequency) * 1000  # convert to Hz
        except OSError:
            pass  # does not necessarily exist

        self.cpu_turboboost = is_turbo_boost_enabled()

        # get info about memory
        memInfo = {}
        memInfoFilename = "/proc/meminfo"
        if os.path.isfile(memInfoFilename) and os.access(
                memInfoFilename, os.R_OK):
            memInfoFile = open(memInfoFilename, "rt")
            memInfo = dict(
                tuple(s.split(": ")) for s in memInfoFile.read().replace(
                    "\t", "").strip("\n").split("\n"))
            memInfoFile.close()
        self.memory = memInfo.get("MemTotal", "unknown").strip()
        if self.memory.endswith(" kB"):
            # kernel uses KiB but names them kB, convert to Byte
            self.memory = int(self.memory[:-3]) * 1024

        self.environment = os.environ.copy()
        # The following variables are overridden by runexec anyway.
        self.environment.pop("HOME", None)
        self.environment.pop("TMPDIR", None)
        self.environment.pop("TMP", None)
        self.environment.pop("TEMPDIR", None)
        self.environment.pop("TEMP", None)