def __init__(self, data: t.Dict[str, t.List[Number]] = None,
             attributes: t.Dict[str, str] = None,
             recorded_error: RecordedError = None,
             external: bool = False):
    """
    Initializes a new run data object.

    :param data: optional dictionary mapping each property to a list of actual values
    :param attributes: dictionary of optional attributes that describe its program block
    :param recorded_error: either a recorded program error or an internal error
    :param external: does the data come from a prior benchmarking?
    """
    typecheck(data, E(None) | Dict(unknown_keys=True))
    typecheck(attributes, Exact(None) | Dict(key_type=Str(), unknown_keys=True))
    self.external = external  # type: bool
    """ Does the data come from a prior benchmarking? """
    self.properties = []  # type: t.List[str]
    """ List of measured properties. They might not all be measured the same number of times. """
    self.data = {}  # type: t.Dict[str, t.List[Number]]
    """ Raw benchmarking data, mapping properties to their corresponding values """
    if data is not None and len(data) > 0:
        self.add_data_block(data)
    self.attributes = attributes or {}  # type: t.Dict[str, str]
    """ Dictionary of optional attributes that describe its program block """
    self.tags = self.attributes["tags"] if "tags" in self.attributes else None
    self.max_runs = get_for_tags("run/max_runs_per_tag", "run/max_runs", self.tags, min)
    self.recorded_error = recorded_error
    self.discarded = False
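# --- Editor's usage sketch, not part of the original source. It shows how a run
# data object is typically constructed; the import path and the property name
# "time" are illustrative assumptions, not confirmed by this file.
from temci.report.rundata import RunData  # assumed module path

rd = RunData(data={"time": [0.51, 0.49, 0.50]},
             attributes={"description": "fib 30", "tags": []},
             external=False)
# data passed to the constructor is routed through add_data_block, so the
# property list is initialized and kept sorted
assert rd.properties == ["time"]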
def process_options(options: CmdOptionList, one_line=False):
    typecheck(options, CmdOptionList)
    strs = []
    for option in sorted(options):
        multiple = isinstance(option.type_scheme, List) or isinstance(option.type_scheme, ListOrTuple)
        rounds = 10 if multiple else 1  # hack to allow multiple applications of an option
        assert isinstance(option, CmdOption)
        descr = "{}".format(option.description) if option.description is not None else "Undoc"
        option_str = "--{}".format(option.option_name)
        if option.has_short:
            option_str = "{{-{},--{}}}".format(option.short, option.option_name)
        if option.is_flag:
            option_str = "{{--{o},--no-{o}}}".format(o=option.option_name)
        new_completion = ""
        if option.has_completion_hints and "zsh" in option.completion_hints:
            new_completion = '{option_str}"[{descr}]: :{hint}"'.format(
                option_str=option_str, descr=descr,
                hint=option.completion_hints["zsh"])
        else:
            format_str = '{option_str}"[{descr}]"' if option.is_flag else '{option_str}"[{descr}]: :()"'
            new_completion = format_str.format(option_str=option_str, descr=descr)
        for i in range(rounds):
            strs.append(new_completion)
    if one_line:
        return " ".join(strs)
    return "\n\t".join(strs)
def extend(self, lines: t.List[Line]):
    """
    Extend the lines of this section by the passed lines.

    :param lines: appended lines
    """
    typecheck(lines, List(T(Line)))
    self.lines.extend(lines)
def randomize_segment(self, segment_name: str):
    """
    Randomizes the segment part in the current section by splitting it into
    label-induced subsections and shuffling them.

    :param segment_name: bss, data or rodata (text doesn't make any sense)
    """
    typecheck(segment_name, ExactEither("bss", "data", "rodata"))
    i = 0
    while i < len(self.lines):
        possible_starts = ["." + segment_name, ".section " + segment_name]
        while i < len(self.lines) and \
                not any(self.lines[i].startswith(x) for x in possible_starts):
            i += 1
        if i == len(self.lines):
            return
        j = i + 1
        while j < len(self.lines) and not self.lines[j].split_section_before():
            j += 1
        if j == len(self.lines):
            return
        parts_to_shuffle = self.lines[i + 1:j]
        # split the lines at the labels and shuffle these subsections
        subsections = [[]]
        for line in parts_to_shuffle:
            if line.is_label() and len(subsections[-1]) > 0:
                subsections.append([])
            subsections[-1].append(line)
        random.shuffle(subsections)
        parts_to_shuffle = [x for sublist in subsections for x in sublist]
        self.lines[i + 1:j] = parts_to_shuffle
        i = j
def from_registry(cls, registry: type, name_prefix: str = None) -> 'CmdOptionList':
    """
    Creates a list of CmdOption objects from a registry.
    It creates an activation flag (--OPT/--no-OPT) for each registered plugin and
    an option with the name OPT_PREF for each plugin preference.
    Deeper nesting is intentionally not supported.

    :param registry: used registry
    :param name_prefix: prefix of each option name (usable to avoid ambiguity problems)
    :return: list of CmdOptions
    """
    assert issubclass(registry, AbstractRegistry)
    typecheck_locals(name_prefix=Str() | E(None))
    name_prefix = name_prefix if name_prefix is not None else ""
    ret_list = CmdOptionList()
    for plugin in registry.registry:
        active_key = "{}_active".format("/".join([registry.settings_key_path, plugin]))
        ret_list.append(CmdOption(
            option_name=name_prefix + plugin,
            settings_key=active_key
        ))
        misc_key = "{}_misc".format("/".join(registry.settings_key_path.split("/") + [plugin]))
        misc = Settings().get_type_scheme(misc_key)
        typecheck(misc, Dict)
        for misc_sub_key in misc.data:
            misc_sub = misc[misc_sub_key]
            if not isinstance(misc_sub, Dict):
                ret_list.append(CmdOption(
                    option_name="{}{}_{}".format(name_prefix, plugin, misc_sub_key),
                    settings_key="{}/{}".format(misc_key, misc_sub_key)
                ))
    return ret_list
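# --- Editor's sketch, not part of the original source: the key/option naming
# scheme that from_registry produces, distilled to plain string formatting. The
# registry path "run/exec_plugins" and the plugin name "nice" are purely
# illustrative assumptions.
settings_key_path = "run/exec_plugins"
plugin = "nice"
active_key = "{}_active".format("/".join([settings_key_path, plugin]))
misc_key = "{}_misc".format("/".join(settings_key_path.split("/") + [plugin]))
assert active_key == "run/exec_plugins/nice_active"  # backs the --nice/--no-nice flag
assert misc_key == "run/exec_plugins/nice_misc"      # its sub keys become --nice_SUBKEY options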
def get_sub_set(self, set_id: int) -> str:
    """ Gets the name of the benchmarking cpu set with the given id / number (starting at zero). """
    if self.parallel == 0:
        return CONTROLLER_SUB_BENCH_SET
    if self.active:
        typecheck(set_id, Int(range=range(0, self.parallel_number)))
    return SUB_BENCH_SET.format(set_id)
def __init__(self, data: t.Dict[str, t.List[Number]] = None,
             attributes: t.Dict[str, str] = None,
             external: bool = False):
    """
    Initializes a new run data object.

    :param data: optional dictionary mapping each property to a list of actual values
    :param attributes: dictionary of optional attributes that describe its program block
    :param external: does the data come from a prior benchmarking?
    """
    typecheck(data, E(None) | Dict(all_keys=False))
    typecheck(attributes, Exact(None) | Dict(key_type=Str(), all_keys=False))
    self.external = external  # type: bool
    """ Does the data come from a prior benchmarking? """
    self.properties = []  # type: t.List[str]
    """ List of measured properties. They might not all be measured the same number of times. """
    self.data = {}  # type: t.Dict[str, t.List[Number]]
    """ Raw benchmarking data, mapping properties to their corresponding values """
    if data is not None and len(data) > 0:
        self.add_data_block(data)
    self.attributes = attributes or {}  # type: t.Dict[str, str]
    """ Dictionary of optional attributes that describe its program block """
def modify_setting(self, key: str, type_scheme: Type):
    """
    Modifies the setting with the given key and adds it if it doesn't exist.

    :param key: key of the setting
    :param type_scheme: type scheme of the setting (carries the default value)
    :raises SettingsError: if the settings domain (the key without the last element) doesn't exist
    :raises TypeError: if the default value doesn't adhere to the type scheme
    """
    if self.is_obsolete(key):
        logging.info("Using obsolete setting {!r}: {}".format(key, self.obsoleteness_reason(key)))
        return
    path = key.split("/")
    domain = "/".join(path[:-1])
    if len(path) > 1 and not self.validate_key_path(path[:-1]) \
            and not isinstance(self.get(domain), dict):
        raise SettingsError("Setting domain {} doesn't exist".format(domain))
    tmp_typ = self.type_scheme
    tmp_prefs = self.prefs
    for subkey in path[:-1]:
        tmp_typ = tmp_typ[subkey]
        tmp_prefs = tmp_prefs[subkey]
    tmp_typ[path[-1]] = type_scheme
    if path[-1] in tmp_prefs:
        if type_scheme.typecheck_default:
            typecheck(tmp_prefs[path[-1]], type_scheme)
        tmp_typ[path[-1]] = type_scheme
    else:
        tmp_prefs[path[-1]] = type_scheme.get_default()
def __init__(self, runs: t.List[RunData], tester: Tester = None,
             external_count: int = 0,
             property_descriptions: t.Dict[str, str] = None):
    """
    Don't use the constructor directly, use init_from_dicts if possible.

    :param runs: list of run data objects
    :param tester: used tester or the tester that is set in the settings
    :param external_count: number of external program blocks (blocks for which the data
                           was obtained in a different benchmarking session)
    :param property_descriptions: mapping of some properties to their descriptions or longer versions
    """
    self.tester = tester or TesterRegistry.get_for_name(TesterRegistry.get_used(),
                                                        Settings()["stats/uncertainty_range"])  # type: Tester
    """ Used statistical tester """
    typecheck(runs, List(T(RunData)))
    self.runs = runs  # type: t.List[RunData]
    """ Data of several runs from several measured program blocks """
    self.external_count = external_count  # type: int
    """ Number of external program blocks (blocks for which the data was obtained
        in a different benchmarking session) """
    self.property_descriptions = property_descriptions or {}  # type: t.Dict[str, str]
def modify_setting(self, key: str, type_scheme: Type):
    """
    Modifies the setting with the given key and adds it if it doesn't exist.

    :param key: key of the setting
    :param type_scheme: type scheme of the setting (carries the default value)
    :raises SettingsError: if the settings domain (the key without the last element) doesn't exist
    :raises TypeError: if the default value doesn't adhere to the type scheme
    """
    path = key.split("/")
    domain = "/".join(path[:-1])
    if len(path) > 1 and not self.validate_key_path(path[:-1]) \
            and not isinstance(self.get(domain), dict):
        raise SettingsError("Setting domain {} doesn't exist".format(domain))
    tmp_typ = self.type_scheme
    tmp_prefs = self.prefs
    for subkey in path[:-1]:
        tmp_typ = tmp_typ[subkey]
        tmp_prefs = tmp_prefs[subkey]
    tmp_typ[path[-1]] = type_scheme
    if path[-1] in tmp_prefs:
        if type_scheme.typecheck_default:
            typecheck(tmp_prefs[path[-1]], type_scheme)
        tmp_typ[path[-1]] = type_scheme
    else:
        tmp_prefs[path[-1]] = type_scheme.get_default()
def typecheck(self):
    """
    Check that the object's own properties that do not start with ``_`` (excluding the
    ``id`` property) match the ``config_type``.

    :raises TypeError: if the check fails
    """
    typecheck(self.serialize(exclude_id=True), self.config_type)
def _create_cpuset(self, name: str, cpus: t.List[int]):
    """ Create the cpuset with the given name and assign the given cpu cores to it """
    typecheck(cpus, List(Int()))
    cpu_range = self._ints_to_str(cpus)
    path = []
    for part in name.split("/"):
        path.append(part)
        self._cset("set --cpu {} {} ".format(cpu_range, "/".join(path)))
def append(self, line: Line):
    """
    Append the passed line to the lines of this section.

    :param line: appended line
    """
    typecheck(line, T(Line))
    self.lines.append(line)
def __init__(self, config: t.Dict[str, t.Union[int, bool]]):
    """
    Creates an AssemblyProcessor from the passed configuration dictionary.

    :param config: passed configuration dictionary
    """
    self.config = self.config_scheme.get_default()  # type: t.Dict[str, t.Union[int, bool]]
    self.config.update(config)
    typecheck(self.config, self.config_scheme)
def process_options(options: CmdOptionList) -> str:
    typecheck(options, CmdOptionList)
    strs = []
    for option in sorted(options.options):
        strs.append("--" + option.option_name)
        if option.short is not None:
            strs.append("-" + option.short)
        if option.is_flag:
            strs.append("--no-" + option.option_name)
    return "\n\t".join(strs)
def default(self, value: t.Optional[t.Any], key: str):
    """
    Returns the passed value if it isn't None, else the settings value under the passed key.

    :param value: passed value
    :param key: passed settings key
    """
    if value is None:
        return self[key]
    typecheck(value, self.get_type_scheme(key))
    return value
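# --- Editor's usage sketch, not part of the original source: how default() is
# used throughout this code base, e.g. in CPUSet.__init__ below. A passed None
# falls back to the configured value; a non-None value is typechecked against
# the key's scheme. The import path is an assumption.
from temci.utils.settings import Settings  # assumed module path

parallel = Settings().default(None, "run/cpuset/parallel")  # -> configured value
parallel = Settings().default(2, "run/cpuset/parallel")     # -> 2 (after typecheck)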
def __init__(self, content: str, number: int):
    """
    Constructs a new Line object.

    :param content: content of the line (without line separator)
    :param number: line number (starting at 0)
    """
    typecheck(content, Str())
    typecheck(number, Int())
    self.content = content  # type: str
    """ Content of this line """
    self.number = number  # type: int
    """ Number of this line (starting at zero) in the original assembler file """
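# --- Editor's usage sketch, not part of the original source: Line objects pair
# raw assembler text with their zero-based position in the file. Line is the
# class whose constructor is shown above.
lines = [Line(".data", 0), Line("x:", 1), Line(".long 42", 2)]
assert lines[1].number == 1 and lines[1].content == "x:"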
def from_lines(cls, lines: t.List[Line]) -> 'Section':
    """
    Creates a new section from the passed lines.
    A FunctionSection is created if any of the lines seems to be a function label
    or a function starting comment.

    :param lines: passed lines
    :return: created FunctionSection or Section object
    """
    typecheck(lines, List(T(Line)))
    libfirm_begin_pattern = re.compile(r"#[- ]* Begin ")
    if any(line.is_function_label() or libfirm_begin_pattern.match(line.content)
           for line in lines):
        return FunctionSection(lines)
    return Section(lines)
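# --- Editor's sketch, not part of the original source: from_lines dispatches on
# content, so a function label (or a libfirm "Begin" comment) yields a
# FunctionSection, otherwise a plain Section. The exact label syntax recognized
# by is_function_label() is an assumption here.
sec = Section.from_lines([Line(".text", 0), Line(".long 1", 1)])
# sec is a plain Section; a block containing a function label such as "main:"
# would come back as a FunctionSection instead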
def add_data_block(self, data_block: t.Dict[str, t.List[Number]]):
    """
    Adds a block of data.

    :param data_block: maps each of the run data's properties to a list of actual values
                       (from each benchmarking run)
    """
    typecheck(data_block,
              Dict(key_type=Str(), value_type=List(Int() | Float()), all_keys=False))
    self.properties = set(self.properties).union(set(data_block.keys()))
    for prop in data_block:
        if prop not in self.data:
            self.data[prop] = []
            self.properties.add(prop)
        self.data[prop].extend(data_block[prop])
    self.properties = sorted(list(self.properties))
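# --- Editor's sketch, not part of the original source: the shape that
# add_data_block enforces, demonstrated with the typecheck DSL used above. The
# import path and the property names are assumptions.
from temci.utils.typecheck import typecheck, Dict, List, Str, Int, Float  # assumed path

typecheck({"time": [0.5, 0.51], "cache-misses": [1000, 1100]},
          Dict(key_type=Str(), value_type=List(Int() | Float()), all_keys=False))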
def add_lines(self, lines: t.List[t.Union[Line, str]]):
    """
    Add the passed assembly lines.

    :param lines: either a list of Lines or strings
    """
    typecheck(lines, List(T(Line) | Str()))
    start_num = len(self._lines)
    for (i, line) in enumerate(lines):
        if isinstance(line, T(Line)):
            line.number = i + start_num
            self._lines.append(line)
        else:
            self._lines.append(Line(line, i + start_num))
    self._init_sections()
def _number_of_parallel_sets(self, base_core_number: int, parallel: bool,
                             sub_core_number: int) -> int:
    """ Calculates the number of possible parallel sets. """
    typecheck([base_core_number, parallel, sub_core_number], List(Int()))
    if base_core_number + 1 + sub_core_number > self.av_cores and self.active:
        raise ValueError("Invalid values for base_core_number and sub_core_number "
                         "on system with just {} cores. Note: The benchmark controller "
                         "needs a cpuset too.".format(self.av_cores))
    av_cores_for_par = self.av_cores - base_core_number - 1
    if parallel:
        return av_cores_for_par // sub_core_number
    return 1
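# --- Editor's worked example, not part of the original source, of the formula
# above: on a machine with av_cores = 8, base_core_number = 2 and
# sub_core_number = 2, one core is reserved for the benchmark controller,
# leaving av_cores_for_par = 8 - 2 - 1 = 5 cores, i.e. 5 // 2 = 2 parallel sets.
av_cores, base_core_number, sub_core_number = 8, 2, 2
assert (av_cores - base_core_number - 1) // sub_core_number == 2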
def cmd_option(option: t.Union[CmdOption, CmdOptionList], name_prefix: str = None,
               validate: bool = None) -> t.Callable[[t.Callable], t.Callable]:
    """
    Wrapper around click.option that works with CmdOption objects.
    If option is a list of CmdOptions then the type_scheme_option decorators are chained.
    Also supports nested lists in the same manner.

    :param option: CmdOption or (possibly nested) list of CmdOptions
    :param name_prefix: prefix of all options
    :param validate: validate the setting, or validate only the outermost option if None
    :return: click.option(...) like decorator
    """
    typecheck(option, T(CmdOption) | T(CmdOptionList))
    name_prefix = name_prefix or ""
    typecheck(name_prefix, Str())
    if isinstance(option, CmdOption):
        return type_scheme_option(option_name=name_prefix + option.option_name,
                                  type_scheme=option.type_scheme,
                                  short=option.short,
                                  is_flag=option.is_flag,
                                  callback=option.callback,
                                  with_default=option.has_default,
                                  default=option.default,
                                  validate_settings=validate)

    def func(f: t.Callable):
        name = f.__name__
        #args = f.__arguments__
        annotations = f.__annotations__
        module = f.__module__
        doc = f.__doc__
        qname = f.__qualname__
        for i, opt in enumerate(sorted(option.options)):
            validate = None
            if isinstance(opt, CmdOption) and i == 0:
                validate = True
            f = cmd_option(opt, name_prefix, validate=validate)(f)
        f.__name__ = name[0:-2] if name.endswith("_") else name
        f.__qualname__ = qname[0:-2] if qname.endswith("_") else qname
        #f.__args__ = args
        f.__annotations__ = annotations
        f.__module__ = module
        f.__doc__ = doc
        return f

    return func
def move_process_to_set(self, pid: int, set_id: int):
    """
    Moves the process with the passed id to the parallel sub cpuset with the passed id.

    :param pid: passed process id
    :param set_id: passed parallel sub cpuset id
    """
    if not self.active:
        return
    try:
        typecheck(pid, Int())
        typecheck(set_id, Int(range=range(0, self.parallel_number)))
        self._move_process_to_set(SUB_BENCH_SET.format(set_id), pid)
    except BaseException:
        logging.error("Forced teardown of CPUSet")
        self.teardown()
        raise
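# --- Editor's usage sketch, not part of the original source: pinning a
# just-spawned benchmark process into one of the parallel sub cpusets. cpu_set
# is assumed to be an initialized CPUSet instance and "./bench" an illustrative
# command; the call is a no-op when the set isn't active.
import subprocess

proc = subprocess.Popen(["./bench"])
cpu_set.move_process_to_set(proc.pid, 0)  # move into the first parallel sub set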
def process_misc_commands_case():
    ret_str = ""
    for misc_cmd in misc_commands:
        args = []
        if "sub_commands" in misc_commands[misc_cmd]:
            args = " ".join(sorted(misc_commands[misc_cmd]["sub_commands"].keys()))
        else:
            typecheck(misc_commands[misc_cmd], CmdOptionList)
            args = process_options(misc_commands[misc_cmd].append(common_options))
        ret_str += """
{misc_cmd})
    args=({sub_cmds})
    ;;
""".format(misc_cmd=misc_cmd, sub_cmds=args)
    return ret_str
def init_from_dicts(cls, runs: t.List[t.Union[t.Dict[str, str], t.Dict[str, t.List[Number]]]] = None,
                    external: bool = False) -> 'RunDataStatsHelper':
    """
    Expected structure of the stats settings and the runs parameter::

        "stats": {
            "tester": ...,
            "properties": ["prop1", ...],
            "uncertainty_range": (0.1, 0.3)
        }

        "runs": [
            {"attributes": {"attr1": ..., ...},
             "data": {"__ov-time": [...], ...},
             ["property_descriptions": {"__ov-time": "Overall time"}]},
            ...
        ]

    :param runs: list of dictionaries representing the benchmarking runs for each program block
    :param external: are the passed runs not from this benchmarking session but from another?
    :raises ValueError: if the runs parameter doesn't have the correct structure
    """
    typecheck(runs,
              List(Dict({
                       "data": Dict(key_type=Str(), value_type=List(Int() | Float()), all_keys=False)
                               | NonExistent(),
                       "attributes": Dict(key_type=Str(), all_keys=False)
                   }, all_keys=False)
                   | Dict({
                       "property_descriptions": NonExistent()
                           | Dict(key_type=Str(), value_type=Str(), all_keys=False)
                   })),
              value_name="runs parameter")
    run_datas = []
    runs = runs or []  # type: t.List[dict]
    prop_descrs = {}  # type: t.Dict[str, str]
    for run in runs:
        if "property_descriptions" in run:
            prop_descrs.update(run["property_descriptions"])
        else:
            if "data" not in run:
                run["data"] = {}
            run_datas.append(RunData(run["data"], run["attributes"], external=external))
    return RunDataStatsHelper(run_datas,
                              external_count=len(run_datas) if external else 0,
                              property_descriptions=prop_descrs)
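# --- Editor's sketch, not part of the original source: a minimal runs list
# matching the structure documented above; property names and attribute values
# are illustrative. RunDataStatsHelper is the surrounding class.
runs = [
    {"attributes": {"description": "fib 30"},
     "data": {"__ov-time": [0.51, 0.49, 0.50]}},
    {"property_descriptions": {"__ov-time": "Overall time"}},
]
helper = RunDataStatsHelper.init_from_dicts(runs, external=False)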
def add_data_block(self, data_block: t.Dict[str, t.List[Number]]):
    """
    Adds a block of data.

    :param data_block: maps each of the run data's properties to a list of actual values
                       (from each benchmarking run)
    """
    typecheck(data_block,
              Dict(key_type=Str(), value_type=List(Int() | Float()), unknown_keys=True))
    self.properties = set(self.properties).union(set(data_block.keys()))
    for prop in data_block:
        if prop not in self.data:
            self.data[prop] = []
            self.properties.add(prop)
        self.data[prop].extend(data_block[prop])
    self.properties = sorted(list(self.properties))
def __init__(self, runs: t.List[RunData], tester: Tester = None,
             external_count: int = 0,
             property_descriptions: t.Dict[str, str] = None,
             errorneous_runs: t.List[RunData] = None,
             included_blocks: str = None,
             env_info: FORMATTED_ENV_INFO = None):
    """
    Don't use the constructor directly, use init_from_dicts if possible.

    :param runs: list of run data objects
    :param tester: used tester or the tester that is set in the settings
    :param external_count: number of external program blocks (blocks for which the data
                           was obtained in a different benchmarking session)
    :param property_descriptions: mapping of some properties to their descriptions or longer versions
    :param errorneous_runs: runs that resulted in errors
    :param included_blocks: include query
    :param env_info: formatted environment info
    """
    self.tester = tester or TesterRegistry.get_for_name(TesterRegistry.get_used(),
                                                        Settings()["stats/uncertainty_range"])  # type: Tester
    """ Used statistical tester """
    typecheck(runs, List(T(RunData)))
    self.runs = filter_runs(runs, included_blocks
                            or Settings()["report/included_blocks"])  # type: t.List[RunData]
    self.errorneous_runs = errorneous_runs or [r for r in self.runs if r.has_error()]
    self.runs = [r for r in self.runs
                 if not r.has_error() or any(len(v) > 0 for v in r.data.values())]
    """ Data of several runs from several measured program blocks """
    self.external_count = external_count  # type: int
    """ Number of external program blocks (blocks for which the data was obtained
        in a different benchmarking session) """
    self.property_descriptions = property_descriptions or {}  # type: t.Dict[str, str]
    self.env_info = env_info or []
def _number_of_parallel_sets(self, base_core_number: int, parallel: bool,
                             sub_core_number: int) -> int:
    """ Calculates the number of possible parallel sets. """
    typecheck([base_core_number, parallel, sub_core_number], List(Int()))
    if base_core_number + (0 if self.temci_in_base_set else 1) + sub_core_number > self.av_cores \
            and self.active:
        raise ValueError("Invalid values for base_core_number and sub_core_number "
                         "on system with just {} cores.{}".format(
                             self.av_cores,
                             "" if self.temci_in_base_set else " Note: temci needs a cpuset too."))
    av_cores_for_par = self.av_cores - base_core_number - (0 if self.temci_in_base_set else 1)
    if parallel:
        return av_cores_for_par // sub_core_number
    return 1
def cmd_option(option: t.Union[CmdOption, CmdOptionList], name_prefix: str = None) \
        -> t.Callable[[t.Callable], t.Callable]:
    """
    Wrapper around click.option that works with CmdOption objects.
    If option is a list of CmdOptions then the type_scheme_option decorators are chained.
    Also supports nested lists in the same manner.

    :param option: CmdOption or (possibly nested) list of CmdOptions
    :param name_prefix: prefix of all options
    :return: click.option(...) like decorator
    """
    typecheck(option, T(CmdOption) | T(CmdOptionList))
    name_prefix = name_prefix or ""
    typecheck(name_prefix, Str())
    if isinstance(option, CmdOption):
        return type_scheme_option(option_name=name_prefix + option.option_name,
                                  type_scheme=option.type_scheme,
                                  short=option.short,
                                  is_flag=option.is_flag,
                                  callback=option.callback,
                                  with_default=option.has_default,
                                  default=option.default)

    def func(f: t.Callable):
        name = f.__name__
        #args = f.__arguments__
        annotations = f.__annotations__
        module = f.__module__
        doc = f.__doc__
        qname = f.__qualname__
        for opt in sorted(option.options):
            f = cmd_option(opt, name_prefix)(f)
        f.__name__ = name[0:-2] if name.endswith("_") else name
        f.__qualname__ = qname[0:-2] if qname.endswith("_") else qname
        #f.__args__ = args
        f.__annotations__ = annotations
        f.__module__ = module
        f.__doc__ = doc
        return f

    return func
def __setitem__(self, key: t.Union[Key, KeySubKey], value: dict):
    key, subkey = self._key_subkey(key, normalize=False)
    key_n = self._normalize_key(key)
    if key_n not in self._data:
        #from temci.package.action import Action
        self._data[key_n] = {"value": {}, "entry_type": "any"}
    if not isinstance(key, str):
        if key.name not in self._entry_types:
            self.add_entry_type(key.name, key.db_entry_type)
        self._data[key_n]["entry_type"] = key.name
        if key.db_entry_type.has_default():
            self._data[key_n]["value"] = key.db_entry_type.get_default()
    entry_type = self._entry_types[self._data[key_n]["entry_type"]]
    if subkey:
        typecheck_locals(value=entry_type[subkey])
        self._data[key_n]["value"][subkey] = value
    else:
        val = entry_type.get_default() if entry_type.has_default() else {}
        val.update(value)
        typecheck(val, entry_type)
        self._data[key_n]["value"] = val
def init_from_dicts(cls, runs: t.List[t.Union[t.Dict[str, str], t.Dict[str, t.List[Number]]]] = None,
                    external: bool = False, included_blocks: str = None) -> 'RunDataStatsHelper':
    """
    Expected structure of the stats settings and the runs parameter::

        "stats": {
            "tester": ...,
            "properties": ["prop1", ...],
            "uncertainty_range": (0.1, 0.3)
        }

        "runs": [
            {"attributes": {"attr1": ..., ..., ["description": …], ["tags": …]},
             "data": {"__ov-time": [...], ...},
             "error": {"return_code": …, "output": "…", "error_output": "…"},
             "internal_error": {"message": "…"}
             (either "error" or "internal_error" might be present)
             ["property_descriptions": {"__ov-time": "Overall time", …}]
             ["env_info": …]},
            ...
        ]

    :param runs: list of dictionaries representing the benchmarking runs for each program block
    :param external: are the passed runs not from this benchmarking session but from another?
    :param included_blocks: include query
    :raises ValueError: if the runs parameter has an incorrect structure
    :return: created stats helper
    """
    typecheck(runs,
              List(Dict({
                       "data": Dict(key_type=Str(), value_type=List(Int() | Float()), unknown_keys=True)
                               | NonExistent(),
                       "run_config": Dict(unknown_keys=True)
                   }, unknown_keys=True)
                   | RunData.block_type_scheme
                   | RunData.property_descriptions_scheme
                   | RunData.env_info_scheme),
              value_name="runs parameter")
    run_datas = []
    runs = runs or []  # type: t.List[dict]
    prop_descrs = {}  # type: t.Dict[str, str]
    env_info = []
    for run in runs:
        if "property_descriptions" in run:
            prop_descrs.update(run["property_descriptions"])
        elif "env_info" in run:
            env_info = run["env_info"]
        else:
            if "data" not in run:
                run["data"] = {}
            error = None
            if "error" in run:
                error = RecordedProgramError(run["error"]["message"], run["error"]["output"],
                                             run["error"]["error_output"], run["error"]["return_code"])
            elif "internal_error" in run:
                error = RecordedInternalError(run["internal_error"]["message"])
            run_datas.append(RunData(run["data"],
                                     run["attributes"] if "attributes" in run else {},
                                     recorded_error=error, external=external))
    return RunDataStatsHelper(run_datas,
                              external_count=len(run_datas) if external else 0,
                              property_descriptions=prop_descrs,
                              included_blocks=included_blocks,
                              env_info=env_info)
def __init__(self, active: bool = has_root_privileges(), base_core_number: int = None,
             parallel: int = None, sub_core_number: int = None):
    """
    Initializes the cpu sets and determines the number of parallel programs
    (parallel_number variable).

    :param active: are cpu sets actually used?
    :param base_core_number: number of cpu cores for the base (remaining part of the) system
    :param parallel: 0: benchmark sequentially, > 0: benchmark in parallel with n instances,
                     -1: determine n automatically
    :param sub_core_number: number of cpu cores per parallel running program
    :raises ValueError: if the passed parameters don't work together on the current platform
    :raises EnvironmentError: if the environment can't be set up properly (e.g. no root privileges)
    """
    #self.bench_set = "bench.set"
    self.active = active and has_root_privileges()  # type: bool
    """ Are cpu sets actually used? """
    self.base_core_number = Settings().default(base_core_number, "run/cpuset/base_core_number")  # type: int
    """ Number of cpu cores for the base (remaining part of the) system """
    self.parallel = Settings().default(parallel, "run/cpuset/parallel")  # type: int
    """ 0: benchmark sequentially, > 0: benchmark in parallel with n instances, -1: determine n automatically """
    self.sub_core_number = Settings().default(sub_core_number, "run/cpuset/sub_core_number")  # type: int
    """ Number of cpu cores per parallel running program """
    self.av_cores = len(self._cpus_of_set("")) if active else multiprocessing.cpu_count()  # type: int
    """ Number of available cpu cores """
    self.parallel_number = 0  # type: int
    """ Number of used parallel instances, zero if the benchmarking is done sequentially """
    if self.parallel != 0:
        if self.parallel == -1:
            self.parallel_number = self._number_of_parallel_sets(self.base_core_number, True,
                                                                 self.sub_core_number)
        else:
            self.parallel_number = self.parallel
            if self.parallel > self._number_of_parallel_sets(self.base_core_number, True,
                                                             self.sub_core_number) \
                    and self.active:
                raise ValueError("Invalid values for base_core_number and sub_core_number "
                                 "on system with just {} cores. Note: The benchmark controller "
                                 "needs a cpuset too.".format(self.av_cores))
        self.base_core_number = self.av_cores - self.sub_core_number * self.parallel_number - 1
    if not active:
        if not has_root_privileges():
            logging.warning("CPUSet functionality is disabled because root privileges are missing.")
        return
    logging.info("Initialize CPUSet")
    typecheck(self.base_core_number, PositiveInt())
    typecheck(self.parallel_number, NaturalNumber())
    self.own_sets = [SUB_BENCH_SET.format(i) for i in range(0, self.parallel_number)] \
                    + [CONTROLLER_SUB_BENCH_SET, NEW_ROOT_SET, BENCH_SET]
    try:
        self._init_cpuset()
    except BaseException:
        logging.error("Forced teardown of CPUSet")
        self.teardown()
        raise
    logging.info("Finished initializing CPUSet")
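# --- Editor's usage sketch, not part of the original source: a typical parallel
# setup. With active=True this requires root privileges and the cset tool;
# parallel=-1 lets _number_of_parallel_sets pick the instance count. The import
# path is an assumption.
from temci.run.cpuset import CPUSet  # assumed module path

cpu_set = CPUSet(parallel=-1, sub_core_number=2)  # base_core_number from settings
try:
    name = cpu_set.get_sub_set(0)  # name of the first parallel benchmarking set
finally:
    cpu_set.teardown()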
def __init__(self, runs: t.List[dict] = None, append: bool = None, show_report: bool = None):
    """
    Creates an instance and sets up everything.

    :param runs: list of dictionaries that represent run program blocks;
                 if None, Settings()["run/in"] is used
    :param append: append to the old benchmarks if there are any in the result file?
    :param show_report: show a short report after finishing the benchmarking?
    """
    if runs is None:
        typecheck(Settings()["run/in"], ValidYamlFileName())
        with open(Settings()["run/in"], "r") as f:
            runs = yaml.load(f)
    typecheck(runs, List(Dict({
        "attributes": Dict(all_keys=False, key_type=Str()),
        "run_config": Dict(all_keys=False)
    })))
    self.runs = runs  # type: t.List[dict]
    """ List of dictionaries that represent run program blocks """
    self.run_blocks = []  # type: t.List[RunProgramBlock]
    """ Run program blocks for each dictionary in ``runs`` """
    for (id, run) in enumerate(runs):
        self.run_blocks.append(RunProgramBlock.from_dict(id, copy.deepcopy(run)))
    self.append = Settings().default(append, "run/append")  # type: bool
    """ Append to the old benchmarks if there are any in the result file? """
    self.show_report = Settings().default(show_report, "run/show_report")  # type: bool
    """ Show a short report after finishing the benchmarking? """
    self.stats_helper = None  # type: RunDataStatsHelper
    """ Used stats helper to help with measurements """
    typecheck(Settings()["run/out"], FileName())
    if self.append:
        run_data = []
        try:
            if os.path.exists(Settings()["run/out"]):
                with open(Settings()["run/out"], "r") as f:
                    run_data = yaml.load(f)
            self.stats_helper = RunDataStatsHelper.init_from_dicts(run_data, external=True)
            for run in runs:
                self.stats_helper.runs.append(RunData(attributes=run["attributes"]))
        except:
            self.teardown()
            raise
    else:
        self.stats_helper = RunDataStatsHelper.init_from_dicts(copy.deepcopy(runs))
        #if Settings()["run/remote"]:
        #    self.pool = RemoteRunWorkerPool(Settings()["run/remote"], Settings()["run/remote_port"])
        if os.path.exists(Settings()["run/out"]):
            os.remove(Settings()["run/out"])
    self.pool = None  # type: AbstractRunWorkerPool
    """ Used run worker pool that abstracts the benchmarking """
    if Settings()["run/cpuset/parallel"] == 0:
        self.pool = RunWorkerPool()
    else:
        self.pool = ParallelRunWorkerPool()
    self.run_block_size = Settings()["run/run_block_size"]  # type: int
    """ Number of benchmarking runs that are done together """
    self.discarded_runs = Settings()["run/discarded_runs"]  # type: int
    """ First n runs that are discarded """
    self.max_runs = Settings()["run/max_runs"]  # type: int
    """ Maximum number of benchmarking runs """
    self.min_runs = Settings()["run/min_runs"]  # type: int
    """ Minimum number of benchmarking runs """
    if self.min_runs > self.max_runs:
        logging.warning("min_runs ({}) is bigger than max_runs ({}), therefore they are swapped."
                        .format(self.min_runs, self.max_runs))
        self.min_runs, self.max_runs = self.max_runs, self.min_runs
    self.shuffle = Settings()["run/shuffle"]  # type: bool
    """ Randomize the order in which the program blocks are benchmarked. """
    self.fixed_runs = Settings()["run/runs"] != -1  # type: bool
    """ Do a fixed number of benchmarking runs? """
    if self.fixed_runs:
        self.min_runs = self.max_runs = Settings()["run/runs"]
    self.start_time = round(time.time())  # type: float
    """ Unix time stamp of the start of the benchmarking """
    self.end_time = None  # type: float
    """ Unix time stamp of the point in time that the benchmarking can at most reach """
    try:
        self.end_time = self.start_time + pytimeparse.parse(Settings()["run/max_time"])
    except:
        self.teardown()
        raise
    self.store_often = Settings()["run/store_often"]  # type: bool
    """ Store the result file after each set of blocks is benchmarked """
    self.block_run_count = 0  # type: int
    """ Number of benchmarked blocks """
    self.erroneous_run_blocks = []  # type: t.List[t.Tuple[int, BenchmarkingResultBlock]]
    """ List of all failing run blocks (id and results till failing) """
def __init__(self, option_name: str, settings_key: str = None, type_scheme: Type = None,
             short: str = None, completion_hints: t.Dict[str, t.Any] = None, is_flag: bool = None):
    """
    Initializes an option either based on a setting (via the settings key) or on a type scheme.
    If this option is backed by a settings key, the setting is automatically set.
    If is_flag is None, it is set to True if type_scheme is an instance of Bool() or BoolOrNone().

    :param option_name: name of the option
    :param settings_key: settings key of the option
    :param type_scheme: type scheme with default value
    :param short: short version of the option (ignored if is_flag=True)
    :param completion_hints: additional completion hints (dict with keys for each shell)
    :param is_flag: is the option a "--ABC/--no-ABC" flag like option?
    """
    typecheck(option_name, Str())
    self.option_name = option_name  # type: str
    """ Name of this option """
    self.settings_key = settings_key  # type: t.Optional[str]
    """ Settings key of this option """
    self.short = short  # type: t.Optional[str]
    """ Short version of the option (ignored if is_flag=True) """
    self.completion_hints = completion_hints  # type: t.Optional[t.Dict[str, t.Any]]
    """ Additional completion hints (dict with keys for each shell) """
    if (settings_key is None) == (type_scheme is None):
        raise ValueError("settings_key and type_scheme are both None (or both not None)")
    self.type_scheme = type_scheme  # type: Type
    """ Type scheme with default value """
    if not self.type_scheme:
        self.type_scheme = Settings().get_type_scheme(settings_key)
    #self.callback = lambda a, b: None
    #""" Callback that sets the setting """
    self.callback = None  # type: t.Optional[t.Callable[[click.Option, t.Any], None]]
    """ Callback that sets the setting """
    if type_scheme is not None and not isinstance(type_scheme, click.ParamType):
        self.callback = lambda a, b: None
    if settings_key is not None and not isinstance(self.type_scheme, click.ParamType):
        def callback(param: click.Option, val):
            try:
                Settings()[settings_key] = val
            except SettingsError as err:
                logging.error("Error while processing the passed value ({val}) of option {opt}: {msg}"
                              .format(val=repr(val), opt=option_name, msg=str(err)))
                exit(1)
        self.callback = callback
    else:
        self.callback = None
    self.description = self.type_scheme.description.strip().split("\n")[0]  # type: str
    """ Description of this option """
    self.has_description = self.description not in [None, ""]  # type: bool
    """ Does this option have a description? """
    if not self.has_description:
        warnings.warn("Option {} is without documentation.".format(option_name))
    self.has_default = True  # type: bool
    """ Does this option have a default value? """
    self.default = None  # type: t.Any
    """ Default value of this option """
    try:
        self.default = self.type_scheme.get_default()
    except ValueError:
        self.has_default = False
    if settings_key:
        self.default = Settings()[settings_key]
    if hasattr(self.type_scheme, "completion_hints") and self.completion_hints is None:
        self.completion_hints = self.type_scheme.completion_hints
    self.is_flag = is_flag is True or (is_flag is None
                                       and type(self.type_scheme) in [Bool, BoolOrNone])  # type: bool
    """ Is this option flag like? """
    if self.is_flag:
        self.completion_hints = None
        self.short = None

        def callback(param, val):
            if val is not None:
                try:
                    Settings()[settings_key] = val
                except SettingsError as err:
                    logging.error("Error while processing the passed value ({val}) of option {opt}: {msg}"
                                  .format(val=val, opt=option_name, msg=str(err)))
            return val

        self.callback = callback
    self.has_completion_hints = self.completion_hints is not None  # type: bool
    """ Does this option have completion hints? """
    self.has_short = short is not None  # type: bool
    """ Does this option have a short version? """
def __lt__(self, other) -> bool:
    """ Compare by option_name. """
    typecheck(other, T(CmdOption))
    return self.option_name < other.option_name
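# --- Editor's sketch, not part of the original source: __lt__ makes CmdOption
# objects sortable by option_name, which the completion generators above rely
# on via sorted(...). The settings keys are illustrative but appear elsewhere
# in this code base.
a = CmdOption(option_name="append", settings_key="run/append")
z = CmdOption(option_name="out", settings_key="run/out")
assert sorted([z, a])[0].option_name == "append"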
def func(decorated_func):
    used_raw_type = None
    multiple = False
    type_scheme = __type_scheme
    _type_scheme = type_scheme
    while isinstance(type_scheme, Either):
        type_scheme = type_scheme.types[0]
    while isinstance(type_scheme, Constraint) or isinstance(type_scheme, NonErrorConstraint):
        type_scheme = type_scheme.constrained_type
    if isinstance(type_scheme, List) or isinstance(type_scheme, ListOrTuple):
        multiple = True
        type_scheme = type_scheme.elem_type
    if isinstance(type_scheme, click.ParamType):
        used_raw_type = type_scheme
    elif isinstance(type_scheme, ExactEither):
        used_raw_type = click.Choice(type_scheme.exp_values)
    elif isinstance(type_scheme, Exact):
        used_raw_type = click.Choice(type_scheme.exp_value)
    elif isinstance(type_scheme, Tuple):
        used_raw_type = tuple([raw_type(x) for x in type_scheme.elem_types])
    elif isinstance(type_scheme, Any):
        used_raw_type = object
    elif isinstance(type_scheme, T):
        used_raw_type = type_scheme.native_type
    elif isinstance(type_scheme, Str):
        used_raw_type = str
    else:
        used_raw_type = raw_type(type_scheme)
    option_args = {
        "type": used_raw_type,
        "callback": None,
        "multiple": multiple
    }
    if has_default:
        option_args["default"] = default_value
        option_args["show_default"] = True
    #else:
    #    option_args["show_default"] = False
    if not isinstance(option_args["type"], click.ParamType):
        option_args["callback"] = validate(_type_scheme)
        if not isinstance(option_args["type"], Either(T(tuple), T(str))):
            option_args["type"] = raw_type(option_args["type"])
    if callback is not None:
        if option_args["callback"] is None:
            option_args["callback"] = lambda ctx, param, value: callback(param, value)
        else:
            old_callback = option_args["callback"]
            option_args["callback"] = lambda ctx, param, value: callback(param, old_callback(ctx, param, value))
    if is_flag:
        option_args["is_flag"] = True
    #print(type(option_args["callback"]), option_name, type_scheme)
    opt = None
    if help_text is not None:
        typecheck(help_text, Str())
        option_args["help"] = help_text
    if is_flag:
        del option_args["type"]
        opt = click.option("--{name}/--no-{name}".format(name=option_name), **option_args)(decorated_func)
    elif __short is not None:
        opt = click.option("--{}".format(option_name), "-" + __short, **option_args)(decorated_func)
    else:
        opt = click.option("--{}".format(option_name), **option_args)(decorated_func)
    return opt
def get_sub_set(self, set_id: int) -> str:
    """ Gets the name of the benchmarking cpu set with the given id / number (starting at zero). """
    if self.active:
        typecheck(set_id, Int(range=range(0, self.parallel_number)))
    return SUB_BENCH_SET.format(set_id)
def __init__(self, option_name: str, settings_key: str = None, type_scheme: Type = None,
             short: str = None, completion_hints: t.Dict[str, t.Any] = None, is_flag: bool = None):
    """
    Initializes an option either based on a setting (via the settings key) or on a type scheme.
    If this option is backed by a settings key, the setting is automatically set.
    If is_flag is None, it is set to True if type_scheme is an instance of Bool() or BoolOrNone().

    :param option_name: name of the option
    :param settings_key: settings key of the option
    :param type_scheme: type scheme with default value
    :param short: short version of the option (ignored if is_flag=True)
    :param completion_hints: additional completion hints (dict with keys for each shell)
    :param is_flag: is the option a "--ABC/--no-ABC" flag like option?
    """
    typecheck(option_name, Str())
    self.option_name = option_name  # type: str
    """ Name of this option """
    self.settings_key = settings_key  # type: t.Optional[str]
    """ Settings key of this option """
    self.short = short  # type: t.Optional[str]
    """ Short version of the option (ignored if is_flag=True) """
    self.completion_hints = completion_hints  # type: t.Optional[t.Dict[str, t.Any]]
    """ Additional completion hints (dict with keys for each shell) """
    if (settings_key is None) == (type_scheme is None):
        raise ValueError("settings_key and type_scheme are both None (or both not None)")
    self.type_scheme = type_scheme  # type: Type
    """ Type scheme with default value """
    if not self.type_scheme:
        self.type_scheme = Settings().get_type_scheme(settings_key)
    #self.callback = lambda a, b: None
    #""" Callback that sets the setting """
    self.callback = None  # type: t.Optional[t.Callable[[click.Context, click.Option, t.Any], None]]
    """ Callback that sets the setting """
    if type_scheme is not None and not isinstance(type_scheme, click.ParamType):
        self.callback = lambda a, b: None
    if settings_key is not None and (not isinstance(self.type_scheme, click.ParamType)
                                     or isinstance(self.type_scheme, Type)):
        def callback(context: Context, param: click.Option, val):
            try:
                if context.get_parameter_source(param.name) != ParameterSource.DEFAULT:
                    Settings().set(settings_key, val, validate=False)
            except SettingsError as err:
                logging.error("Error while processing the passed value ({val}) of option {opt}: {msg}"
                              .format(val=repr(val), opt=option_name, msg=str(err)))
                logging.debug("".join(traceback.format_exception(None, err, err.__traceback__)))
                exit(1)
        self.callback = callback
    else:
        self.callback = None
    self.description = self.type_scheme.description.strip().split("\n")[0]  # type: str
    """ Description of this option """
    self.has_description = self.description not in [None, ""]  # type: bool
    """ Does this option have a description? """
    if not self.has_description:
        warnings.warn("Option {} is without documentation.".format(option_name))
    self.has_default = True  # type: bool
    """ Does this option have a default value? """
    self.default = None  # type: t.Any
    """ Default value of this option """
    try:
        self.default = self.type_scheme.get_default()
    except ValueError:
        self.has_default = False
    if settings_key:
        self.default = Settings()[settings_key]
    if hasattr(self.type_scheme, "completion_hints") and self.completion_hints is None:
        self.completion_hints = self.type_scheme.completion_hints
    self.is_flag = is_flag is True or (is_flag is None
                                       and type(self.type_scheme) in [Bool, BoolOrNone])  # type: bool
    """ Is this option flag like? """
    if self.is_flag and settings_key is not None:
        self.completion_hints = None
        self.short = None

        def callback(context: Context, param, val):
            if val is not None and context.get_parameter_source(param.name) != ParameterSource.DEFAULT:
                try:
                    Settings().set(settings_key, val, validate=False)
                except SettingsError as err:
                    logging.error("Error while processing the passed value ({val}) of option {opt}: {msg}"
                                  .format(val=val, opt=option_name, msg=str(err)))
            return val

        self.callback = callback
    self.has_completion_hints = self.completion_hints is not None  # type: bool
    """ Does this option have completion hints? """
    self.has_short = short is not None  # type: bool
    """ Does this option have a short version? """
def func(decorated_func):
    used_raw_type = None
    multiple = False
    type_scheme = __type_scheme
    _type_scheme = type_scheme
    while isinstance(type_scheme, Either):
        type_scheme = type_scheme.types[0]
    while isinstance(type_scheme, Constraint) or isinstance(type_scheme, NonErrorConstraint):
        type_scheme = type_scheme.constrained_type
    if isinstance(type_scheme, List) or isinstance(type_scheme, ListOrTuple):
        multiple = True
        type_scheme = type_scheme.elem_type
    if isinstance(type_scheme, click.ParamType):
        used_raw_type = type_scheme
    elif isinstance(type_scheme, ExactEither):
        used_raw_type = click.Choice(type_scheme.exp_values)
    elif isinstance(type_scheme, Exact):
        used_raw_type = click.Choice(type_scheme.exp_value)
    elif isinstance(type_scheme, Tuple):
        used_raw_type = tuple([raw_type(x) for x in type_scheme.elem_types])
    elif isinstance(type_scheme, Any):
        used_raw_type = object
    elif isinstance(type_scheme, T):
        used_raw_type = type_scheme.native_type
    elif isinstance(type_scheme, Str):
        used_raw_type = str
    else:
        used_raw_type = raw_type(type_scheme)
    option_args = {
        "type": used_raw_type,
        "callback": None,
        "multiple": multiple
    }
    if has_default:
        option_args["default"] = default_value
        option_args["show_default"] = True
    #else:
    #    option_args["show_default"] = False
    if not isinstance(option_args["type"], click.ParamType):
        option_args["callback"] = validate(_type_scheme)
        if not isinstance(option_args["type"], Either(T(tuple), T(str))):
            option_args["type"] = raw_type(option_args["type"])
    if callback is not None:
        if option_args["callback"] is None:
            option_args["callback"] = lambda ctx, param, value: callback(ctx, param, value)
        else:
            old_callback = option_args["callback"]
            option_args["callback"] = lambda ctx, param, value: callback(ctx, param,
                                                                         old_callback(ctx, param, value))
    if validate_settings:
        without_check = option_args["callback"]
        option_args["callback"] = lambda ctx, param, value: Settings().validate() \
                                                            or without_check(ctx, param, value)
    if is_flag:
        option_args["is_flag"] = True
    #print(type(option_args["callback"]), option_name, type_scheme)
    opt = None
    if help_text is not None:
        typecheck(help_text, Str())
        option_args["help"] = help_text
    if is_flag:
        del option_args["type"]
        opt = click.option("--{name}/--no-{name}".format(name=option_name), **option_args)(decorated_func)
    elif __short is not None:
        opt = click.option("--{}".format(option_name), "-" + __short, **option_args)(decorated_func)
    else:
        opt = click.option("--{}".format(option_name), **option_args)(decorated_func)
    return opt
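# --- Editor's usage sketch, not part of the original source: how the pieces
# above are meant to compose on a click command. The command body, the option
# list and the settings key are illustrative assumptions.
import click

run_options = CmdOptionList()
run_options.append(CmdOption(option_name="out", settings_key="run/out"))

@click.command()
@cmd_option(run_options)
def run(**kwargs):
    """ Illustrative command wired up with the generated options. """
    pass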