def is_uncertain(self, p_val: float) -> bool:
    """
    Does the passed probability of the null hypothesis for two samples lie in the uncertainty range?

    :param p_val: passed probability of the null hypothesis
    """
    return min(*Settings()["stats/uncertainty_range"]) <= p_val \
           <= max(*Settings()["stats/uncertainty_range"])
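# Standalone sketch (hypothetical values, no Settings dependency) of the same
# containment check: a p-value counts as "uncertain" if it falls inside the
# configured range, regardless of the order in which the bounds are given.
def _is_uncertain_sketch(p_val: float, uncertainty_range=(0.05, 0.15)) -> bool:
    return min(uncertainty_range) <= p_val <= max(uncertainty_range)

assert _is_uncertain_sketch(0.10)
assert not _is_uncertain_sketch(0.01)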
def store_and_teardown(self):
    """
    Teardown everything, store the result file, print a short report and
    send an email if configured to do so.
    """
    self.teardown()
    if not self.pool.run_driver.store_files:
        return
    self.store()
    if len(self.stats_helper.valid_runs()) > 0 \
            and all(x.benchmarks() > 0 for x in self.stats_helper.valid_runs()):
        report = ""
        if not in_standalone_mode:
            report = ReporterRegistry.get_for_name("console", self.stats_helper) \
                .report(with_tester_results=False, to_string=True)
        subject = "Finished " + join_strs([repr(run.description())
                                           for run in self.stats_helper.valid_runs()])
        send_mail(Settings()["run/send_mail"], subject, report, [Settings()["run/out"]])
    if self.recorded_error():
        descrs = []
        msgs = []
        for (i, result) in self.erroneous_run_blocks:
            descr = self.run_blocks[i].description()
            descrs.append(descr)
            msg = descr + ":\n\t" + "\n\t".join(str(result.error).split("\n"))
            msgs.append(msg)
        subject = "Errors while benchmarking " + join_strs(descrs)
        send_mail(Settings()["run/send_mail"], subject, "\n\n".join(msgs),
                  [Settings()["run/in"] + ".erroneous.yaml"])
def _func2(run_file, **kwargs):
    Settings()["run/driver"] = driver
    Settings()["run/in"] = run_file
    try:
        RunProcessor().benchmark()
    except KeyboardInterrupt:
        logging.error("KeyboardInterrupt. Cleaned up everything.")
def from_non_plugin_settings(cls, settings_domain: str, exclude: t.List[Str] = None,
                             name_prefix: str = None) -> 'CmdOptionList':
    """
    Creates a list of CmdOption objects from all sub settings (in the settings domain).
    It excludes all sub settings that are either in the exclude list or end with
    "_active" or "_misc" (used for plugin settings). Every setting of type Dict
    is also ignored.

    :param settings_domain: settings domain to look into (or "" for the root domain)
    :param exclude: list of sub keys to exclude
    :param name_prefix: prefix prepended to each option name
    :return: list of CmdOptions
    """
    exclude = exclude or []
    name_prefix = name_prefix or ""
    typecheck_locals(settings_domain=str, exclude=List(Str()), name_prefix=Str())
    domain = Settings().type_scheme
    if settings_domain != "":
        domain = Settings().get_type_scheme(settings_domain)
    ret_list = []
    if isinstance(domain, Obsolete):
        return CmdOptionList()
    for sub_key in domain.data:
        if domain.is_obsolete(sub_key):
            continue
        if sub_key not in exclude and all(not sub_key.endswith(suf) for suf in ["_active", "_misc"]) \
                and not isinstance(domain[sub_key], Dict):
            ret_list.append(CmdOption(
                option_name=name_prefix + sub_key,
                settings_key=settings_domain + "/" + sub_key if settings_domain != "" else sub_key))
    return CmdOptionList(*ret_list)
def __init__(self, cmd: str, working_dir: str = normalize_path("."), send_mail: bool = None,
             mail_address: str = None, mail_header: str = None):
    """
    Creates an instance.

    :param cmd: command to execute
    :param working_dir: directory in which the command is executed
    :param send_mail: send a mail after the execution of the program?
    :param mail_address: recipient of the mail
    :param mail_header: header of the mail
    """
    super().__init__()
    if send_mail is None:
        send_mail = Settings()["package/send_mail"] != ""
    if mail_address is None:
        mail_address = Settings()["package/send_mail"]
    if mail_address == "":
        mail_address = None
    assert mail_address or not send_mail
    self.cmd = cmd  # type: str
    """ Command to execute """
    self.working_dir = working_dir  # type: str
    """ Directory in which the command is executed """
    self.send_mail = send_mail  # type: bool
    """ Send a mail after the execution of the program? """
    self.mail_address = mail_address  # type: str
    """ Recipient of the mail """
    self.mail_header = mail_header or "Executed command {!r}".format(self.cmd)  # type: str
    """ Header of the mail """
    self.typecheck()
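# Standalone sketch (hypothetical helper) of the mail-default resolution
# above: an empty "package/send_mail" setting means "no recipient, no mail".
def _resolve_mail_defaults(send_mail_setting: str, send_mail=None, mail_address=None):
    if send_mail is None:
        send_mail = send_mail_setting != ""
    if mail_address is None:
        mail_address = send_mail_setting
    if mail_address == "":
        mail_address = None
    assert mail_address or not send_mail
    return send_mail, mail_address

assert _resolve_mail_defaults("") == (False, None)
assert _resolve_mail_defaults("dev@example.com") == (True, "dev@example.com")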
def store(self):
    """ Store the result file """
    try:
        self.stats_helper.add_property_descriptions(
            self.pool.run_driver.get_property_descriptions())
    except (IOError, OSError) as ex:
        logging.error(ex)
    if (len(self.stats_helper.valid_runs()) > 0
            and all(x.benchmarks() > 0 for x in self.stats_helper.valid_runs())) \
            or Settings()["run/record_errors_in_file"]:
        with open(Settings()["run/out"], "w") as f:
            self.stats_helper.update_env_info()
            f.write(yaml.dump(self.stats_helper.serialize()))
            chown(f)
def temci__short__shell(command: str, **kwargs):
    Settings()["run/driver"] = "shell"
    Settings()["run/runs"] = 1
    Settings()["run/discarded_runs"] = 0
    Settings()["run/cpuset/parallel"] = 0
    benchmark_and_exit([{
        "run_config": {
            "run_cmd": command
        },
        "attributes": {
            "description": command
        }
    }])
def get_env_setting() -> Dict[str, str]:
    env = Settings()["env"].copy()
    if env["USER"] == "":
        env["USER"] = get_bench_user()
    if env["PATH"] == "":
        env["PATH"] = os.getenv("PATH", "")
    return env
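# Minimal standalone sketch of the same fallback pattern, filling empty
# entries from the process environment instead of get_bench_user()/Settings:
import os

def _fill_env_defaults(env: dict) -> dict:
    env = dict(env)
    if env.get("USER", "") == "":
        env["USER"] = os.getenv("USER", "")
    if env.get("PATH", "") == "":
        env["PATH"] = os.getenv("PATH", "")
    return env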
def from_registry(cls, registry: type, name_prefix: str = None) -> 'CmdOptionList':
    """
    Creates a list of CmdOption objects from a registry. It creates an activation
    flag (--OPT/--no-OPT) for each registered plugin and an option named OPT_PREF
    for each plugin preference. Deeper nesting is intentionally not supported.

    :param registry: used registry
    :param name_prefix: prefix of each option name (usable to avoid ambiguity problems)
    :return: list of CmdOptions
    """
    assert issubclass(registry, AbstractRegistry)
    typecheck_locals(name_prefix=Str() | E(None))
    name_prefix = name_prefix if name_prefix is not None else ""
    ret_list = CmdOptionList()
    for plugin in registry.registry:
        active_key = "{}_active".format("/".join([registry.settings_key_path, plugin]))
        ret_list.append(CmdOption(
            option_name=name_prefix + plugin,
            settings_key=active_key
        ))
        misc_key = "{}_misc".format("/".join(registry.settings_key_path.split("/") + [plugin]))
        misc = Settings().get_type_scheme(misc_key)
        typecheck(misc, Dict)
        for misc_sub_key in misc.data:
            misc_sub = misc[misc_sub_key]
            if not isinstance(misc_sub, Dict):
                ret_list.append(CmdOption(
                    option_name="{}{}_{}".format(name_prefix, plugin, misc_sub_key),
                    settings_key="{}/{}".format(misc_key, misc_sub_key)
                ))
    return ret_list
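# Illustration with assumed values: for a settings_key_path of "run/exec_plugins"
# and a plugin named "nice", the derived settings keys look like this.
_path, _plugin = "run/exec_plugins", "nice"
_active_key = "{}_active".format("/".join([_path, _plugin]))
_misc_key = "{}_misc".format("/".join(_path.split("/") + [_plugin]))
assert _active_key == "run/exec_plugins/nice_active"
assert _misc_key == "run/exec_plugins/nice_misc"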
def temci__short__exec(with_description: list = None, without_description: list = None, **kwargs):
    runs = []
    if with_description is not None:
        for (descr, cmd) in with_description:
            runs.append({
                "run_config": {
                    "run_cmd": [cmd]
                },
                "attributes": {
                    "description": descr
                }
            })
    if without_description is not None:
        for cmd in without_description:
            runs.append({
                "run_config": {
                    "run_cmd": [cmd]
                },
                "attributes": {
                    "description": cmd
                }
            })
    Settings()["run/driver"] = "exec"
    try:
        RunProcessor(runs).benchmark()
    except KeyboardInterrupt:
        logging.error("KeyboardInterrupt. Cleaned up everything.")
def _setup_block(self, block: RunProgramBlock):
    if isinstance(block["run_cmd"], List(Str())):
        block["run_cmds"] = block["run_cmd"]
    else:
        block["run_cmds"] = [block["run_cmd"]]
    if isinstance(block["cwd"], List(Str())):
        if len(block["cwd"]) != len(block["run_cmd"]) and not isinstance(block["run_cmd"], str):
            raise ValueError("Number of passed working directories {} "
                             "does not match the number of passed run commands {}".format(
                                 len(block["cwd"]), len(block["run_cmd"])))
        block["cwds"] = block["cwd"]
    else:
        block["cwds"] = [block["cwd"]] * len(block["run_cmds"])
    self.uses_vcs = block["revision"] != -1
    self.vcs_driver = None
    self.tmp_dir = ""
    if self.uses_vcs and block.id not in self._dirs:
        self.vcs_driver = VCSDriver.get_suited_vcs(".")
        self.tmp_dir = os.path.join(Settings()["tmp_dir"],
                                    datetime.datetime.now().strftime("%s%f"))
        os.mkdir(self.tmp_dir)
        self._dirs[block.id] = os.path.join(self.tmp_dir, str(block.id))
        os.mkdir(self._dirs[block.id])
        self.vcs_driver.copy_revision(block["revision"], ".", self._dirs[block.id])
        block["working_dir"] = self._dirs[block.id]
    if self.misc_settings["runner"] != "":
        block["runner"] = self.misc_settings["runner"]
    super()._setup_block(block)
def temci__short__exec(commands: list, with_description: list = None,
                       without_description: list = None, **kwargs):
    runs = []

    def create_run_config(cmd: str) -> dict:
        return {"run_cmd": [cmd]}

    if with_description is not None:
        for (descr, cmd) in with_description:
            runs.append({
                "run_config": create_run_config(cmd),
                "attributes": {
                    "description": descr
                }
            })
    for cmd in commands + list(without_description or []):
        runs.append({
            "run_config": create_run_config(cmd),
            "attributes": {
                "description": cmd
            }
        })
    for run in runs:
        con = run["run_config"]
        # append the placeholder if the command string does not already contain it
        if "$ARGUMENT" not in con["run_cmd"][0]:
            con["run_cmd"][0] += " $ARGUMENT"
    Settings()["run/driver"] = "exec"
    benchmark_and_exit(runs)
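# Standalone sketch of the placeholder handling above: commands that do not
# already mention $ARGUMENT get it appended, so an argument can later be
# substituted into every benchmarked command line.
def _ensure_argument_placeholder(cmd: str) -> str:
    return cmd if "$ARGUMENT" in cmd else cmd + " $ARGUMENT"

assert _ensure_argument_placeholder("echo hi") == "echo hi $ARGUMENT"
assert _ensure_argument_placeholder("cat $ARGUMENT") == "cat $ARGUMENT"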
def __init__(self, number: Number, rel_deviation: Number = None, abs_deviation: Number = None,
             is_percent: bool = None, scientific_notation: bool = None,
             parentheses_mode: t.Union[str, ParenthesesMode] = None, parentheses: bool = None):
    from temci.utils.settings import Settings
    self.settings = Settings()["report/number"]
    self.number = number  # type: Number
    assert not (rel_deviation is not None and abs_deviation is not None)
    self.deviation = None  # type: t.Optional[Number]
    """ Relative deviation """
    if abs_deviation is not None:
        if number != 0:
            self.deviation = abs(abs_deviation / number)
        else:
            self.deviation = 0
    elif rel_deviation is not None:
        self.deviation = abs(rel_deviation)
    self.is_percent = is_percent if is_percent is not None else self.settings["percentages"]
    self.scientific_notation = scientific_notation if scientific_notation is not None \
        else self.settings["scientific_notation"]
    self.parentheses_mode = ParenthesesMode.map(parentheses_mode if parentheses_mode is not None
                                                else self.settings["parentheses_mode"])
    self.parentheses = parentheses if parentheses is not None else self.settings["parentheses"]
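# Worked example of the deviation normalization above (hypothetical values):
# an absolute deviation is turned into a relative one by dividing by the
# number itself, so both constructor variants store the same quantity.
_number, _abs_dev = 8.0, 2.0
_rel_dev = abs(_abs_dev / _number) if _number != 0 else 0
assert _rel_dev == 0.25  # equivalent to passing rel_deviation=0.25 directly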
def __init__(self, runs: t.List[RunData], tester: Tester = None, external_count: int = 0,
             property_descriptions: t.Dict[str, str] = None):
    """
    Don't use the constructor; use init_from_dicts if possible.

    :param runs: list of run data objects
    :param tester: used tester or tester that is set in the settings
    :param external_count: number of external program blocks (blocks for which the data
                           was obtained in a different benchmarking session)
    :param property_descriptions: mapping of some properties to their descriptions or longer versions
    """
    self.tester = tester or TesterRegistry.get_for_name(
        TesterRegistry.get_used(),  # type: Tester
        Settings()["stats/uncertainty_range"])
    """ Used statistical tester """
    typecheck(runs, List(T(RunData)))
    self.runs = runs  # type: t.List[RunData]
    """ Data of several runs from several measured program blocks """
    self.external_count = external_count  # type: int
    """ Number of external program blocks (blocks for which the data was obtained in a
        different benchmarking session) """
    self.property_descriptions = property_descriptions or {}  # type: t.Dict[str, str]
def benchmark(self):
    """ Benchmark and teardown. """
    try:
        show_progress = Settings().has_log_level("info") and \
                        ("exec" != RunDriverRegistry.get_used()
                         or "start_stop" not in ExecRunDriver.get_used())
        showed_progress_before = False
        discard_label = "Make the {} discarded benchmarks".format(self.discarded_runs)
        if self.fixed_runs:
            label = "Benchmark {} times".format(self.max_runs)
        else:
            label = "Benchmark {} to {} times".format(self.min_runs, self.max_runs)
        start_label = discard_label if self.discarded_runs > 0 else label
        label_format = "{:32s}"
        if show_progress:
            with click.progressbar(range(0, self.max_runs + self.discarded_runs),
                                   label=label_format.format(start_label)) as runs:
                for run in runs:
                    if run < self.discarded_runs:
                        self._benchmarking_block_run(block_size=1, discard=True)
                    else:
                        if self._finished():
                            break
                        self._benchmarking_block_run()
                    if run == self.discarded_runs - 1:
                        runs.label = label_format.format(label)
        else:
            time_per_run = self._make_discarded_runs()
            last_round_time = time.time()
            if time_per_run is not None:
                last_round_time -= time_per_run * self.run_block_size
            while not self._finished():
                self._benchmarking_block_run()
    except BaseException as ex:
        logging.error("Forced teardown of RunProcessor")
        self.store_and_teardown()
        if isinstance(ex, KeyboardInterrupt) and Settings()["log_level"] == "info" \
                and self.block_run_count > 0 and self.show_report:
            self.print_report()
        raise
    self.store_and_teardown()
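# Minimal standalone sketch of the loop shape above (hypothetical callables):
# a fixed number of discarded warm-up iterations, then measured iterations
# until a stop predicate reports that enough data has been gathered.
def _benchmark_loop(discarded: int, max_runs: int, finished, bench):
    for _ in range(discarded):
        bench(discard=True)   # warm-up, results are thrown away
    for _ in range(max_runs):
        if finished():
            break
        bench(discard=False)  # measured run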
def _fail(self, message: str):
    """
    Fail with the given error message and send an error mail if configured to do so.

    :param message: given error message
    """
    logging.error(message)
    send_mail(Settings()["package/send_mail"], "Error", message)
    exit(1)
def temci__build(build_file: str, **kwargs):
    try:
        Settings()["build/in"] = build_file
        BuildProcessor().build()
    except KeyboardInterrupt:
        logging.error("Aborted")
    except BaseException as err:
        print(err)
        logging.error(str(err))
def store_in_db(self, db: Database):
    """
    Serializes this instance into a file and includes this file in the passed database.

    :param db: passed database
    """
    filename = os.path.join(Settings()["tmp_dir"], "actions.yaml")
    self.store_in_file(filename)
    db.store_file("actions", "file", filename)
    os.remove(filename)
def callback(param: click.Option, val):
    try:
        Settings()[settings_key] = val
    except SettingsError as err:
        logging.error("Error while processing the passed value ({val}) of option {opt}: {msg}".format(
            val=repr(val), opt=option_name, msg=str(err)))
        exit(1)
def __init__(self, seconds: int = Settings()["package/actions/sleep"]):
    """
    Creates an instance.

    :param seconds: seconds to sleep
    """
    super().__init__()
    self.seconds = seconds  # type: int
    """ Seconds to sleep """
    self.typecheck()
def load_from_db(self, db: Database):
    """
    Load the actions from the passed database.

    :param db: passed database
    """
    filename = os.path.join(Settings()["tmp_dir"], "actions.yaml")
    db.retrieve_file("actions", "file", filename)
    self.load_from_file(filename)
    os.remove(filename)
def __init__(self, runs: t.List[RunData], tester: Tester = None, external_count: int = 0,
             property_descriptions: t.Dict[str, str] = None,
             errorneous_runs: t.List[RunData] = None, included_blocks: str = None,
             env_info: FORMATTED_ENV_INFO = None):
    """
    Don't use the constructor; use init_from_dicts if possible.

    :param runs: list of run data objects
    :param tester: used tester or tester that is set in the settings
    :param external_count: number of external program blocks (blocks for which the data
                           was obtained in a different benchmarking session)
    :param property_descriptions: mapping of some properties to their descriptions or longer versions
    :param errorneous_runs: runs that resulted in errors
    :param included_blocks: include query
    :param env_info: formatted environment info
    """
    self.tester = tester or TesterRegistry.get_for_name(
        TesterRegistry.get_used(),
        Settings()["stats/uncertainty_range"])  # type: Tester
    """ Used statistical tester """
    typecheck(runs, List(T(RunData)))
    self.runs = filter_runs(runs, included_blocks
                            or Settings()["report/included_blocks"])  # type: t.List[RunData]
    self.errorneous_runs = errorneous_runs or [r for r in self.runs if r.has_error()]
    # keep erroneous runs that still produced some measurements
    self.runs = [r for r in self.runs
                 if not r.has_error() or any(len(v) > 0 for p, v in r.data.items())]
    """ Data of several runs from several measured program blocks """
    self.external_count = external_count  # type: int
    """ Number of external program blocks (blocks for which the data was obtained in a
        different benchmarking session) """
    self.property_descriptions = property_descriptions or {}  # type: t.Dict[str, str]
    self.env_info = env_info or []
def callback(context: Context, param, val):
    if val is not None and context.get_parameter_source(param.name) != ParameterSource.DEFAULT:
        try:
            Settings().set(settings_key, val, validate=False)
        except SettingsError as err:
            logging.error("Error while processing the passed value ({val}) of option {opt}: {msg}"
                          .format(val=val, opt=option_name, msg=str(err)))
    return val
def callback(param, val):
    if val is not None:
        try:
            Settings()[settings_key] = val
        except SettingsError as err:
            logging.error("Error while processing the passed value ({val}) of option {opt}: {msg}".format(
                val=val, opt=option_name, msg=str(err)))
    return val
def store(self):
    """ Store the result file """
    try:
        self.stats_helper.add_property_descriptions(
            self.pool.run_driver.get_property_descriptions())
    except (IOError, OSError) as ex:
        logging.error(ex)
    if len(self.stats_helper.valid_runs()) > 0 \
            and all(x.benchmarks() > 0 for x in self.stats_helper.valid_runs()):
        with open(Settings()["run/out"], "w") as f:
            f.write(yaml.dump(self.stats_helper.serialize()))
def execute(self, db: Database):
    """
    Execute this action.

    :param db: used database
    """
    if Settings()["package/dry_run"]:
        msg = self._dry_run_message()
        if msg:
            logging.info("{} action: {}".format(self.name, msg))
    else:
        self._execute(db)
def store(self, filename: str, compression_level: int = None):
    """
    Store the whole database as a compressed archive under the given file name.

    :param filename: passed file name
    :param compression_level: used compression level, from -1 (low) to -9 (high)
    """
    compression_level = compression_level or Settings()["package/compression/level"]
    self._store_yaml()
    filename = abspath(filename)
    used_prog = "gzip"
    av_programs = ["pixz", "xz"] if Settings()["package/compression/program"] == "xz" \
        else ["pigz", "gzip"]
    for prog in av_programs:
        if does_command_succeed(prog + " --version"):
            used_prog = prog
            break
    cmd = "cd {dir}; XZ={l} GZIP={l} tar cf '{dest}' . --use-compress-program={prog}" \
        .format(l=compression_level, dest=filename, dir=self.tmp_dir, prog=used_prog)
    subprocess.check_output(["/bin/sh", "-c", cmd])
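# Standalone sketch of the "first available compressor wins" selection above,
# using shutil.which instead of does_command_succeed:
import shutil

def _pick_compressor(preferred=("pixz", "xz"), fallback="gzip") -> str:
    for prog in preferred:
        if shutil.which(prog):  # prefer the parallel variant, if installed
            return prog
    return fallback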
def store_erroneous(self):
    """ Store the failing program blocks in a file ending with ``.erroneous.yaml``. """
    if len(self.erroneous_run_blocks) == 0:
        return
    file_name = Settings()["run/in"] + ".erroneous.yaml"
    try:
        blocks = [self.runs[x[0]] for x in self.erroneous_run_blocks]
        with open(file_name, "w") as f:
            f.write(yaml.dump(blocks))
    except IOError as err:
        logging.error("Can't write erroneous program blocks to {}: {}".format(file_name, err))
def callback(context: Context, param: click.Option, val):
    try:
        if context.get_parameter_source(param.name) != ParameterSource.DEFAULT:
            Settings().set(settings_key, val, validate=False)
    except SettingsError as err:
        logging.error("Error while processing the passed value ({val}) of option {opt}: {msg}"
                      .format(val=repr(val), opt=option_name, msg=str(err)))
        logging.debug("".join(traceback.format_exception(None, err, err.__traceback__)))
        exit(1)
def run(package_file: str, reverse_file: str = None):
    """
    Execute the package and create a package that can be executed afterwards to
    reverse (most of) the changes made.

    :param package_file: name of the used package file
    :param reverse_file: name of the reverse package file, or None if the setting
                         ``package/reverse_file`` should be used
    """
    reverse_file = reverse_file or Settings()["package/reverse_file"]
    db = load(package_file)
    rev_db = Database()
    actions.reverse_and_store_all_in_db(rev_db)
    rev_db.store(reverse_file)
    rev_db.clean()
    actions.execute_all(db)