def run(self, *args, **kwargs) -> int:
    """
    Benchmarks the execution 20 times and stores the last 10 results
    (to avoid side effects) in self.trigger.result.

    Runs at most self.maximum_tries times before deciding the run is a
    failure.

    :param args: additional arguments
    :param kwargs: additional keyword arguments
    :return: 0|1 on success|failure
    """
    logging.verbose(self.trigger.cmd)
    results = []
    tries = 0
    while len(results) < self.expected_results and tries < self.maximum_tries:
        try:
            # repeat=1, number=1: exactly one timed execution per iteration,
            # so each loop turn contributes at most one sample.
            results += timeit.repeat(self.benchmark_helper, repeat=1, number=1)
        except subprocess.CalledProcessError:
            logging.warning("A trigger failed, retrying one more time")

        tries += 1
        show_progress(len(results), self.expected_results, section="trigger")

    # FIX: the original tested `tries >= 100`, a hard-coded constant that
    # disagreed with the loop bound `self.maximum_tries`.  The run failed iff
    # the loop exited without enough samples, so test exactly that: it is
    # correct for any maximum_tries, and no longer misreports a run that
    # completes on the very last allowed try.
    if len(results) < self.expected_results:
        return 1

    logging.verbose("Run times : %(time)s secs", dict(time=results))
    # Keep only the trailing kept_runs samples to avoid warm-up side effects.
    self.trigger.returned_information = results[self.expected_results - self.kept_runs:]
    return 0
def run(self, *args, **kwargs) -> int:
    """
    Benchmarks the execution time of 20 runs and stores the last 10 results
    (to avoid side effects) in self.trigger.result.

    Runs at most self.maximum_tries times before deciding the run is a
    failure.

    :param args: additional arguments
    :param kwargs: additional keyword arguments
    :return: 0|1 on success|failure
    """
    results = []
    tries = 0
    while len(results) < self.expected_results and tries < self.maximum_tries:
        tries += 1
        try:
            # Bring the server side up and give it self.trigger.delay seconds
            # before launching helpers and the timed client run.
            proc_start = self.trigger.Server(self.trigger.cmd)
            proc_start.start()
            time.sleep(self.trigger.delay)
            results_queue = multiprocessing.Queue()  # pylint: disable=no-member
            self.triggers = []
            for command in self.trigger.helper_commands:
                self.triggers.append(
                    self.trigger.helper(command, results=results_queue,
                                        **self.trigger.named_helper_args))
            # One timed client execution per loop iteration.
            result = timeit.repeat(self.client_run, number=1, repeat=1)
        finally:
            # Best-effort teardown: stop the server, then kill the helpers.
            # NOTE(review): if Server()/start() raises on the very first
            # iteration, self.triggers may not exist yet here — confirm that
            # callers pre-initialize it or that Server() cannot raise.
            with suppress(subprocess.CalledProcessError):
                launch_and_log(self.trigger.stop_cmd.split(" "))
            for thread in self.triggers:
                thread.terminate()

        # Drain one value per helper.  NOTE(review): get_nowait() assumes each
        # helper has already posted its result by the time it is terminated.
        values = []
        for _ in self.triggers:
            values.append(results_queue.get_nowait())

        if self.trigger.check_success(values) != 0:
            logging.warning("Trigger did not work, retrying")
            continue

        results += result
        show_progress(len(results), self.expected_results, section="trigger")
        time.sleep(2)

    # FIX: the original tested `tries >= 100`, a hard-coded constant that
    # disagreed with the loop bound `self.maximum_tries`.  The run failed iff
    # the loop exited without enough samples, so test exactly that.
    if len(results) < self.expected_results:
        return 1

    logging.verbose("Run times : {} secs".format(results))
    # Keep only the trailing kept_runs samples to avoid warm-up side effects.
    self.trigger.returned_information = results[self.expected_results - self.kept_runs:]
    return 0
def run(self, *args, **kwargs) -> int:
    """
    Benchmarks the execution 20 times and stores the last 10 results
    (to avoid side effects) in self.trigger.result.

    Runs at most self.maximum_tries times before deciding the run is a
    failure.

    :param args: additional arguments
    :param kwargs: additional keyword arguments
    :return: 0|1 on success|failure
    """
    logging.verbose(self.trigger.cmd)
    results = []
    tries = 0
    while len(results) < self.expected_results and tries < self.maximum_tries:
        try:
            # repeat=1, number=1: exactly one timed execution per iteration,
            # so each loop turn contributes at most one sample.
            results += timeit.repeat(self.benchmark_helper, repeat=1, number=1)
        except subprocess.CalledProcessError:
            logging.warning("A trigger failed, retrying one more time")

        tries += 1
        show_progress(len(results), self.expected_results, section="trigger")

    # FIX: the original tested `tries >= 100`, a hard-coded constant that
    # disagreed with the loop bound `self.maximum_tries`.  The run failed iff
    # the loop exited without enough samples, so test exactly that: it is
    # correct for any maximum_tries, and no longer misreports a run that
    # completes on the very last allowed try.
    if len(results) < self.expected_results:
        return 1

    logging.verbose("Run times : %(time)s secs", dict(time=results))
    # Keep only the trailing kept_runs samples to avoid warm-up side effects.
    self.trigger.returned_information = results[self.expected_results - self.kept_runs:]
    return 0
def run(self, *args, **kwargs) -> int:
    """
    Benchmarks the execution time of 20 runs and stores the last 10 results
    (to avoid side effects) in self.trigger.result.

    Runs at most self.maximum_tries times before deciding the run is a
    failure.

    :param args: additional arguments
    :param kwargs: additional keyword arguments
    :return: 0|1 on success|failure
    """
    results = []
    tries = 0
    while len(results) < self.expected_results and tries < self.maximum_tries:
        tries += 1
        try:
            # Bring the server side up and give it self.trigger.delay seconds
            # before launching helpers and the timed client run.
            proc_start = self.trigger.Server(self.trigger.cmd)
            proc_start.start()
            time.sleep(self.trigger.delay)
            results_queue = multiprocessing.Queue()  # pylint: disable=no-member
            self.triggers = []
            for command in self.trigger.helper_commands:
                self.triggers.append(
                    self.trigger.helper(command, results=results_queue,
                                        **self.trigger.named_helper_args)
                )
            # One timed client execution per loop iteration.
            result = timeit.repeat(self.client_run, number=1, repeat=1)
        finally:
            # Best-effort teardown: stop the server, then kill the helpers.
            # NOTE(review): if Server()/start() raises on the very first
            # iteration, self.triggers may not exist yet here — confirm that
            # callers pre-initialize it or that Server() cannot raise.
            with suppress(subprocess.CalledProcessError):
                launch_and_log(self.trigger.stop_cmd.split(" "))
            for thread in self.triggers:
                thread.terminate()

        # Drain one value per helper.  NOTE(review): get_nowait() assumes each
        # helper has already posted its result by the time it is terminated.
        values = []
        for _ in self.triggers:
            values.append(results_queue.get_nowait())

        if self.trigger.check_success(values) != 0:
            logging.warning("Trigger did not work, retrying")
            continue

        results += result
        show_progress(len(results), self.expected_results, section="trigger")
        time.sleep(2)

    # FIX: the original tested `tries >= 100`, a hard-coded constant that
    # disagreed with the loop bound `self.maximum_tries`.  The run failed iff
    # the loop exited without enough samples, so test exactly that.
    if len(results) < self.expected_results:
        return 1

    logging.verbose("Run times : {} secs".format(results))
    # Keep only the trailing kept_runs samples to avoid warm-up side effects.
    self.trigger.returned_information = results[self.expected_results - self.kept_runs:]
    return 0
def main(programs, force_installation, processes, **kwargs):
    """
    the main function. runs installers

    Fans the given programs out to at most `processes` concurrent
    InstallerProcess workers and aggregates their exit statuses.

    :param programs: the programs to install
    :param force_installation: if the installation must be done if the
        program was already installed
    :param processes: maximum number of concurrent installer processes
        (enforced via a semaphore)
    :param kwargs: additional parameters to pass to the plugins
    :return: 0 on success, otherwise the last non-zero status reported
        by a worker
    """
    # pylint: disable=no-member,too-few-public-methods
    class InstallerProcess(multiprocessing.Process):
        """
        An Installer for a list of programs.

        :param _programs: the list of programs to launch
        :param _report_queue: the queue where to report the return value
        :param _max_tasks: the semaphore to acquire at the end to release
            a new worker
        """

        def __init__(self, _programs: list,
                     _report_queue: multiprocessing.Queue,
                     _max_tasks: multiprocessing.Semaphore):
            super().__init__()
            self.programs = _programs
            self.report_queue = _report_queue
            self.max_tasks = _max_tasks

        def run(self):
            """ Installs the programs and reports the value """
            error = None
            try:
                for _installer in self.programs:
                    try:
                        # A falsy return value from run() means the install
                        # succeeded; only then create executables when the
                        # program's configuration asks for them.
                        if (not _installer.run()) and _installer.conf.get(
                                "executable", None):
                            hooks.create_executables(installer=_installer)
                        # NOTE(review): nesting reconstructed from collapsed
                        # source — post_install_run is assumed to run for
                        # every installer, not only the executable branch;
                        # confirm against upstream history.
                        hooks.post_install_run(installer=_installer, **kwargs)
                    except InstallationErrorException as exception:
                        logging.error(exception.error_message)
                        logging.error(
                            "Won't install %(program)s",
                            dict(program=_installer.conf.get("name")))
                        error = constants.INSTALL_FAIL
            except Exception as exc:  # pylint: disable=broad-except
                # Catch-all boundary for a worker process: record failure and
                # log the traceback instead of dying silently.
                error = constants.INSTALL_FAIL
                logging.error(exc)
                logging.debug("".join(traceback.format_tb(exc.__traceback__)))
            finally:
                logging.verbose("Cleaning environment")
                hooks.post_install_clean(**kwargs)
                # Release the slot before reporting so a new worker can start.
                self.max_tasks.release()
                self.report_queue.put(
                    (error or 0, self.programs[0].conf.get("name")))

    installers = []
    report_queue = multiprocessing.Queue()
    # Bounds the number of simultaneously running InstallerProcess workers.
    max_tasks = multiprocessing.Semaphore(processes)
    for program in programs:
        # Blocks until a worker slot is free (released in run()'s finally).
        max_tasks.acquire()
        lib.logger.start_new_log_section(program, "installation")
        program_conf = get_program_conf(program)
        installer = InstallerProcess([
            Installer.factory(program_conf[prog], force_installation)
            for prog in program_conf.sections()
        ], report_queue, max_tasks)
        installer.start()
        installers.append(installer)

    return_value = 0
    counter = 0
    # Collect exactly one report per spawned worker; keep the last failure.
    for _ in installers:
        counter += 1
        value, program = report_queue.get(block=True)
        if value:
            return_value = value
            logging.error("%(prog)s failed to compile correctly",
                          dict(prog=program))
        show_progress(counter, len(installers))
    return return_value
def main(programs, force_installation, processes, **kwargs):
    """
    the main function. runs installers

    Fans the given programs out to at most `processes` concurrent
    InstallerProcess workers and aggregates their exit statuses.

    :param programs: the programs to install
    :param force_installation: if the installation must be done if the
        program was already installed
    :param processes: maximum number of concurrent installer processes
        (enforced via a semaphore)
    :param kwargs: additional parameters to pass to the plugins
    :return: 0 on success, otherwise the last non-zero status reported
        by a worker
    """
    # pylint: disable=no-member,too-few-public-methods
    class InstallerProcess(multiprocessing.Process):
        """
        An Installer for a list of programs.

        :param _programs: the list of programs to launch
        :param _report_queue: the queue where to report the return value
        :param _max_tasks: the semaphore to acquire at the end to release
            a new worker
        """

        def __init__(
                self,
                _programs: list,
                _report_queue: multiprocessing.Queue,
                _max_tasks: multiprocessing.Semaphore
        ):
            super().__init__()
            self.programs = _programs
            self.report_queue = _report_queue
            self.max_tasks = _max_tasks

        def run(self):
            """ Installs the programs and reports the value """
            error = None
            try:
                for _installer in self.programs:
                    try:
                        # A falsy return value from run() means the install
                        # succeeded; only then create executables when the
                        # program's configuration asks for them.
                        if (not _installer.run()) and _installer.conf.get("executable", None):
                            hooks.create_executables(installer=_installer)
                        # NOTE(review): nesting reconstructed from collapsed
                        # source — post_install_run is assumed to run for
                        # every installer, not only the executable branch;
                        # confirm against upstream history.
                        hooks.post_install_run(installer=_installer, **kwargs)
                    except InstallationErrorException as exception:
                        logging.error(exception.error_message)
                        logging.error("Won't install %(program)s", dict(program=_installer.conf.get("name")))
                        error = constants.INSTALL_FAIL
            except Exception as exc:  # pylint: disable=broad-except
                # Catch-all boundary for a worker process: record failure and
                # log the traceback instead of dying silently.
                error = constants.INSTALL_FAIL
                logging.error(exc)
                logging.debug("".join(traceback.format_tb(exc.__traceback__)))
            finally:
                logging.verbose("Cleaning environment")
                hooks.post_install_clean(**kwargs)
                # Release the slot before reporting so a new worker can start.
                self.max_tasks.release()
                self.report_queue.put((error or 0, self.programs[0].conf.get("name")))

    installers = []
    report_queue = multiprocessing.Queue()
    # Bounds the number of simultaneously running InstallerProcess workers.
    max_tasks = multiprocessing.Semaphore(processes)
    for program in programs:
        # Blocks until a worker slot is free (released in run()'s finally).
        max_tasks.acquire()
        lib.logger.start_new_log_section(program, "installation")
        program_conf = get_program_conf(program)
        installer = InstallerProcess(
            [Installer.factory(program_conf[prog], force_installation) for prog in program_conf.sections()],
            report_queue,
            max_tasks,
        )
        installer.start()
        installers.append(installer)

    return_value = 0
    counter = 0
    # Collect exactly one report per spawned worker; keep the last failure.
    for _ in installers:
        counter += 1
        value, program = report_queue.get(block=True)
        if value:
            return_value = value
            logging.error("%(prog)s failed to compile correctly", dict(prog=program))
        show_progress(counter, len(installers))
    return return_value