def _run_global_explorers_parallel(self, out_path):
    """Run every global explorer concurrently in a multiprocessing pool.

    :param out_path: directory the explorer output is written to.
    """
    self.log.debug("Running global explorers in %s parallel jobs", self.jobs)
    self.log.trace("Multiprocessing start method is %s",
                   multiprocessing.get_start_method())
    self.log.trace("Starting multiprocessing Pool for global explorers"
                   " run")
    # One (explorer_name, out_path) argument tuple per explorer;
    # mp_pool_run unpacks each tuple when calling the worker.
    run_args = [(name, out_path)
                for name in self.list_global_explorer_names()]
    mp_pool_run(self._run_global_explorer, run_args, jobs=self.jobs)
    self.log.trace("Multiprocessing run for global explorers finished")
def _run_global_explorers_parallel(self, out_path):
    """Execute all global explorers at once via a worker pool.

    :param out_path: directory that receives the explorer output.
    """
    self.log.debug("Running global explorers in {} parallel jobs".format(
        self.jobs))
    self.log.trace("Multiprocessing start method is {}".format(
        multiprocessing.get_start_method()))
    self.log.trace("Starting multiprocessing Pool for global "
                   "explorers run")
    explorer_args = [(explorer, out_path)
                     for explorer in self.list_global_explorer_names()]
    mp_pool_run(self._run_global_explorer, explorer_args, jobs=self.jobs)
    self.log.trace("Multiprocessing run for global explorers "
                   "finished")
def _run_global_explorers_parallel(self, out_path):
    """Run the global explorers in parallel worker processes.

    :param out_path: directory the explorer output is written to.
    """
    self.log.info("Running global explorers in {} parallel jobs".format(
        self.jobs))
    self.log.debug("Multiprocessing start method is {}".format(
        multiprocessing.get_start_method()))
    self.log.debug("Starting multiprocessing Pool for global "
                   "explorers run")
    # Build one argument tuple per explorer for the pool workers.
    pool_args = []
    for explorer_name in self.list_global_explorer_names():
        pool_args.append((explorer_name, out_path))
    mp_pool_run(self._run_global_explorer, pool_args, jobs=self.jobs)
    self.log.debug("Multiprocessing run for global explorers "
                   "finished")
def _transfer_dir_parallel(self, source, destination, jobs):
    """Transfer a directory to the remote side in parallel mode.

    :param source: local directory to transfer.
    :param destination: target directory on the remote side.
    :param jobs: number of parallel worker processes.
    """
    self.log.debug("Remote transfer in {} parallel jobs".format(jobs))
    self.log.trace("Multiprocessing start method is {}".format(
        multiprocessing.get_start_method()))
    self.log.trace("Starting multiprocessing Pool for parallel "
                   "remote transfer")
    args = [
        (command, )
        for command in self._transfer_dir_commands(source, destination)
    ]
    if len(args) == 1:
        self.log.debug("Only one dir entry, transfering sequentially")
        # BUG FIX: mp_pool_run applies each argument tuple as
        # func(*tuple); the sequential fallback must unpack the tuple
        # the same way instead of handing the tuple itself to
        # _run_command.
        self._run_command(*args[0])
    else:
        mp_pool_run(self._run_command, args, jobs=jobs)
    self.log.trace("Multiprocessing for parallel transfer "
                   "finished")
def _transfer_dir_parallel(self, source, destination, jobs):
    """Transfer a directory to the remote side in parallel mode."""
    self.log.info("Remote transfer in {} parallel jobs".format(jobs))
    self.log.debug("Multiprocessing start method is {}".format(
        multiprocessing.get_start_method()))
    self.log.debug("Starting multiprocessing Pool for parallel "
                   "remote transfer")
    # Build one copy command per directory entry; each pool worker
    # runs exactly one of them.
    args = []
    for entry in glob.glob1(source, '*'):
        copy_cmd = self._copy.split()
        copy_cmd.append(os.path.join(source, entry))
        copy_cmd.append('{0}:{1}'.format(
            _wrap_addr(self.target_host[0]), destination))
        args.append((copy_cmd, ))
    mp_pool_run(self._run_command, args, jobs=jobs)
    self.log.debug("Multiprocessing for parallel transfer "
                   "finished")
def _transfer_dir_parallel(self, source, destination, jobs):
    """Transfer a directory to the remote side in parallel mode.

    :param source: local directory to transfer.
    :param destination: target directory on the remote side.
    :param jobs: number of parallel worker processes.
    """
    self.log.debug("Remote transfer in {} parallel jobs".format(
        jobs))
    self.log.trace("Multiprocessing start method is {}".format(
        multiprocessing.get_start_method()))
    self.log.trace("Starting multiprocessing Pool for parallel "
                   "remote transfer")
    args = [
        (command, )
        for command in self._transfer_dir_commands(source, destination)
    ]
    if len(args) == 1:
        self.log.debug("Only one dir entry, transfering sequentially")
        # BUG FIX: keep parity with mp_pool_run, which calls
        # _run_command(*tuple) for each entry; passing the tuple
        # itself would hand _run_command a 1-tuple instead of the
        # command list.
        self._run_command(*args[0])
    else:
        mp_pool_run(self._run_command, args, jobs=jobs)
    self.log.trace("Multiprocessing for parallel transfer "
                   "finished")
def _iterate_once_parallel(self):
    """Run one config iteration, preparing and then running objects
    in parallel where possible.

    Returns True if any object changed state during this iteration.
    """
    self.log.info("Iteration in parallel mode in {} jobs".format(
        self.jobs))
    objects_changed = False

    # Collect all untouched objects whose requirements are satisfied.
    cargo = []
    for cdist_object in self.object_list():
        if cdist_object.requirements_unfinished(cdist_object.requirements):
            # We cannot do anything for this poor object.
            continue
        if cdist_object.state == core.CdistObject.STATE_UNDEF:
            # Prepare the virgin object.
            cargo.append(cdist_object)

    count = len(cargo)
    if count == 1:
        self.log.debug("Only one object, preparing sequentially")
        self.object_prepare(cargo[0])
        objects_changed = True
    elif cargo:
        self.log.debug("Multiprocessing start method is {}".format(
            multiprocessing.get_start_method()))
        self.log.debug("Starting multiprocessing Pool for {} parallel "
                       "objects preparation".format(count))
        mp_pool_run(self.object_prepare,
                    [(obj, ) for obj in cargo],
                    jobs=self.jobs)
        self.log.debug("Multiprocessing for parallel object "
                       "preparation finished")
        objects_changed = True

    del cargo[:]
    # Collect prepared objects that are now ready to run.
    for cdist_object in self.object_list():
        if cdist_object.requirements_unfinished(cdist_object.requirements):
            # We cannot do anything for this poor object.
            continue
        if cdist_object.state == core.CdistObject.STATE_PREPARED:
            if cdist_object.requirements_unfinished(
                    cdist_object.autorequire):
                # The previous step created objects we depend on -
                # wait for them.
                continue
            cargo.append(cdist_object)

    count = len(cargo)
    if count == 1:
        self.log.debug("Only one object, running sequentially")
        self.object_run(cargo[0])
        objects_changed = True
    elif cargo:
        self.log.debug("Multiprocessing start method is {}".format(
            multiprocessing.get_start_method()))
        self.log.debug("Starting multiprocessing Pool for {} parallel "
                       "object run".format(count))
        mp_pool_run(self.object_run,
                    [(obj, ) for obj in cargo],
                    jobs=self.jobs)
        self.log.debug("Multiprocessing for parallel object "
                       "run finished")
        objects_changed = True

    return objects_changed
def _iterate_once_parallel(self):
    """One parallel iteration: prepare all ready objects, then run them.

    Objects are run in chunks built so that no chunk holds two objects
    of the same nonparallel-marked type.  Returns True if any object
    changed state during this iteration.
    """
    self.log.debug("Iteration in parallel mode in {} jobs".format(
        self.jobs))
    objects_changed = False

    # Collect all untouched objects whose requirements are satisfied.
    cargo = []
    for cdist_object in self.object_list():
        if cdist_object.requirements_unfinished(cdist_object.requirements):
            # We cannot do anything for this poor object.
            continue
        if cdist_object.state == core.CdistObject.STATE_UNDEF:
            # Prepare the virgin object.
            cargo.append(cdist_object)

    n = len(cargo)
    if n == 1:
        self.log.debug("Only one object, preparing sequentially")
        self.object_prepare(cargo[0])
        objects_changed = True
    elif cargo:
        self.log.trace("Multiprocessing start method is {}".format(
            multiprocessing.get_start_method()))
        self.log.trace("Multiprocessing cargo: %s", cargo)
        # Transfer each distinct type's explorers exactly once.
        cargo_types = {obj.cdist_type for obj in cargo}
        self.log.trace("Multiprocessing cargo_types: %s", cargo_types)
        nt = len(cargo_types)
        if nt == 1:
            self.log.debug("Only one type, transferring explorers "
                           "sequentially")
            self.explorer.transfer_type_explorers(cargo_types.pop())
        else:
            self.log.trace(
                "Starting multiprocessing Pool for {} "
                "parallel types explorers transferring".format(nt))
            mp_pool_run(self.explorer.transfer_type_explorers,
                        [(ct, ) for ct in cargo_types],
                        jobs=self.jobs)
            self.log.trace("Multiprocessing for parallel transferring "
                           "types' explorers finished")
        self.log.trace("Starting multiprocessing Pool for {} parallel "
                       "objects preparation".format(n))
        # False: workers must not transfer type explorers again.
        mp_pool_run(self.object_prepare,
                    [(obj, False) for obj in cargo],
                    jobs=self.jobs)
        self.log.trace("Multiprocessing for parallel object "
                       "preparation finished")
        objects_changed = True

    del cargo[:]
    for cdist_object in self.object_list():
        if cdist_object.requirements_unfinished(cdist_object.requirements):
            # We cannot do anything for this poor object.
            continue
        if cdist_object.state == core.CdistObject.STATE_PREPARED:
            if cdist_object.requirements_unfinished(
                    cdist_object.autorequire):
                # The previous step created objects we depend on -
                # wait for them.
                continue
            # Put objects in chunks of distinct types so that there is
            # no more than one object of the same type per chunk:
            # an object's process locking could otherwise prevent
            # parallel execution at the remote side.  This only matters
            # for types marked nonparallel.
            for chunk in cargo:
                for obj in chunk:
                    if (obj.cdist_type == cdist_object.cdist_type and
                            cdist_object.cdist_type.is_nonparallel):
                        break
                else:
                    chunk.append(cdist_object)
                    break
            else:
                cargo.append([cdist_object])

    for chunk in cargo:
        self.log.trace("Running chunk: %s", chunk)
        n = len(chunk)
        if n == 1:
            self.log.debug("Only one object, running sequentially")
            self.object_run(chunk[0])
            objects_changed = True
        elif chunk:
            self.log.trace("Multiprocessing start method is {}".format(
                multiprocessing.get_start_method()))
            self.log.trace("Starting multiprocessing Pool for {} "
                           "parallel object run".format(n))
            mp_pool_run(self.object_run,
                        [(obj, ) for obj in chunk],
                        jobs=self.jobs)
            self.log.trace("Multiprocessing for parallel object "
                           "run finished")
            objects_changed = True

    return objects_changed
def commandline(cls, args):
    """Configure remote system.

    Sets up (optionally timestamped and/or parallel) logging, resolves
    the hosts to operate on (directly, from a host file, or from the
    inventory by tag), and configures each host either sequentially or
    via a multiprocessing pool.

    :param args: parsed argparse namespace.
    :raises cdist.Error: listing every host that failed to configure.
    """
    if (args.parallel and args.parallel != 1) or args.jobs:
        if args.timestamp:
            cdist.log.setupTimestampingParallelLogging()
        else:
            cdist.log.setupParallelLogging()
    elif args.timestamp:
        cdist.log.setupTimestampingLogging()
    log = logging.getLogger("config")
    # No new child process if only one host at a time.
    if args.parallel == 1:
        log.debug("Only 1 parallel process, doing it sequentially")
        args.parallel = 0
    if args.parallel:
        import signal
        signal.signal(signal.SIGTERM, mp_sig_handler)
        signal.signal(signal.SIGHUP, mp_sig_handler)
    cls._check_and_prepare_args(args)
    failed_hosts = []
    time_start = time.time()
    cls.construct_remote_exec_copy_patterns(args)
    base_root_path = cls.create_base_root_path(args.out_path)
    hostcnt = 0

    cfg = cdist.configuration.Configuration(args)
    configuration = cfg.get_config(section='GLOBAL')

    if args.tag or args.all_tagged_hosts:
        inventory.determine_default_inventory_dir(args, configuration)
        if args.all_tagged_hosts:
            inv_list = inventory.InventoryList(
                hosts=None, istag=True, hostfile=None,
                db_basedir=args.inventory_dir)
        else:
            inv_list = inventory.InventoryList(
                hosts=args.host, istag=True, hostfile=args.hostfile,
                db_basedir=args.inventory_dir,
                has_all_tags=args.has_all_tags)
        it = inv_list.entries()
    else:
        it = itertools.chain(cls.hosts(args.host),
                             cls.hosts(args.hostfile))

    process_args = []
    if args.parallel:
        log.trace("Processing hosts in parallel")
    else:
        log.trace("Processing hosts sequentially")
    for entry in it:
        if isinstance(entry, tuple):
            # Configuring by specified tags.
            host, host_tags = entry
        else:
            # Configuring by host: check the inventory for tags.
            host = entry
            inventory.determine_default_inventory_dir(args, configuration)
            inv_list = inventory.InventoryList(
                hosts=(host, ), db_basedir=args.inventory_dir)
            inv = tuple(inv_list.entries())
            if inv:
                # Host is present in inventory and has tags.
                host_tags = inv[0][1]
            else:
                # Host is not present in inventory or has no tags.
                host_tags = None
        host_base_path, hostdir = cls.create_host_base_dirs(
            host, base_root_path)
        log.debug("Base root path for target host \"{}\" is \"{}\"".format(
            host, host_base_path))
        hostcnt += 1
        if args.parallel:
            pargs = (host, host_tags, host_base_path, hostdir, args, True,
                     configuration)
            log.trace(("Args for multiprocessing operation "
                       "for host {}: {}".format(host, pargs)))
            process_args.append(pargs)
        else:
            try:
                cls.onehost(host, host_tags, host_base_path, hostdir,
                            args, parallel=False,
                            configuration=configuration)
            except cdist.Error:
                failed_hosts.append(host)
    if args.parallel and len(process_args) == 1:
        log.debug("Only 1 host for parallel processing, doing it "
                  "sequentially")
        try:
            cls.onehost(*process_args[0])
        except cdist.Error:
            # BUG FIX: take the host name from the argument tuple
            # instead of relying on the for-loop variable `host`
            # leaking out of the loop above.
            failed_hosts.append(process_args[0][0])
    elif args.parallel:
        log.trace("Multiprocessing start method is {}".format(
            multiprocessing.get_start_method()))
        log.trace(("Starting multiprocessing Pool for {} "
                   "parallel host operation".format(args.parallel)))
        results = mp_pool_run(cls.onehost, process_args,
                              jobs=args.parallel)
        log.trace(("Multiprocessing for parallel host operation "
                   "finished"))
        log.trace("Multiprocessing for parallel host operation "
                  "results: %s", results)
        failed_hosts = [host for host, result in results if not result]
    time_end = time.time()
    log.verbose("Total processing time for %s host(s): %s", hostcnt,
                (time_end - time_start))

    if len(failed_hosts) > 0:
        raise cdist.Error("Failed to configure the following hosts: " +
                          " ".join(failed_hosts))
    elif not args.out_path:
        # If tmp out path created then remove it, but only if no failed
        # hosts.
        shutil.rmtree(base_root_path)
def _iterate_once_parallel(self):
    """Perform one parallel iteration over all objects.

    First prepares every ready object, then runs prepared objects in
    chunks that never mix two objects of the same nonparallel type.
    Returns True if any object changed state.
    """
    self.log.debug("Iteration in parallel mode in {} jobs".format(
        self.jobs))
    objects_changed = False

    cargo = []
    for cdist_object in self.object_list():
        if cdist_object.requirements_unfinished(cdist_object.requirements):
            # We cannot do anything for this poor object.
            continue
        if cdist_object.state == core.CdistObject.STATE_UNDEF:
            # Prepare the virgin object.
            cargo.append(cdist_object)

    n = len(cargo)
    if n == 1:
        self.log.debug("Only one object, preparing sequentially")
        self.object_prepare(cargo[0])
        objects_changed = True
    elif cargo:
        self.log.trace("Multiprocessing start method is {}".format(
            multiprocessing.get_start_method()))
        self.log.trace("Multiprocessing cargo: %s", cargo)
        # Each distinct type's explorers are transferred exactly once.
        cargo_types = set()
        for queued in cargo:
            cargo_types.add(queued.cdist_type)
        self.log.trace("Multiprocessing cargo_types: %s", cargo_types)
        nt = len(cargo_types)
        if nt == 1:
            self.log.debug("Only one type, transfering explorers "
                           "sequentially")
            self.explorer.transfer_type_explorers(cargo_types.pop())
        else:
            self.log.trace("Starting multiprocessing Pool for {} "
                           "parallel transfering types' explorers".format(
                               nt))
            mp_pool_run(self.explorer.transfer_type_explorers,
                        [(ct, ) for ct in cargo_types],
                        jobs=self.jobs)
            self.log.trace("Multiprocessing for parallel transfering "
                           "types' explorers finished")
        self.log.trace("Starting multiprocessing Pool for {} parallel "
                       "objects preparation".format(n))
        # False: workers skip transferring type explorers again.
        mp_pool_run(self.object_prepare,
                    [(queued, False) for queued in cargo],
                    jobs=self.jobs)
        self.log.trace("Multiprocessing for parallel object "
                       "preparation finished")
        objects_changed = True

    del cargo[:]
    for cdist_object in self.object_list():
        if cdist_object.requirements_unfinished(cdist_object.requirements):
            # We cannot do anything for this poor object.
            continue
        if cdist_object.state == core.CdistObject.STATE_PREPARED:
            if cdist_object.requirements_unfinished(
                    cdist_object.autorequire):
                # The previous step created objects we depend on -
                # wait for them.
                continue
            # Put objects in chunks of distinct types so that no chunk
            # holds more than one object of the same type: an object's
            # process locking could otherwise prevent parallel
            # execution at the remote side.  Only relevant for types
            # marked nonparallel.
            for chunk in cargo:
                for obj in chunk:
                    if (obj.cdist_type == cdist_object.cdist_type and
                            cdist_object.cdist_type.is_nonparallel):
                        break
                else:
                    chunk.append(cdist_object)
                    break
            else:
                cargo.append([cdist_object])

    for chunk in cargo:
        self.log.trace("Running chunk: %s", chunk)
        n = len(chunk)
        if n == 1:
            self.log.debug("Only one object, running sequentially")
            self.object_run(chunk[0])
            objects_changed = True
        elif chunk:
            self.log.trace("Multiprocessing start method is {}".format(
                multiprocessing.get_start_method()))
            self.log.trace("Starting multiprocessing Pool for {} "
                           "parallel object run".format(n))
            mp_pool_run(self.object_run,
                        [(obj, ) for obj in chunk],
                        jobs=self.jobs)
            self.log.trace("Multiprocessing for parallel object "
                           "run finished")
            objects_changed = True

    return objects_changed
def commandline(cls, args):
    """Configure remote system"""
    # FIXME: Refactor relict - remove later
    log = logging.getLogger("cdist")
    # No new child process if only one host at a time.
    if args.parallel == 1:
        log.debug("Only 1 parallel process, doing it sequentially")
        args.parallel = 0
    if args.parallel or args.jobs:
        # If parallel execution then also log process id
        cdist.log.setupParallelLogging()
        log = logging.getLogger("cdist")
    if args.parallel:
        import signal
        signal.signal(signal.SIGTERM, mp_sig_handler)
        signal.signal(signal.SIGHUP, mp_sig_handler)
    cls._check_and_prepare_args(args)
    failed_hosts = []
    time_start = time.time()
    cls.construct_remote_exec_copy_patterns(args)
    base_root_path = cls.create_base_root_path(args.out_path)
    hostcnt = 0

    cfg = cdist.configuration.Configuration(args)
    configuration = cfg.get_config(section='GLOBAL')

    # Resolve the iterable of hosts: by tag from the inventory, or
    # directly from the command line / host file.
    if args.tag or args.all_tagged_hosts:
        inventory.determine_default_inventory_dir(args, configuration)
        if args.all_tagged_hosts:
            inv_list = inventory.InventoryList(
                hosts=None, istag=True, hostfile=None,
                db_basedir=args.inventory_dir)
        else:
            inv_list = inventory.InventoryList(
                hosts=args.host, istag=True, hostfile=args.hostfile,
                db_basedir=args.inventory_dir,
                has_all_tags=args.has_all_tags)
        it = inv_list.entries()
    else:
        it = itertools.chain(cls.hosts(args.host),
                             cls.hosts(args.hostfile))

    process_args = []
    if args.parallel:
        log.trace("Processing hosts in parallel")
    else:
        log.trace("Processing hosts sequentially")
    for entry in it:
        if isinstance(entry, tuple):
            # Configuring by specified tags.
            host, host_tags = entry
        else:
            # Configuring by host: check the inventory for tags.
            host = entry
            inventory.determine_default_inventory_dir(args, configuration)
            inv_list = inventory.InventoryList(
                hosts=(host,), db_basedir=args.inventory_dir)
            inv = tuple(inv_list.entries())
            if inv:
                # Host is present in inventory and has tags.
                host_tags = inv[0][1]
            else:
                # Host is not present in inventory or has no tags.
                host_tags = None
        host_base_path, hostdir = cls.create_host_base_dirs(
            host, base_root_path)
        log.debug("Base root path for target host \"{}\" is \"{}\"".format(
            host, host_base_path))
        hostcnt += 1
        if args.parallel:
            pargs = (host, host_tags, host_base_path, hostdir, args, True,
                     configuration)
            log.trace(("Args for multiprocessing operation "
                       "for host {}: {}".format(host, pargs)))
            process_args.append(pargs)
        else:
            try:
                cls.onehost(host, host_tags, host_base_path, hostdir,
                            args, parallel=False,
                            configuration=configuration)
            except cdist.Error as e:
                failed_hosts.append(host)
    if args.parallel and len(process_args) == 1:
        log.debug("Only 1 host for parallel processing, doing it "
                  "sequentially")
        try:
            cls.onehost(*process_args[0])
        except cdist.Error as e:
            failed_hosts.append(host)
    elif args.parallel:
        log.trace("Multiprocessing start method is {}".format(
            multiprocessing.get_start_method()))
        log.trace(("Starting multiprocessing Pool for {} "
                   "parallel host operation".format(args.parallel)))
        results = mp_pool_run(cls.onehost, process_args,
                              jobs=args.parallel)
        log.trace(("Multiprocessing for parallel host operation "
                   "finished"))
        log.trace("Multiprocessing for parallel host operation "
                  "results: %s", results)
        failed_hosts = [host for host, result in results if not result]
    time_end = time.time()
    log.verbose("Total processing time for %s host(s): %s", hostcnt,
                (time_end - time_start))

    if len(failed_hosts) > 0:
        raise cdist.Error("Failed to configure the following hosts: " +
                          " ".join(failed_hosts))
    elif not args.out_path:
        # If tmp out path created then remove it, but only if no failed
        # hosts.
        shutil.rmtree(base_root_path)