def _run_command(self, command, env=None, return_output=False): """Run the given command with the given environment. Return the output as a string. """ assert isinstance( command, (list, tuple)), "list or tuple argument expected, got: %s" % command # export target_host for use in __remote_{exec,copy} scripts os_environ = os.environ.copy() os_environ['__target_host'] = self.target_host self.log.debug("Remote run: %s", command) try: if return_output: return subprocess.check_output(command, env=os_environ).decode() else: subprocess.check_call(command, env=os_environ) except subprocess.CalledProcessError: raise cdist.Error("Command failed: " + " ".join(command)) except OSError as error: raise cdist.Error(" ".join(command) + ": " + error.args[1]) except UnicodeDecodeError: raise DecodeError(command)
def run(self, command, env=None, return_output=False):
    """Run *command* locally with the given environment.

    :param command: argv list/tuple to execute.
    :param env: environment mapping; defaults to a copy of ``os.environ``.
    :param return_output: when true, return the decoded command output.
    :raises cdist.Error: if the command fails or cannot be executed.
    """
    assert isinstance(command, (list, tuple)), (
        "list or tuple argument expected, got: %s" % command)

    self.log.debug("Local run: %s", command)

    if env is None:
        env = os.environ.copy()
    # Export __target_host for use in __remote_{copy,exec} scripts
    env['__target_host'] = self.target_host

    try:
        if return_output:
            return subprocess.check_output(command, env=env).decode()
        else:
            subprocess.check_call(command, env=env)
    except subprocess.CalledProcessError:
        raise cdist.Error("Command failed: " + " ".join(command))
    except OSError as error:
        # BUG FIX: the original joined the undefined name ``*args`` here,
        # which raised NameError instead of the intended cdist.Error.
        raise cdist.Error(" ".join(command) + ": " + error.args[1])
def __init__(self, hosts=None, tags=None, hostfile=None, tagfile=None,
             db_basedir=dist_inventory_db, all=False, action="add"):
    """Initialize host/tag selection for an inventory tag action.

    :raises cdist.Error: for an invalid action or when both hosts and
        tags would have to be read from stdin.
    """
    super().__init__(db_basedir)
    self.actions = ("add", "del")
    if action not in self.actions:
        # BUG FIX: self.actions is a tuple and has no .keys(); the
        # original error path raised AttributeError instead of the
        # intended cdist.Error.
        raise cdist.Error("Invalid action \'{}\', valid actions are:"
                          " {}\n".format(action, self.actions))
    self.action = action
    self.hosts = hosts
    self.tags = tags
    self.hostfile = hostfile
    self.tagfile = tagfile
    self.all = all

    # No explicit hosts: the action applies to all hosts in the db.
    if not self.hosts and not self.hostfile:
        self.allhosts = True
    else:
        self.allhosts = False
    # No tags supplied anywhere: read them from stdin.
    if not self.tags and not self.tagfile:
        self.tagfile = "-"
    if self.hostfile == "-" and self.tagfile == "-":
        raise cdist.Error("Cannot read both, hosts and tags, from stdin")
def run(self, command, env=None, return_output=False, message_prefix=None):
    """Run *command* locally, optionally collecting messages.

    When *return_output* is true, return the decoded command output.
    """
    self.log.debug("Local run: %s", command)
    assert isinstance(command, (list, tuple)), \
        "list or tuple argument expected, got: %s" % command

    env = os.environ.copy() if env is None else env
    # Export __target_host for use in __remote_{copy,exec} scripts
    env['__target_host'] = self.target_host
    # Export for emulator
    env['__cdist_object_marker'] = self.object_marker_name

    message = None
    if message_prefix:
        message = cdist.message.Message(message_prefix, self.messages_path)
        env.update(message.env)

    try:
        if return_output:
            return subprocess.check_output(command, env=env).decode()
        subprocess.check_call(command, env=env)
    except subprocess.CalledProcessError:
        raise cdist.Error("Command failed: " + " ".join(command))
    except OSError as error:
        raise cdist.Error(" ".join(command) + ": " + error.args[1])
    finally:
        # Merge collected messages back even when the command failed.
        if message_prefix:
            message.merge_messages()
def _run_command(self, command, env=None, return_output=False): """Run the given command with the given environment. Return the output as a string. """ assert isinstance( command, (list, tuple)), "list or tuple argument expected, got: %s" % command # export target_host for use in __remote_{exec,copy} scripts os_environ = os.environ.copy() os_environ['__target_host'] = self.target_host # can't pass environment to remote side, so prepend command with # variable declarations if env: cmd = ["%s=%s" % item for item in env.items()] cmd.extend(command) else: cmd = command self.log.debug("Remote run: %s", command) try: if return_output: return subprocess.check_output(command, env=os_environ).decode() else: subprocess.check_call(command, env=os_environ) except subprocess.CalledProcessError: raise cdist.Error("Command failed: " + " ".join(command)) except OSError as error: raise cdist.Error(" ".join(*args) + ": " + error.args[1]) except UnicodeDecodeError: raise DecodeError(command)
def commandline(cls, args):
    """Configure remote system"""
    import multiprocessing

    # FIXME: Refactor relict - remove later
    log = logging.getLogger("cdist")

    initial_manifest_tempfile = None
    if args.manifest == '-':
        # read initial manifest from stdin
        import tempfile
        try:
            handle, initial_manifest_temp_path = tempfile.mkstemp(
                prefix='cdist.stdin.')
            with os.fdopen(handle, 'w') as fd:
                fd.write(sys.stdin.read())
        except (IOError, OSError) as e:
            raise cdist.Error(
                "Creating tempfile for stdin data failed: %s" % e)

        args.manifest = initial_manifest_temp_path
        # Clean up the stdin tempfile when the interpreter exits.
        import atexit
        atexit.register(lambda: os.remove(initial_manifest_temp_path))

    process = {}
    failed_hosts = []
    time_start = time.time()

    # Either fork one child per host (parallel) or configure each host
    # sequentially, collecting failures instead of aborting.
    for host in args.host:
        if args.parallel:
            log.debug("Creating child process for %s", host)
            process[host] = multiprocessing.Process(
                target=cls.onehost, args=(host, args, True))
            process[host].start()
        else:
            try:
                cls.onehost(host, args, parallel=False)
            except cdist.Error as e:
                failed_hosts.append(host)

    # Catch errors in parallel mode when joining
    if args.parallel:
        for host in process.keys():
            log.debug("Joining process %s", host)
            process[host].join()

            # A non-zero exit code marks this host as failed.
            if not process[host].exitcode == 0:
                failed_hosts.append(host)

    time_end = time.time()
    log.info("Total processing time for %s host(s): %s", len(args.host),
             (time_end - time_start))

    if len(failed_hosts) > 0:
        raise cdist.Error("Failed to configure the following hosts: " +
                          " ".join(failed_hosts))
def __init__(self, hosts=None, hostfile=None,
             db_basedir=dist_inventory_db, all=False, action="add",
             configuration=None):
    """Initialize a host-list inventory action.

    :raises cdist.Error: for an invalid action or when neither hosts
        nor a hostfile are given.
    """
    super().__init__(db_basedir, configuration)
    self.actions = ("add", "del")
    if action not in self.actions:
        # BUG FIX: self.actions is a tuple and has no .keys(); the
        # original error path raised AttributeError instead of the
        # intended cdist.Error.
        raise cdist.Error("Invalid action \'{}\', valid actions are:"
                          " {}\n".format(action, self.actions))
    self.action = action
    self.hosts = hosts
    self.hostfile = hostfile
    self.all = all
    if not self.hosts and not self.hostfile:
        raise cdist.Error("Host(s) missing")
def object_run(self, cdist_object):
    """Run gencode and code for an object"""
    self.log.debug("Trying to run object " + cdist_object.name)

    state = cdist_object.state
    if state == core.Object.STATE_RUNNING:
        # FIXME: resolve dependency circle / show problem source
        raise cdist.Error("Detected circular dependency in " +
                          cdist_object.name)
    if state == core.Object.STATE_DONE:
        self.log.debug("Ignoring run of already finished object %s",
                       cdist_object)
        return
    cdist_object.state = core.Object.STATE_RUNNING

    cdist_type = cdist_object.type

    # Recursively satisfy all requirements first (depth-first).
    for requirement in cdist_object.requirements:
        self.log.debug("Object %s requires %s", cdist_object, requirement)
        required_object = cdist_object.object_from_name(requirement)
        # The user may have created dependencies without satisfying them
        if not required_object.exists:
            raise cdist.Error(cdist_object.name +
                              " requires non-existing " +
                              required_object.name)
        self.log.debug("Required object %s exists", required_object.name)
        self.object_run(required_object)

    # Generate
    self.log.info("Generating and executing code for " + cdist_object.name)
    cdist_object.code_local = self.code.run_gencode_local(cdist_object)
    cdist_object.code_remote = self.code.run_gencode_remote(cdist_object)
    if cdist_object.code_local or cdist_object.code_remote:
        cdist_object.changed = True

    # Execute
    if cdist_object.code_local:
        self.code.run_code_local(cdist_object)
    if cdist_object.code_remote:
        self.code.transfer_code_remote(cdist_object)
        self.code.run_code_remote(cdist_object)

    # Mark this object as done
    self.log.debug("Finishing run of " + cdist_object.name)
    cdist_object.state = core.Object.STATE_DONE
def transfer_dir_parallel(self, source, destination, jobs):
    """Transfer a directory to the remote side in parallel mode."""
    self.log.debug("Remote transfer: %s -> %s", source, destination)
    # Remove any stale remote copy first so the result mirrors *source*.
    self.rmdir(destination)
    if os.path.isdir(source):
        self.mkdir(destination)
        self.log.info("Remote transfer in {} parallel jobs".format(
            jobs))
        self.log.debug("Multiprocessing start method is {}".format(
            multiprocessing.get_start_method()))
        self.log.debug(("Starting multiprocessing Pool for parallel "
                        "remote transfer"))
        with multiprocessing.Pool(jobs) as pool:
            self.log.debug("Starting async for parallel transfer")
            commands = []
            # Build one copy command per top-level entry of *source*;
            # glob1 lists the directory without recursing.
            for f in glob.glob1(source, '*'):
                command = self._copy.split()
                path = os.path.join(source, f)
                command.extend([path, '{0}:{1}'.format(
                    self.target_host[0], destination)])
                commands.append(command)
            # Dispatch all copies to the pool, then block on each result;
            # r.get() re-raises any exception from the worker.
            results = [
                pool.apply_async(self._run_command, (cmd,))
                for cmd in commands
            ]
            self.log.debug("Waiting async results for parallel transfer")
            for r in results:
                r.get()  # self._run_command returns None
            self.log.debug(("Multiprocessing for parallel transfer "
                            "finished"))
    else:
        raise cdist.Error("Source {} is not a directory".format(source))
def hosts(source):
    """Yield hosts read from *source*, wrapping read errors in cdist.Error."""
    try:
        host_source = cdist.hostsource.HostSource(source)
        for host in host_source():
            yield host
    except (IOError, OSError, UnicodeError) as e:
        raise cdist.Error(
            "Error reading hosts from \'{}\': {}".format(
                source, e))
def setup_object(self):
    """Create or validate the cdist object defined by the parsed args."""
    # Collect all argument values that were actually supplied.
    self.parameters = {
        key: value
        for key, value in vars(self.args).items()
        if value is not None
    }

    if self.cdist_object.exists and 'CDIST_OVERRIDE' not in self.env:
        # Make existing requirements a set so that we can compare it
        # later with new requirements.
        self._existing_reqs = set(self.cdist_object.requirements)
        if self.cdist_object.parameters != self.parameters:
            errmsg = ("Object %s already exists with conflicting "
                      "parameters:\n%s: %s\n%s: %s" % (
                          self.cdist_object.name,
                          " ".join(self.cdist_object.source),
                          self.cdist_object.parameters,
                          self.object_source,
                          self.parameters))
            raise cdist.Error(errmsg)
    else:
        if self.cdist_object.exists:
            self.log.debug(('Object %s override forced with '
                            'CDIST_OVERRIDE'), self.cdist_object.name)
            self.cdist_object.create(True)
        else:
            self.cdist_object.create()
        self.cdist_object.parameters = self.parameters
        # record the created object in typeorder file
        with open(self.typeorder_path, 'a') as typeorderfile:
            print(self.cdist_object.name, file=typeorderfile)

    # Record / Append source
    self.cdist_object.source.append(self.object_source)
def iterate_until_finished(self):
    """Walk all objects repeatedly until no state change occurs anymore.

    Each pass prepares undefined objects and runs prepared ones; objects
    newly created by manifests or autorequire are picked up on the next
    pass.  Raises cdist.Error when unresolvable requirements remain.
    """
    # Continue process until no new objects are created anymore
    objects_changed = True

    while objects_changed:
        objects_changed = False
        for cdist_object in self.object_list():
            if cdist_object.requirements_unfinished(
                    cdist_object.requirements):
                """We cannot do anything for this poor object"""
                continue

            if cdist_object.state == core.CdistObject.STATE_UNDEF:
                """Prepare the virgin object"""
                self.object_prepare(cdist_object)
                objects_changed = True

            if cdist_object.requirements_unfinished(
                    cdist_object.autorequire):
                """The previous step created objects we depend on -
                wait for them"""
                continue

            if cdist_object.state == core.CdistObject.STATE_PREPARED:
                self.object_run(cdist_object)
                objects_changed = True

    # Check whether all objects have been finished
    unfinished_objects = []
    for cdist_object in self.object_list():
        if not cdist_object.state == cdist_object.STATE_DONE:
            unfinished_objects.append(cdist_object)

    if unfinished_objects:
        # Build a per-object report of what is still blocking it.
        info_string = []

        for cdist_object in unfinished_objects:
            requirement_names = []
            autorequire_names = []

            for requirement in cdist_object.requirements_unfinished(
                    cdist_object.requirements):
                requirement_names.append(requirement.name)

            for requirement in cdist_object.requirements_unfinished(
                    cdist_object.autorequire):
                autorequire_names.append(requirement.name)

            requirements = ", ".join(requirement_names)
            autorequire = ", ".join(autorequire_names)
            info_string.append(
                "%s requires: %s autorequires: %s" % (
                    cdist_object.name, requirements, autorequire))

        raise cdist.Error(
            "The requirements of the following objects could not be resolved: %s" %
            ("; ".join(info_string)))
def object_run(self, cdist_object):
    """Run gencode and code for an object.

    Generates local/remote code and, unless dry_run is set, executes it.
    Marks the object STATE_DONE afterwards.
    """
    self.log.debug("Trying to run object %s" % (cdist_object.name))
    if cdist_object.state == core.CdistObject.STATE_DONE:
        # BUG FIX: the original passed the object as a second positional
        # argument to cdist.Error without %-formatting, so the message
        # never contained the object name.
        raise cdist.Error(("Attempting to run an already finished "
                           "object: %s") % cdist_object)

    cdist_type = cdist_object.cdist_type

    # Generate
    self.log.info("Generating code for %s" % (cdist_object.name))
    cdist_object.code_local = self.code.run_gencode_local(cdist_object)
    cdist_object.code_remote = self.code.run_gencode_remote(cdist_object)
    if cdist_object.code_local or cdist_object.code_remote:
        cdist_object.changed = True

    # Execute
    if not self.dry_run:
        if cdist_object.code_local or cdist_object.code_remote:
            self.log.info("Executing code for %s" % (cdist_object.name))
        if cdist_object.code_local:
            self.code.run_code_local(cdist_object)
        if cdist_object.code_remote:
            self.code.transfer_code_remote(cdist_object)
            self.code.run_code_remote(cdist_object)
    else:
        self.log.info("Skipping code execution due to DRY RUN")

    # Mark this object as done
    self.log.debug("Finishing run of " + cdist_object.name)
    cdist_object.state = core.CdistObject.STATE_DONE
def resolve_target_addresses(host, family):
    """Resolve the addresses of *host*, wrapping failures in cdist.Error.

    :param host: host name to resolve.
    :param family: address family passed through to the resolver.
    :raises cdist.Error: when resolution fails for any reason.
    """
    try:
        return ipaddr.resolve_target_addresses(host, family)
    except Exception as e:
        # BUG FIX: the original used a bare ``except:``, which also
        # swallowed SystemExit/KeyboardInterrupt; Exception is the
        # broadest class we actually want to translate here.  Chain the
        # cause so the original traceback is preserved.
        raise cdist.Error(("Error resolving target addresses for host '{}'"
                           ": {}").format(host, e)) from e
def setup_object(self):
    """Instantiate the object being defined and create or validate it."""
    # Setup object_id - FIXME: unset / do not setup anymore!
    if self.cdist_type.is_singleton:
        self.object_id = "singleton"
    else:
        self.object_id = self.args.object_id[0]
        del self.args.object_id

    # Instantiate the cdist object we are defining
    self.cdist_object = core.CdistObject(self.cdist_type,
                                         self.object_base_path,
                                         self.object_id)

    # Create object with given parameters
    self.parameters = {
        key: value
        for key, value in vars(self.args).items()
        if value is not None
    }

    if not self.cdist_object.exists:
        self.cdist_object.create()
        self.cdist_object.parameters = self.parameters
    elif self.cdist_object.parameters != self.parameters:
        raise cdist.Error(
            "Object %s already exists with conflicting parameters:\n%s: %s\n%s: %s" % (
                self.cdist_object.name,
                " ".join(self.cdist_object.source),
                self.cdist_object.parameters,
                self.object_source,
                self.parameters))

    # Record / Append source
    self.cdist_object.source.append(self.object_source)
def init_db(self):
    """Ensure the inventory db base directory exists and is a directory."""
    self.log.trace("Init db: {}".format(self.db_basedir))
    if os.path.isdir(self.db_basedir):
        return
    if os.path.exists(self.db_basedir):
        # Exists but is not a directory: refuse to use it.
        raise cdist.Error(("Invalid inventory db basedir \'{}\',"
                           " must be a directory").format(self.db_basedir))
    os.makedirs(self.db_basedir, exist_ok=True)
def run_script(self, script, env=None, return_output=False):
    """Run *script* via ``/bin/sh -e`` with __target_host exported.

    When *return_output* is true, return the decoded script output.
    """
    command = ["/bin/sh", "-e", script]

    self.log.debug("Local run script: %s", command)
    if env is None:
        env = os.environ.copy()
    # Export __target_host for use in __remote_{copy,exec} scripts
    env['__target_host'] = self.target_host
    self.log.debug("Local run script env: %s", env)

    try:
        if not return_output:
            subprocess.check_call(command, env=env)
        else:
            return subprocess.check_output(command, env=env).decode()
    except subprocess.CalledProcessError:
        # Show the failing script's content to ease debugging.
        script_content = self.run(["cat", script], return_output=True)
        self.log.error("Code that raised the error:\n%s", script_content)
        raise LocalScriptError(script, command, script_content)
    except EnvironmentError as error:
        raise cdist.Error(" ".join(command) + ": " + error.args[1])
def object_run(self, cdist_object):
    """Run gencode and code for an object and mark it STATE_DONE."""
    self.log.debug("Trying to run object " + cdist_object.name)
    if cdist_object.state == core.CdistObject.STATE_DONE:
        # TODO: remove once we are sure that this really never happens.
        # BUG FIX: the original passed the object as an extra argument
        # to cdist.Error instead of %-formatting it into the message.
        raise cdist.Error(
            "Attempting to run an already finished object: %s"
            % cdist_object)

    cdist_type = cdist_object.cdist_type

    # Generate
    self.log.info("Generating and executing code for " + cdist_object.name)
    cdist_object.code_local = self.code.run_gencode_local(cdist_object)
    cdist_object.code_remote = self.code.run_gencode_remote(cdist_object)
    if cdist_object.code_local or cdist_object.code_remote:
        cdist_object.changed = True

    # Execute
    if cdist_object.code_local:
        self.code.run_code_local(cdist_object)
    if cdist_object.code_remote:
        self.code.transfer_code_remote(cdist_object)
        self.code.run_code_remote(cdist_object)

    # Mark this object as done
    self.log.debug("Finishing run of " + cdist_object.name)
    cdist_object.state = core.CdistObject.STATE_DONE
def _create_conf_path_and_link_conf_dirs(self):
    """Populate self.conf_path with symlinks into all conf dirs.

    Later conf_dirs win: an already-linked entry is removed before the
    new link is created.
    """
    # Create destination directories
    for sub_dir in CONF_SUBDIRS_LINKED:
        self.mkdir(os.path.join(self.conf_path, sub_dir))

    # Iterate over all directories and link the to the output dir
    for conf_dir in self.conf_dirs:
        self.log.debug("Checking conf_dir %s ..." % (conf_dir))
        for sub_dir in CONF_SUBDIRS_LINKED:
            current_dir = os.path.join(conf_dir, sub_dir)

            # Allow conf dirs to contain only partial content
            if not os.path.exists(current_dir):
                continue

            for entry in os.listdir(current_dir):
                src = os.path.abspath(os.path.join(conf_dir,
                                                   sub_dir,
                                                   entry))
                dst = os.path.join(self.conf_path, sub_dir, entry)

                # Already exists? remove and link
                # BUG FIX: use lexists() so a dangling symlink is also
                # removed; exists() follows the link and returns False
                # for broken targets, making os.symlink below fail with
                # FileExistsError.
                if os.path.lexists(dst):
                    os.unlink(dst)

                self.log.trace("Linking %s to %s ..." % (src, dst))
                try:
                    os.symlink(src, dst)
                except OSError as e:
                    raise cdist.Error("Linking %s %s to %s failed: %s" % (
                        sub_dir, src, dst, str(e)))
def save_cache(self, start_time=None):
    """Move this run's base directory into the cache location.

    :param start_time: run start timestamp used to build the cache
        subpath; defaults to the current time.
    """
    # BUG FIX: the original used ``start_time=time.time()`` as default,
    # which is evaluated only once at function definition time, so every
    # later call without an explicit start_time reused the same stale
    # timestamp.
    if start_time is None:
        start_time = time.time()
    self.log.trace("cache subpath pattern: {}".format(
        self.cache_path_pattern))
    cache_subpath = self._cache_subpath(start_time,
                                        self.cache_path_pattern)
    self.log.debug("cache subpath: {}".format(cache_subpath))
    destination = os.path.join(self.cache_path, cache_subpath)
    self.log.trace(("Saving cache: " + self.base_path + " to " +
                    destination))

    if not os.path.exists(destination):
        shutil.move(self.base_path, destination)
    else:
        # Merge into an existing cache entry, replacing stale content.
        for direntry in os.listdir(self.base_path):
            srcentry = os.path.join(self.base_path, direntry)
            destentry = os.path.join(destination, direntry)
            try:
                if os.path.isdir(destentry):
                    shutil.rmtree(destentry)
                elif os.path.exists(destentry):
                    os.remove(destentry)
            except (PermissionError, OSError) as e:
                raise cdist.Error(
                    "Cannot delete old cache entry {}: {}".format(
                        destentry, e))
            shutil.move(srcentry, destentry)

    # add target_host since cache dir can be hash-ed target_host
    host_cache_path = os.path.join(destination, "target_host")
    with open(host_cache_path, 'w') as hostf:
        print(self.target_host[0], file=hostf)
def setup_object(self):
    """Instantiate the object for this emulator run and create it."""
    # FIXME: verify object id
    # Setup object_id
    if self.cdist_type.is_singleton:
        self.object_id = "singleton"
    else:
        self.object_id = self.args.object_id[0]
        del self.args.object_id

    # strip leading slash from object_id
    self.object_id = self.object_id.lstrip('/')

    # Instantiate the cdist object we are defining
    self.cdist_object = core.Object(self.cdist_type,
                                    self.object_base_path,
                                    self.object_id)

    # Create object with given parameters
    self.parameters = {
        key: value
        for key, value in vars(self.args).items()
        if value is not None
    }

    if not self.cdist_object.exists:
        self.cdist_object.create()
        self.cdist_object.parameters = self.parameters
    elif self.cdist_object.parameters != self.parameters:
        raise cdist.Error(
            "Object %s already exists with conflicting parameters:\n%s: %s\n%s: %s" % (
                self.cdist_object,
                " ".join(self.cdist_object.source),
                self.cdist_object.parameters,
                self.object_source,
                self.parameters))
def object_run(self, cdist_object):
    """Run gencode and code for an object.

    Any cdist.Error raised while processing is re-raised as
    CdistObjectError carrying the offending object.
    """
    try:
        self.log.verbose("Running object " + cdist_object.name)
        if cdist_object.state == core.CdistObject.STATE_DONE:
            # BUG FIX: the original passed the object as an extra
            # argument to cdist.Error instead of %-formatting it into
            # the message.
            raise cdist.Error(("Attempting to run an already finished "
                               "object: %s") % cdist_object)

        # Generate
        self.log.debug("Generating code for %s" % (cdist_object.name))
        cdist_object.code_local = self.code.run_gencode_local(cdist_object)
        cdist_object.code_remote = self.code.run_gencode_remote(
            cdist_object)
        if cdist_object.code_local or cdist_object.code_remote:
            cdist_object.changed = True

        # Execute
        if cdist_object.code_local or cdist_object.code_remote:
            self.log.info("Processing %s" % (cdist_object.name))
            if not self.dry_run:
                if cdist_object.code_local:
                    self.log.trace("Executing local code for %s"
                                   % (cdist_object.name))
                    self.code.run_code_local(cdist_object)
                if cdist_object.code_remote:
                    self.log.trace("Executing remote code for %s"
                                   % (cdist_object.name))
                    self.code.transfer_code_remote(cdist_object)
                    self.code.run_code_remote(cdist_object)

        # Mark this object as done
        self.log.trace("Finishing run of " + cdist_object.name)
        cdist_object.state = core.CdistObject.STATE_DONE
    except cdist.Error as e:
        raise cdist.CdistObjectError(cdist_object, e)
def _run_command(self, command, env=None, return_output=False):
    """Execute *command* for the remote side via exec_util.

    Exports __target_host, __target_hostname and __target_fqdn so the
    __remote_{exec,copy} scripts can use them.  When *return_output* is
    true, return the decoded stdout.
    NOTE(review): *env* is accepted but never used here — confirm intended.
    """
    assert isinstance(command, (list, tuple)), (
        "list or tuple argument expected, got: %s" % command)

    # export target_host, target_hostname, target_fqdn
    # for use in __remote_{exec,copy} scripts
    host, hostname, fqdn = self.target_host[:3]
    os_environ = os.environ.copy()
    os_environ['__target_host'] = host
    os_environ['__target_hostname'] = hostname
    os_environ['__target_fqdn'] = fqdn

    self.log.debug("Remote run: %s", command)
    try:
        output, errout = exec_util.call_get_output(command, env=os_environ)
        self.log.debug("Remote stdout: {}".format(output))
        # Currently, stderr is not captured.
        # self.log.debug("Remote stderr: {}".format(errout))
        if return_output:
            return output.decode()
    except subprocess.CalledProcessError as e:
        exec_util.handle_called_process_error(e, command)
    except OSError as error:
        raise cdist.Error(" ".join(command) + ": " + error.args[1])
    except UnicodeDecodeError:
        raise DecodeError(command)
def commandline(cls, argv):
    """Entry point for the ``cdist preos`` subcommand.

    Parses *argv*, discovers available PreOS plugins in the configured
    conf dirs and either lists them or dispatches to the selected one.
    """
    cdist_parser = cdist.argparse.get_parsers()
    parser = argparse.ArgumentParser(description="Create PreOS",
                                     prog="cdist preos",
                                     parents=[cdist_parser['loglevel'], ])
    parser.add_argument('preos', help='PreOS to create', nargs='?',
                        default=None)
    parser.add_argument('-c', '--conf-dir',
                        help=('Add configuration directory (one that '
                              'contains "preos" subdirectory)'),
                        action='append')
    parser.add_argument('-g', '--config-file',
                        help='Use specified custom configuration file.',
                        dest="config_file", required=False)
    parser.add_argument('-L', '--list-preoses',
                        help='List available PreOS-es',
                        action='store_true', default=False)
    # Everything after the preos name is forwarded to the plugin as-is.
    parser.add_argument('remainder_args', nargs=argparse.REMAINDER)
    args = parser.parse_args(argv[1:])
    cdist.argparse.handle_loglevel(args)
    log.debug("preos args : %s", args)

    conf_dirs = util.resolve_conf_dirs_from_config_and_args(args)

    # Make plugin packages importable before scanning for them.
    extend_plugins_path(conf_dirs)
    sys.path.extend(_PLUGINS_PATH)
    cls.preoses = find_preoses()

    if args.list_preoses or not args.preos:
        print(get_available_preoses_string(cls))
        sys.exit(0)

    preos_name = args.preos
    if preos_name in cls.preoses:
        preos = cls.preoses[preos_name]
        func = getattr(preos, _PREOS_CALL)
        # Module plugins receive the module itself as first argument;
        # other plugin kinds only get the remaining args.
        if inspect.ismodule(preos):
            func_args = [preos, args.remainder_args, ]
        else:
            func_args = [args.remainder_args, ]
        log.info("Running preos : %s", preos_name)
        func(*func_args)
    else:
        raise cdist.Error("Invalid PreOS {}. {}".format(
            preos_name, get_available_preoses_string(cls)))
def __write(self, lines): try: with open(self.path, 'w') as fd: for line in lines: fd.write(str(line) + '\n') except EnvironmentError as e: # should never happen raise cdist.Error(str(e))
def _init_cache_dir(self, cache_dir): if cache_dir: self.cache_path = cache_dir elif self.home_dir: self.cache_path = os.path.join(self.home_dir, "cache") else: raise cdist.Error( "No homedir setup and no cache dir location given")
def _check_host(self, hostpath): if not os.path.exists(hostpath): return False else: if not os.path.isfile(hostpath): raise cdist.Error(("Host path \'{}\' exists, but is not" " a valid file").format(hostpath)) return True
def setup_object(self):
    """Create or validate the object, honouring CDIST_ORDER_DEPENDENCY.

    Also records the object in the (global or parent) typeorder files so
    that order dependencies can be reconstructed later.
    """
    # CDIST_ORDER_DEPENDENCY state
    # Sync the persisted order-dep state with the environment flag.
    order_dep_on = self._order_dep_on()
    order_dep_defined = "CDIST_ORDER_DEPENDENCY" in self.env
    if not order_dep_defined and order_dep_on:
        self._set_order_dep_state_off()
    if order_dep_defined and not order_dep_on:
        self._set_order_dep_state_on()

    # Create object with given parameters
    self.parameters = {}
    for key, value in vars(self.args).items():
        if value is not None:
            self.parameters[key] = value
    if self.cdist_object.exists and 'CDIST_OVERRIDE' not in self.env:
        # Existing object without override: parameters must match.
        obj_params = self._object_params_in_context()
        if obj_params != self.parameters:
            errmsg = ("Object %s already exists with conflicting "
                      "parameters:\n%s: %s\n%s: %s" % (
                          self.cdist_object.name,
                          " ".join(self.cdist_object.source),
                          obj_params,
                          self.object_source,
                          self.parameters))
            raise cdist.Error(errmsg)
    else:
        if self.cdist_object.exists:
            self.log.debug(('Object %s override forced with '
                            'CDIST_OVERRIDE'), self.cdist_object.name)
            self.cdist_object.create(True)
        else:
            self.cdist_object.create()
        self.cdist_object.parameters = self.parameters

    # Do the following recording even if object exists, but with
    # different requirements.
    # record the created object in typeorder file
    with open(self.typeorder_path, 'a') as typeorderfile:
        print(self.cdist_object.name, file=typeorderfile)
    # record the created object in parent object typeorder file
    __object_name = self.env.get('__object_name', None)
    depname = self.cdist_object.name
    if __object_name:
        # Nested object: record under the defining parent object.
        parent = self.cdist_object.object_from_name(__object_name)
        parent.typeorder.append(self.cdist_object.name)
        if self._order_dep_on():
            self.log.trace(('[ORDER_DEP] Adding %s to typeorder dep'
                            ' for %s'), depname, parent.name)
            parent.typeorder_dep.append(depname)
    elif self._order_dep_on():
        # Top-level object with order dependency enabled.
        self.log.trace('[ORDER_DEP] Adding %s to global typeorder dep',
                       depname)
        self._add_typeorder_dep(depname)

    # Record / Append source
    self.cdist_object.source.append(self.object_source)
def create(self):
    """Create this cdist object on the filesystem.
    """
    try:
        param_dir = os.path.join(self.base_path, self.parameter_path)
        # exist_ok=False: an already-existing object is an error.
        for directory in (self.absolute_path, param_dir):
            os.makedirs(directory, exist_ok=False)
    except EnvironmentError as error:
        raise cdist.Error('Error creating directories for cdist object: %s: %s' % (self, error))
def commandline(cls, args): """Configure remote system""" # FIXME: Refactor relict - remove later log = logging.getLogger("cdist") cls._check_and_prepare_args(args) process = {} failed_hosts = [] time_start = time.time() base_root_path = cls._base_root_path(args) hostcnt = 0 for host in itertools.chain(cls.hosts(args.host), cls.hosts(args.hostfile)): hostdir = cdist.str_hash(host) host_base_path = os.path.join(base_root_path, hostdir) log.debug("Base root path for target host \"{}\" is \"{}\"".format( host, host_base_path)) hostcnt += 1 if args.parallel: log.debug("Creating child process for %s", host) process[host] = multiprocessing.Process( target=cls.onehost, args=(host, host_base_path, hostdir, args, True)) process[host].start() else: try: cls.onehost(host, host_base_path, hostdir, args, parallel=False) except cdist.Error as e: failed_hosts.append(host) # Catch errors in parallel mode when joining if args.parallel: for host in process.keys(): log.debug("Joining process %s", host) process[host].join() if not process[host].exitcode == 0: failed_hosts.append(host) time_end = time.time() log.info("Total processing time for %s host(s): %s", hostcnt, (time_end - time_start)) if len(failed_hosts) > 0: raise cdist.Error("Failed to configure the following hosts: " + " ".join(failed_hosts))