def _load_roles(self, roles, ds): # a role is a name that auto-includes the following if they exist # <rolename>/tasks/main.yml # <rolename>/handlers/main.yml # <rolename>/vars/main.yml # <rolename>/library # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found if roles is None: roles = [] if type(roles) != list: raise errors.AnsibleError("value of 'roles:' must be a list") new_tasks = [] new_handlers = [] new_vars_files = [] defaults_files = [] pre_tasks = ds.get('pre_tasks', None) if type(pre_tasks) != list: pre_tasks = [] for x in pre_tasks: new_tasks.append(x) # flush handlers after pre_tasks new_tasks.append(dict(meta='flush_handlers')) roles = self._build_role_dependencies(roles, [], self.vars) for (role, role_path, role_vars, default_vars) in roles: # special vars must be extracted from the dict to the included tasks special_keys = ["sudo", "sudo_user", "when", "with_items"] special_vars = {} for k in special_keys: if k in role_vars: special_vars[k] = role_vars[k] task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks')) handler_basepath = utils.path_dwim( self.basedir, os.path.join(role_path, 'handlers')) vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')) defaults_basepath = utils.path_dwim( self.basedir, os.path.join(role_path, 'defaults')) task = self._resolve_main(task_basepath) handler = self._resolve_main(handler_basepath) vars_file = self._resolve_main(vars_basepath) defaults_file = self._resolve_main(defaults_basepath) library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library')) if not os.path.isfile(task) and not os.path.isfile( handler) and not os.path.isfile( vars_file) and not os.path.isdir(library): raise errors.AnsibleError( "found role at %s, but cannot find %s or %s or %s or %s" % (role_path, task, handler, vars_file, library)) if os.path.isfile(task): nt = dict(include=pipes.quote(task), vars=role_vars, default_vars=default_vars) for k in special_keys: if k in special_vars: nt[k] = special_vars[k] new_tasks.append(nt) if os.path.isfile(handler): nt = dict(include=pipes.quote(handler), vars=role_vars) for k in special_keys: if k in special_vars: nt[k] = special_vars[k] new_handlers.append(nt) if os.path.isfile(vars_file): new_vars_files.append(vars_file) if os.path.isfile(defaults_file): defaults_files.append(defaults_file) if os.path.isdir(library): utils.plugins.module_finder.add_directory(library) tasks = ds.get('tasks', None) post_tasks = ds.get('post_tasks', None) handlers = ds.get('handlers', None) vars_files = ds.get('vars_files', None) if type(tasks) != list: tasks = [] if type(handlers) != list: handlers = [] if type(vars_files) != list: vars_files = [] if type(post_tasks) != list: post_tasks = [] new_tasks.extend(tasks) # flush handlers after tasks + role tasks new_tasks.append(dict(meta='flush_handlers')) new_tasks.extend(post_tasks) # flush handlers after post tasks new_tasks.append(dict(meta='flush_handlers')) new_handlers.extend(handlers) new_vars_files.extend(vars_files) ds['tasks'] = new_tasks ds['handlers'] = new_handlers ds['vars_files'] = new_vars_files self.default_vars = self._load_role_defaults(defaults_files) return ds
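# Illustrative sketch, not part of the original source: assuming a hypothetical role
# entry like {'role': 'webserver', 'port': 8080, 'when': "ansible_os_family == 'Debian'"},
# the loop above resolves roles/webserver/{tasks,handlers,vars,defaults}/main.yml and
# appends include stanzas roughly of this shape (the special keys sudo, sudo_user,
# when and with_items are copied onto the include itself):
#
#   {'include': 'roles/webserver/tasks/main.yml',
#    'vars': {'port': 8080, 'when': "ansible_os_family == 'Debian'"},
#    'default_vars': {},   # contents of roles/webserver/defaults/main.yml, if any
#    'when': "ansible_os_family == 'Debian'"}
#
# A {'meta': 'flush_handlers'} task is appended after pre_tasks, after tasks and after
# post_tasks, so notified handlers fire at those three points in the play.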
def _search_executable(self, executable):
    # find_executable returns None when the command is not on PATH
    cmd = distutils.spawn.find_executable(executable)
    if not cmd:
        raise errors.AnsibleError("%s command not found in PATH" % executable)
    return cmd
def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) # We can't use p.communicate here because the ControlMaster may have stdout open as well stdout = '' stderr = '' rpipes = [p.stdout, p.stderr] if indata: try: stdin.write(indata) stdin.close() except: raise errors.AnsibleError( 'SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh' ) # Read stdout/stderr from process while True: rfd, wfd, efd = select.select(rpipes, [], rpipes, 1) # fail early if the sudo/su password is wrong if self.runner.sudo and sudoable: if self.runner.sudo_pass: incorrect_password = gettext.dgettext( "sudo", "Sorry, try again.") if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): raise errors.AnsibleError('Incorrect sudo password') if stdout.endswith(prompt): raise errors.AnsibleError('Missing sudo password') if self.runner.su and su and self.runner.su_pass: incorrect_password = gettext.dgettext("su", "Sorry") if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): raise errors.AnsibleError('Incorrect su password') if p.stdout in rfd: dat = os.read(p.stdout.fileno(), 9000) stdout += dat if dat == '': rpipes.remove(p.stdout) if p.stderr in rfd: dat = os.read(p.stderr.fileno(), 9000) stderr += dat if dat == '': rpipes.remove(p.stderr) # only break out if no pipes are left to read or # the pipes are completely read and # the process is terminated if (not rpipes or not rfd) and p.poll() is not None: break # No pipes are left to read but process is not yet terminated # Only then it is safe to wait for the process to be finished # NOTE: Actually p.poll() is always None here if rpipes is empty elif not rpipes and p.poll() == None: p.wait() # The process is terminated. Since no pipes to read from are # left, there is no need to call select() again. break # close stdin after process is terminated and stdout/stderr are read # completely (see also issue #848) stdin.close() return (p.returncode, stdout, stderr)
def fetch_file(self, in_path, out_path): out_path = out_path.replace('\\', '/') vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) buffer_size = 2**19 # 0.5MB chunks if not os.path.exists(os.path.dirname(out_path)): os.makedirs(os.path.dirname(out_path)) out_file = None try: offset = 0 while True: try: script = ''' If (Test-Path -PathType Leaf "%(path)s") { $stream = [System.IO.File]::OpenRead("%(path)s"); $stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null; $buffer = New-Object Byte[] %(buffer_size)d; $bytesRead = $stream.Read($buffer, 0, %(buffer_size)d); $bytes = $buffer[0..($bytesRead-1)]; [System.Convert]::ToBase64String($bytes); $stream.Close() | Out-Null; } ElseIf (Test-Path -PathType Container "%(path)s") { Write-Host "[DIR]"; } Else { Write-Error "%(path)s does not exist"; Exit 1; } ''' % dict(buffer_size=buffer_size, path=powershell._escape(in_path), offset=offset) vvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self.host) cmd_parts = powershell._encode_script(script, as_list=True) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:]) if result.status_code != 0: raise IOError(result.std_err.encode('utf-8')) if result.std_out.strip() == '[DIR]': data = None else: data = base64.b64decode(result.std_out.strip()) if data is None: if not os.path.exists(out_path): os.makedirs(out_path) break else: if not out_file: # If out_path is a directory and we're expecting a file, bail out now. if os.path.isdir(out_path): break out_file = open(out_path, 'wb') out_file.write(data) if len(data) < buffer_size: break offset += len(data) except Exception: traceback.print_exc() raise errors.AnsibleError("failed to transfer file to %s" % out_path) finally: if out_file: out_file.close()
def decrypt(key, msg):
    try:
        return key.Decrypt(msg)
    except key_errors.InvalidSignatureError:
        raise errors.AnsibleError("decryption failed")
def _executor_internal(self, host, new_stdin):
    ''' executes any module one or more times '''

    host_variables = self.inventory.get_variables(host)
    host_connection = host_variables.get('ansible_connection', self.transport)
    if host_connection in ['paramiko', 'ssh']:
        port = host_variables.get('ansible_ssh_port', self.remote_port)
        if port is None:
            port = C.DEFAULT_REMOTE_PORT
    else:
        # fireball, local, etc
        port = self.remote_port

    inject = {}
    inject = utils.combine_vars(inject, host_variables)
    inject = utils.combine_vars(inject, self.module_vars)
    inject = utils.combine_vars(inject, self.setup_cache[host])
    inject.setdefault('ansible_ssh_user', self.remote_user)
    inject['hostvars'] = HostVars(self.setup_cache, self.inventory)
    inject['group_names'] = host_variables.get('group_names', [])
    inject['groups'] = self.inventory.groups_list()
    inject['vars'] = self.module_vars
    inject['environment'] = self.environment

    if self.inventory.basedir() is not None:
        inject['inventory_dir'] = self.inventory.basedir()

    if self.inventory.src() is not None:
        inject['inventory_file'] = self.inventory.src()

    # late processing of parameterized sudo_user
    if self.sudo_user is not None:
        self.sudo_user = template.template(self.basedir, self.sudo_user, inject)

    # allow with_foo to work in playbooks...
    items = None
    items_plugin = self.module_vars.get('items_lookup_plugin', None)

    if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:

        basedir = self.basedir
        if '_original_file' in inject:
            basedir = os.path.dirname(inject['_original_file'])
            filesdir = os.path.join(basedir, '..', 'files')
            if os.path.exists(filesdir):
                basedir = filesdir

        items_terms = self.module_vars.get('items_lookup_terms', '')
        items_terms = template.template(basedir, items_terms, inject)
        items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject)
        if type(items) != list:
            raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)

        if len(items) and utils.is_list_of_strings(items) and self.module_name in ['apt', 'yum', 'pkgng']:
            # hack for apt, yum, and pkgng so that with_items maps back into a single module call
            inject['item'] = ",".join(items)
            items = None

    # logic to replace complex args if possible
    complex_args = self.complex_args

    # logic to decide how to run things depends on whether with_items is used
    if items is None:
        if isinstance(complex_args, basestring):
            complex_args = template.template(self.basedir, complex_args, inject, convert_bare=True)
            complex_args = utils.safe_eval(complex_args)
            if type(complex_args) != dict:
                raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args)
        return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args)
    elif len(items) > 0:

        # executing using with_items, so make multiple calls
        # TODO: refactor

        aggregrate = {}
        all_comm_ok = True
        all_changed = False
        all_failed = False
        results = []
        for x in items:
            inject['item'] = x

            # TODO: this idiom should be replaced with an up-conversion to a Jinja2 template evaluation
            if isinstance(complex_args, basestring):
                complex_args = template.template(self.basedir, complex_args, inject, convert_bare=True)
                complex_args = utils.safe_eval(complex_args)
                if type(complex_args) != dict:
                    raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args)
            result = self._executor_internal_inner(
                host,
                self.module_name,
                self.module_args,
                inject,
                port,
                complex_args=complex_args
            )
            results.append(result.result)
            if result.comm_ok == False:
                all_comm_ok = False
                all_failed = True
                break
        for x in results:
            if x.get('changed') == True:
                all_changed = True
            if (x.get('failed') == True) or (('rc' in x) and (x['rc'] != 0)):
                all_failed = True
                break
        msg = 'All items completed'
        if all_failed:
            msg = "One or more items failed."
        rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
        if not all_failed:
            del rd_result['failed']
        return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
    else:
        self.callbacks.on_skipped(host, None)
        return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True))
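# Illustrative sketch, not from the original source: for a hypothetical task iterating
# with_items over three packages, the aggregated ReturnData result assembled above
# would look roughly like this ('failed' is removed again when no item failed):
#
#   {'changed': True,
#    'msg': 'All items completed',
#    'results': [
#        {'item': 'httpd', 'changed': True,  'rc': 0},
#        {'item': 'ntp',   'changed': False, 'rc': 0},
#        {'item': 'rsync', 'changed': True,  'rc': 0}]}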
def _load_playbook_from_file(self, path, vars={}): ''' run top level error checking on playbooks and allow them to include other playbooks. ''' playbook_data = utils.parse_yaml_from_file(path) accumulated_plays = [] play_basedirs = [] if type(playbook_data) != list: raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data)) basedir = os.path.dirname(path) or '.' utils.plugins.push_basedir(basedir) for play in playbook_data: if type(play) != dict: raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), recieved: %s" % play) if 'include' in play: # a playbook (list of plays) decided to include some other list of plays # from another file. The result is a flat list of plays in the end. tokens = shlex.split(play['include']) incvars = vars.copy() if 'vars' in play: if isinstance(play['vars'], dict): incvars.update(play['vars']) elif isinstance(play['vars'], list): for v in play['vars']: incvars.update(v) # allow key=value parameters to be specified on the include line # to set variables for t in tokens[1:]: (k,v) = t.split("=", 1) incvars[k] = template(basedir, v, incvars) included_path = utils.path_dwim(basedir, template(basedir, tokens[0], incvars)) (plays, basedirs) = self._load_playbook_from_file(included_path, incvars) for p in plays: # support for parameterized play includes works by passing # those variables along to the subservient play if 'vars' not in p: p['vars'] = {} if isinstance(p['vars'], dict): p['vars'].update(incvars) elif isinstance(p['vars'], list): # nobody should really do this, but handle vars: a=1 b=2 p['vars'].extend([dict(k=v) for k,v in incvars.iteritems()]) accumulated_plays.extend(plays) play_basedirs.extend(basedirs) else: # this is a normal (non-included play) accumulated_plays.append(play) play_basedirs.append(basedir) return (accumulated_plays, play_basedirs)
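# Illustrative sketch (hypothetical file and variable names): a play entry such as
#
#   - include: deploy.yml app=frontend
#     vars:
#       region: eu-west-1
#
# is split by shlex into ['deploy.yml', 'app=frontend']; incvars becomes
# {'region': 'eu-west-1', 'app': 'frontend'} and is merged into the 'vars' of every
# play loaded from deploy.yml, so parameterized includes see those values as
# ordinary play variables.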
for child_name in data['children']: if child_name in groups: groups[group_name].add_child_group(groups[child_name]) for group in groups.values(): if group.depth == 0 and group.name != 'all': all.add_child_group(group) return groups def get_host_variables(self, host): """ Runs <script> --host <hostname> to determine additional host variables """ if self.host_vars_from_top is not None: got = self.host_vars_from_top.get(host.name, {}) return got cmd = [self.filename, "--host", host.name] try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError, e: raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (out, err) = sp.communicate() if out.strip() == '': return dict() try: return json_dict_unicode_to_bytes(utils.parse_json(out)) except ValueError: raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
def _parse(self, err): all_hosts = {} # not passing from_remote because data from CMDB is trusted self.raw = utils.parse_json(self.data) self.raw = json_dict_unicode_to_bytes(self.raw) all = Group('all') groups = dict(all=all) group = None if 'failed' in self.raw: sys.stderr.write(err + "\n") raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw) for (group_name, data) in self.raw.items(): # in Ansible 1.3 and later, a "_meta" subelement may contain # a variable "hostvars" which contains a hash for each host # if this "hostvars" exists at all then do not call --host for each # host. This is for efficiency and scripts should still return data # if called with --host for backwards compat with 1.2 and earlier. if group_name == '_meta': if 'hostvars' in data: self.host_vars_from_top = data['hostvars'] continue if group_name != all.name: group = groups[group_name] = Group(group_name) else: group = all host = None if not isinstance(data, dict): data = {'hosts': data} # is not those subkeys, then simplified syntax, host with vars elif not any(k in data for k in ('hosts','vars')): data = {'hosts': [group_name], 'vars': data} if 'hosts' in data: if not isinstance(data['hosts'], list): raise errors.AnsibleError("You defined a group \"%s\" with bad " "data for the host list:\n %s" % (group_name, data)) for hostname in data['hosts']: if not hostname in all_hosts: all_hosts[hostname] = Host(hostname) host = all_hosts[hostname] group.add_host(host) if 'vars' in data: if not isinstance(data['vars'], dict): raise errors.AnsibleError("You defined a group \"%s\" with bad " "data for variables:\n %s" % (group_name, data)) for k, v in data['vars'].iteritems(): if group.name == all.name: all.set_variable(k, v) else: group.set_variable(k, v) # Separate loop to ensure all groups are defined for (group_name, data) in self.raw.items(): if group_name == '_meta': continue if isinstance(data, dict) and 'children' in data: for child_name in data['children']: if child_name in groups: groups[group_name].add_child_group(groups[child_name]) for group in groups.values(): if group.depth == 0 and group.name != 'all': all.add_child_group(group) return groups
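# Illustrative sketch of the JSON shape this parser consumes (hypothetical host and
# variable names). When a top-level '_meta' element with 'hostvars' is present,
# get_host_variables() never needs to shell out with --host:
#
#   {
#       "webservers": {
#           "hosts": ["web1.example.com", "web2.example.com"],
#           "vars": {"http_port": 80},
#           "children": ["atlanta"]
#       },
#       "atlanta": ["host3.example.com"],           # bare list -> {'hosts': [...]}
#       "db1.example.com": {"ntp_server": "ntp1"},  # no hosts/vars keys -> host with vars
#       "_meta": {
#           "hostvars": {"web1.example.com": {"ansible_ssh_host": "10.0.0.10"}}
#       }
#   }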
def _update_vars_files_for_host(self, host): if type(self.vars_files) != list: self.vars_files = [self.vars_files] if host is not None: inject = {} inject.update(self.playbook.inventory.get_variables(host)) inject.update(self.playbook.SETUP_CACHE[host]) for filename in self.vars_files: if type(filename) == list: # loop over all filenames, loading the first one, and failing if # none found found = False sequence = [] for real_filename in filename: filename2 = template(self.basedir, real_filename, self.vars) filename3 = filename2 if host is not None: filename3 = template(self.basedir, filename2, inject) filename4 = utils.path_dwim(self.basedir, filename3) sequence.append(filename4) if os.path.exists(filename4): found = True data = utils.parse_yaml_from_file(filename4) if type(data) != dict: raise errors.AnsibleError( "%s must be stored as a dictionary/hash" % filename4) if host is not None: if self._has_vars_in( filename2 ) and not self._has_vars_in(filename3): # this filename has variables in it that were fact specific # so it needs to be loaded into the per host SETUP_CACHE self.playbook.SETUP_CACHE[host].update(data) self.playbook.callbacks.on_import_for_host( host, filename4) elif not self._has_vars_in(filename4): # found a non-host specific variable, load into vars and NOT # the setup cache self.vars.update(data) elif host is not None: self.playbook.callbacks.on_not_import_for_host( host, filename4) if found: break if not found and host is not None: raise errors.AnsibleError( "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)) else: # just one filename supplied, load it! filename2 = template(self.basedir, filename, self.vars) filename3 = filename2 if host is not None: filename3 = template(self.basedir, filename2, inject) filename4 = utils.path_dwim(self.basedir, filename3) if self._has_vars_in(filename4): continue new_vars = utils.parse_yaml_from_file(filename4) if new_vars: if type(new_vars) != dict: raise errors.AnsibleError( "%s must be stored as dictionary/hash: %s" % (filename4, type(new_vars))) if host is not None and self._has_vars_in( filename2) and not self._has_vars_in(filename3): # running a host specific pass and has host specific variables # load into setup cache self.playbook.SETUP_CACHE[host] = utils.combine_vars( self.playbook.SETUP_CACHE[host], new_vars) self.playbook.callbacks.on_import_for_host( host, filename4) elif host is None: # running a non-host specific pass and we can update the global vars instead self.vars = utils.combine_vars(self.vars, new_vars)
instance = utils.plugins.lookup_loader.get(name.lower(), basedir=kwargs.get( 'basedir', None)) vars = kwargs.get('vars', None) if instance is not None: # safely catch run failures per #5059 try: ran = instance.run(*args, inject=vars, **kwargs) except Exception, e: ran = None if ran: ran = ",".join(ran) return ran else: raise errors.AnsibleError("lookup plugin (%s) not found" % name) def template(basedir, varname, vars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True): ''' templates a data structure by traversing it and substituting for other data structures ''' from ansible import utils try:
def _get_vars(self): ''' load the vars section from a play, accounting for all sorts of variable features including loading from yaml files, prompting, and conditional includes of the first file found in a list. ''' if self.vars is None: self.vars = {} if type(self.vars) not in [dict, list]: raise errors.AnsibleError( "'vars' section must contain only key/value pairs") vars = {} # translate a list of vars into a dict if type(self.vars) == list: for item in self.vars: if getattr(item, 'items', None) is None: raise errors.AnsibleError( "expecting a key-value pair in 'vars' section") k, v = item.items()[0] vars[k] = v else: vars.update(self.vars) if type(self.vars_prompt) == list: for var in self.vars_prompt: if not 'name' in var: raise errors.AnsibleError( "'vars_prompt' item is missing 'name:'") vname = var['name'] prompt = var.get("prompt", vname) default = var.get("default", None) private = var.get("private", True) confirm = var.get("confirm", False) encrypt = var.get("encrypt", None) salt_size = var.get("salt_size", None) salt = var.get("salt", None) if vname not in self.playbook.extra_vars: vars[vname] = self.playbook.callbacks.on_vars_prompt( vname, private, prompt, encrypt, confirm, salt_size, salt, default) elif type(self.vars_prompt) == dict: for (vname, prompt) in self.vars_prompt.iteritems(): prompt_msg = "%s: " % prompt if vname not in self.playbook.extra_vars: vars[vname] = self.playbook.callbacks.on_vars_prompt( varname=vname, private=False, prompt=prompt_msg, default=None) else: raise errors.AnsibleError( "'vars_prompt' section is malformed, see docs") if type(self.playbook.extra_vars) == dict: vars.update(self.playbook.extra_vars) return vars
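# Illustrative sketch of the two vars_prompt shapes accepted above (hypothetical
# variable names; 'encrypt' takes a passlib scheme name when used):
#
#   vars_prompt:                      # list form, full control per variable
#     - name: release_version
#       prompt: "Release version"
#       private: no
#       default: "1.0"
#
#   vars_prompt:                      # dict form, shorthand prompt text only
#     release_version: "Release version"
#
# Variables already supplied via --extra-vars win and suppress the prompt entirely.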
def __init__(self, playbook, ds, basedir): ''' constructor loads from a play datastructure ''' for x in ds.keys(): if not x in Play.VALID_KEYS: raise errors.AnsibleError( "%s is not a legal parameter in an Ansible Playbook" % x) # allow all playbook keys to be set by --extra-vars self.vars = ds.get('vars', {}) self.vars_prompt = ds.get('vars_prompt', {}) self.playbook = playbook self.vars = self._get_vars() self.basedir = basedir self.roles = ds.get('roles', None) self.tags = ds.get('tags', None) if self.tags is None: self.tags = [] elif type(self.tags) in [str, unicode]: self.tags = self.tags.split(",") elif type(self.tags) != list: self.tags = [] # We first load the vars files from the datastructure # so we have the default variables to pass into the roles self.vars_files = ds.get('vars_files', []) self._update_vars_files_for_host(None) # now we load the roles into the datastructure self.included_roles = [] ds = self._load_roles(self.roles, ds) # and finally re-process the vars files as they may have # been updated by the included roles self.vars_files = ds.get('vars_files', []) self._update_vars_files_for_host(None) # template everything to be efficient, but do not pre-mature template # tasks/handlers as they may have inventory scope overrides _tasks = ds.pop('tasks', []) _handlers = ds.pop('handlers', []) ds = template(basedir, ds, self.vars) ds['tasks'] = _tasks ds['handlers'] = _handlers self._ds = ds hosts = ds.get('hosts') if hosts is None: raise errors.AnsibleError('hosts declaration is required') elif isinstance(hosts, list): hosts = ';'.join(hosts) self.serial = int(ds.get('serial', 0)) self.hosts = hosts self.name = ds.get('name', self.hosts) self._tasks = ds.get('tasks', []) self._handlers = ds.get('handlers', []) self.remote_user = ds.get('user', self.playbook.remote_user) self.remote_port = ds.get('port', self.playbook.remote_port) self.sudo = ds.get('sudo', self.playbook.sudo) self.sudo_user = ds.get('sudo_user', self.playbook.sudo_user) self.transport = ds.get('connection', self.playbook.transport) self.gather_facts = ds.get('gather_facts', None) self.remote_port = self.remote_port self.any_errors_fatal = utils.boolean( ds.get('any_errors_fatal', 'false')) self.accelerate = utils.boolean(ds.get('accelerate', 'false')) self.accelerate_port = ds.get('accelerate_port', None) self.max_fail_pct = int(ds.get('max_fail_percentage', 100)) load_vars = {} if self.playbook.inventory.basedir() is not None: load_vars['inventory_dir'] = self.playbook.inventory.basedir() self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars) self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars) if self.sudo_user != 'root': self.sudo = True
def _load_tasks(self, tasks, vars={}, default_vars={}, sudo_vars={}, additional_conditions=[], original_file=None): ''' handle task and handler include statements ''' results = [] if tasks is None: # support empty handler files, and the like. tasks = [] for x in tasks: if not isinstance(x, dict): raise errors.AnsibleError("expecting dict; got: %s" % x) # evaluate sudo vars for current and child tasks included_sudo_vars = {} for k in ["sudo", "sudo_user"]: if k in x: included_sudo_vars[k] = x[k] elif k in sudo_vars: included_sudo_vars[k] = sudo_vars[k] x[k] = sudo_vars[k] if 'meta' in x: if x['meta'] == 'flush_handlers': results.append(Task(self, x)) continue task_vars = self.vars.copy() task_vars.update(vars) if original_file: task_vars['_original_file'] = original_file if 'include' in x: tokens = shlex.split(str(x['include'])) items = [''] included_additional_conditions = list(additional_conditions) for k in x: if k.startswith("with_"): plugin_name = k[5:] if plugin_name not in utils.plugins.lookup_loader: raise errors.AnsibleError( "cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name)) terms = template(self.basedir, x[k], task_vars) items = utils.plugins.lookup_loader.get( plugin_name, basedir=self.basedir, runner=None).run(terms, inject=task_vars) elif k.startswith("when_"): included_additional_conditions.insert( 0, utils.compile_when_to_only_if("%s %s" % (k[5:], x[k]))) elif k == 'when': included_additional_conditions.insert( 0, utils.compile_when_to_only_if("jinja2_compare %s" % x[k])) elif k in ("include", "vars", "default_vars", "only_if", "sudo", "sudo_user"): pass else: raise errors.AnsibleError( "parse error: task includes cannot be used with other directives: %s" % k) default_vars = utils.combine_vars(self.default_vars, x.get('default_vars', {})) if 'vars' in x: task_vars = utils.combine_vars(task_vars, x['vars']) if 'only_if' in x: included_additional_conditions.append(x['only_if']) for item in items: mv = task_vars.copy() mv['item'] = item for t in tokens[1:]: (k, v) = t.split("=", 1) mv[k] = template(self.basedir, v, mv) dirname = self.basedir if original_file: dirname = os.path.dirname(original_file) include_file = template(dirname, tokens[0], mv) include_filename = utils.path_dwim(dirname, include_file) data = utils.parse_yaml_from_file(include_filename) results += self._load_tasks(data, mv, default_vars, included_sudo_vars, included_additional_conditions, original_file=include_filename) elif type(x) == dict: results.append( Task(self, x, module_vars=task_vars, default_vars=default_vars, additional_conditions=additional_conditions)) else: raise Exception("unexpected task type") for x in results: if self.tags is not None: x.tags.extend(self.tags) return results
def add_group(self, group):
    if group.name not in self.groups_list():
        self.groups.append(group)
        self._groups_list = None  # invalidate internal cache
    else:
        raise errors.AnsibleError("group already in inventory: %s" % group.name)
def template_from_file(basedir, path, vars): ''' run a file through the templating engine ''' from ansible import utils realpath = utils.path_dwim(basedir, path) loader=jinja2.FileSystemLoader([basedir,os.path.dirname(realpath)]) def my_lookup(*args, **kwargs): kwargs['vars'] = vars return lookup(*args, basedir=basedir, **kwargs) environment = jinja2.Environment(loader=loader, trim_blocks=True, extensions=_get_extensions()) environment.filters.update(_get_filters()) environment.globals['lookup'] = my_lookup try: data = codecs.open(realpath, encoding="utf8").read() except UnicodeDecodeError: raise errors.AnsibleError("unable to process as utf-8: %s" % realpath) except: raise errors.AnsibleError("unable to read %s" % realpath) # Get jinja env overrides from template if data.startswith(JINJA2_OVERRIDE): eol = data.find('\n') line = data[len(JINJA2_OVERRIDE):eol] data = data[eol+1:] for pair in line.split(','): (key,val) = pair.split(':') setattr(environment,key.strip(),val.strip()) environment.template_class = J2Template t = environment.from_string(data) vars = vars.copy() try: template_uid = pwd.getpwuid(os.stat(realpath).st_uid).pw_name except: template_uid = os.stat(realpath).st_uid vars['template_host'] = os.uname()[1] vars['template_path'] = realpath vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(realpath)) vars['template_uid'] = template_uid vars['template_fullpath'] = os.path.abspath(realpath) vars['template_run_date'] = datetime.datetime.now() managed_default = C.DEFAULT_MANAGED_STR managed_str = managed_default.format( host = vars['template_host'], uid = vars['template_uid'], file = vars['template_path'] ) vars['ansible_managed'] = time.strftime( managed_str, time.localtime(os.path.getmtime(realpath)) ) # This line performs deep Jinja2 magic that uses the _jinja2_vars object for vars # Ideally, this could use some API where setting shared=True and the object won't get # passed through dict(o), but I have not found that yet. res = jinja2.utils.concat(t.root_render_func(t.new_context(_jinja2_vars(basedir, vars, t.globals), shared=True))) if data.endswith('\n') and not res.endswith('\n'): res = res + '\n' return template(basedir, res, vars)
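# Illustrative sketch, assuming JINJA2_OVERRIDE is the '#jinja2:' marker used for
# per-template environment overrides: a template whose first line is, for example,
#
#   #jinja2: trim_blocks: False, variable_start_string: [%
#
# has that line stripped, and each comma-separated key:value pair is set verbatim (as a
# string) on the Jinja2 environment before rendering. The parser unpacks pair.split(':')
# into exactly two values, so each pair must contain a single colon.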
def _execute_module(self, conn, tmp, module_name, args, async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None): ''' runs a module that has already been transferred ''' # hack to support fireball mode if module_name == 'fireball': args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host)))) if 'port' not in args: args += " port=%s" % C.ZEROMQ_PORT (remote_module_path, module_style, shebang) = self._copy_module(conn, tmp, module_name, args, inject, complex_args) environment_string = self._compute_environment_string(inject) cmd_mod = "" if self.sudo and self.sudo_user != 'root': # deal with possible umask issues once sudo'ed to other user cmd_chmod = "chmod a+r %s" % remote_module_path self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False) cmd = "" if module_style != 'new': if 'CHECKMODE=True' in args: # if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to # do --check mode, so to be safe we will not run it. return ReturnData(conn=conn, result=dict(skippped=True, msg="cannot yet run check mode against old-style modules")) args = template.template(self.basedir, args, inject) # decide whether we need to transfer JSON or key=value argsfile = None if module_style == 'non_native_want_json': if complex_args: complex_args.update(utils.parse_kv(args)) argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args)) else: argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args))) else: argsfile = self._transfer_str(conn, tmp, 'arguments', args) if async_jid is None: cmd = "%s %s" % (remote_module_path, argsfile) else: cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]]) else: if async_jid is None: cmd = "%s" % (remote_module_path) else: cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]]) if not shebang: raise errors.AnsibleError("module is missing interpreter line") cmd = " ".join([environment_string.strip(), shebang.replace("#!","").strip(), cmd]) cmd = cmd.strip() if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files: if not self.sudo or self.sudo_user == 'root': # not sudoing or sudoing to root, so can cleanup files in the same step cmd = cmd + "; rm -rf %s >/dev/null 2>&1" % tmp res = self._low_level_exec_command(conn, cmd, tmp, sudoable=True) if self.sudo and self.sudo_user != 'root': # not sudoing to root, so maybe can't delete files as that other user # have to clean up temp files as original user in a second step if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files: cmd2 = "rm -rf %s >/dev/null 2>&1" % tmp self._low_level_exec_command(conn, cmd2, tmp, sudoable=False) data = utils.parse_json(res['stdout']) if 'parsed' in data and data['parsed'] == False: data['msg'] += res['stderr'] return ReturnData(conn=conn, result=data)
def _executor_internal(self, host): ''' executes any module one or more times ''' host_variables = self.inventory.get_variables(host) host_connection = host_variables.get('ansible_connection', self.transport) if host_connection in ['paramiko', 'ssh']: port = host_variables.get('ansible_ssh_port', self.remote_port) if port is None: port = C.DEFAULT_REMOTE_PORT else: # fireball, local, etc port = self.remote_port inject = {} inject = utils.combine_vars(inject, host_variables) inject = utils.combine_vars(inject, self.module_vars) inject = utils.combine_vars(inject, self.setup_cache[host]) inject['hostvars'] = HostVars(self.setup_cache, self.inventory) inject['group_names'] = host_variables.get('group_names', []) inject['groups'] = self.inventory.groups_list() inject['vars'] = self.module_vars inject['environment'] = self.environment if self.inventory.basedir() is not None: inject['inventory_dir'] = self.inventory.basedir() # allow with_foo to work in playbooks... items = None items_plugin = self.module_vars.get('items_lookup_plugin', None) if items_plugin is not None and items_plugin in utils.plugins.lookup_loader: items_terms = self.module_vars.get('items_lookup_terms', '') items_terms = template.template(self.basedir, items_terms, inject) items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=self.basedir).run( items_terms, inject=inject) if type(items) != list: raise errors.AnsibleError( "lookup plugins have to return a list: %r" % items) if len(items) and utils.is_list_of_strings( items) and self.module_name in ['apt', 'yum']: # hack for apt and soon yum, with_items maps back into a single module call inject['item'] = ",".join(items) items = None # logic to decide how to run things depends on whether with_items is used if items is None: return self._executor_internal_inner( host, self.module_name, self.module_args, inject, port, complex_args=self.complex_args) elif len(items) > 0: # executing using with_items, so make multiple calls # TODO: refactor aggregrate = {} all_comm_ok = True all_changed = False all_failed = False results = [] for x in items: inject['item'] = x result = self._executor_internal_inner( host, self.module_name, self.module_args, inject, port, complex_args=self.complex_args) results.append(result.result) if result.comm_ok == False: all_comm_ok = False all_failed = True break for x in results: if x.get('changed') == True: all_changed = True if (x.get('failed') == True) or (('rc' in x) and (x['rc'] != 0)): all_failed = True break msg = 'All items completed' if all_failed: msg = "One or more items failed." rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg) if not all_failed: del rd_result['failed'] return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result) else: self.callbacks.on_skipped(host, None) return ReturnData(host=host, comm_ok=True, result=dict(skipped=True))
def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None):
    ''' decides how to invoke a module '''

    # allow module args to work as a dictionary
    # though it is usually a string
    new_args = ""
    if type(module_args) == dict:
        for (k, v) in module_args.iteritems():
            new_args = new_args + "%s='%s' " % (k, v)
        module_args = new_args

    # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }})
    module_name = template.template(self.basedir, module_name, inject)

    if module_name in utils.plugins.action_loader:
        if self.background != 0:
            raise errors.AnsibleError("async mode is not supported with the %s module" % module_name)
        handler = utils.plugins.action_loader.get(module_name, self)
    elif self.background == 0:
        handler = utils.plugins.action_loader.get('normal', self)
    else:
        handler = utils.plugins.action_loader.get('async', self)

    if type(self.conditional) != list:
        self.conditional = [self.conditional]

    for cond in self.conditional:
        if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
            result = utils.jsonify(dict(changed=False, skipped=True))
            self.callbacks.on_skipped(host, inject.get('item', None))
            return ReturnData(host=host, result=result)

    if getattr(handler, 'setup', None) is not None:
        handler.setup(module_name, inject)
    conn = None
    actual_host = inject.get('ansible_ssh_host', host)
    # allow ansible_ssh_host to be templated
    actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True)
    actual_port = port
    actual_user = inject.get('ansible_ssh_user', self.remote_user)
    actual_pass = inject.get('ansible_ssh_pass', self.remote_pass)
    actual_transport = inject.get('ansible_connection', self.transport)
    actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
    if actual_transport in ['paramiko', 'ssh']:
        actual_port = inject.get('ansible_ssh_port', port)

    # the delegated host may have different SSH port configured, etc
    # and we need to transfer those, and only those, variables
    delegate_to = inject.get('delegate_to', None)
    if delegate_to is not None:
        delegate_to = template.template(self.basedir, delegate_to, inject)
        inject = inject.copy()
        interpreters = []
        for i in inject:
            if i.startswith("ansible_") and i.endswith("_interpreter"):
                interpreters.append(i)
        for i in interpreters:
            del inject[i]
        port = C.DEFAULT_REMOTE_PORT
        try:
            delegate_info = inject['hostvars'][delegate_to]
            actual_host = delegate_info.get('ansible_ssh_host', delegate_to)
            # allow ansible_ssh_host to be templated
            actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True)
            actual_port = delegate_info.get('ansible_ssh_port', port)
            actual_user = delegate_info.get('ansible_ssh_user', actual_user)
            actual_pass = delegate_info.get('ansible_ssh_pass', actual_pass)
            actual_private_key_file = delegate_info.get('ansible_ssh_private_key_file', self.private_key_file)
            actual_transport = delegate_info.get('ansible_connection', self.transport)
            for i in delegate_info:
                if i.startswith("ansible_") and i.endswith("_interpreter"):
                    inject[i] = delegate_info[i]
        except errors.AnsibleError:
            actual_host = delegate_to
            actual_port = port

    # user/pass may still contain variables at this stage
    actual_user = template.template(self.basedir, actual_user, inject)
    actual_pass = template.template(self.basedir, actual_pass, inject)

    # make actual_user available as __magic__ ansible_ssh_user variable
    inject['ansible_ssh_user'] = actual_user

    try:
        if actual_port is not None:
            actual_port = int(actual_port)
    except ValueError, e:
        result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port)
        return ReturnData(host=host, comm_ok=False, result=result)
def _run_task(self, pattern=None, task=None, remote_user=None, handlers=None, conditional=False, sudo=False, sudo_user=None, transport=None, port=None): ''' run a single task in the playbook and recursively run any subtasks. ''' # load the module name and parameters from the task entry name = task.get('name', None) action = task.get('action', None) if action is None: raise errors.AnsibleError( "action is required for each item in tasks: offending task is %s" % name if name else "unknown") if name is None: name = action only_if = task.get('only_if', 'True') async_seconds = int(task.get('async', 0)) # not async by default async_poll_interval = int(task.get('poll', 10)) # default poll = 10 seconds tokens = action.split(None, 1) module_name = tokens[0] module_args = '' if len(tokens) > 1: module_args = tokens[1] # include task specific vars module_vars = task.get('vars', {}) if 'first_available_file' in task: module_vars['first_available_file'] = task.get( 'first_available_file') # tasks can be direct (run on all nodes matching # the pattern) or conditional, where they ran # as the result of a change handler on a subset # of all of the hosts self.callbacks.on_task_start(name, conditional) # load up an appropriate ansible runner to # run the task in parallel results = self._run_module(pattern, module_name, module_args, module_vars, remote_user, async_seconds, async_poll_interval, only_if, sudo, sudo_user, transport, port) # add facts to the global setup cache for host, result in results['contacted'].iteritems(): if "ansible_facts" in result: for k, v in result['ansible_facts'].iteritems(): SETUP_CACHE[host][k] = v self.stats.compute(results) # if no hosts are matched, carry on, unlike /bin/ansible # which would warn you about this if results is None: results = {} # flag which notify handlers need to be run # this will be on a SUBSET of the actual host list. For instance # a file might need to be written on only half of the nodes so # we would only trigger restarting Apache on half of the nodes subtasks = task.get('notify', []) if isinstance(subtasks, basestring): subtasks = [subtasks] if len(subtasks) > 0: for host, results in results.get('contacted', {}).iteritems(): if results.get('changed', False): for subtask in subtasks: self._flag_handler(handlers, subtask, host)
import os
import re
import base64
import shlex
import inspect
import traceback
import urlparse
from ansible import errors
from ansible import utils
from ansible.callbacks import vvv, vvvv, verbose
from ansible.runner.shell_plugins import powershell
from ansible.utils.unicode import to_bytes

try:
    from winrm import Response
    from winrm.exceptions import WinRMTransportError
    from winrm.protocol import Protocol
except ImportError:
    raise errors.AnsibleError("winrm is not installed")

HAVE_KERBEROS = False
try:
    import kerberos
    HAVE_KERBEROS = True
except ImportError:
    pass


def vvvvv(msg, host=None):
    verbose(msg, host=host, caplevel=4)


class Connection(object):
    '''WinRM connections over HTTP/HTTPS.'''
def _run_play(self, pg): ''' run a list of tasks for a given pattern, in order ''' # get configuration information about the pattern pattern = pg.get('hosts') if pattern is None: raise errors.AnsibleError('hosts declaration is required') if isinstance(pattern, list): pattern = ';'.join(pattern) pattern = utils.template(pattern, self.extra_vars, {}) name = pg.get('name', pattern) vars = self._get_vars(pg, self.basedir) vars_files = pg.get('vars_files', {}) tasks = pg.get('tasks', []) handlers = pg.get('handlers', []) user = pg.get('user', self.remote_user) port = pg.get('port', self.remote_port) sudo = pg.get('sudo', self.sudo) sudo_user = pg.get('sudo_user', self.sudo_user) transport = pg.get('connection', self.transport) # the default sudo user is root, so if you change it, sudo is implied if sudo_user != 'root': sudo = True self.callbacks.on_play_start(name) # push any variables down to the system # and get facts/ohai/other data back up self._do_setup_step(pattern, vars, user, port, sudo, sudo_user, transport, None) # now with that data, handle contentional variable file imports! if len(vars_files) > 0: self._do_setup_step(pattern, vars, user, port, sudo, sudo_user, transport, vars_files) # run all the top level tasks, these get run on every node for task in tasks: self._run_task(pattern=pattern, task=task, handlers=handlers, remote_user=user, sudo=sudo, sudo_user=sudo_user, transport=transport, port=port) # handlers only run on certain nodes, they are flagged by _flag_handlers # above. They only run on nodes when things mark them as changed, and # handlers only get run once. For instance, the system is designed # such that multiple config files if changed can ask for an Apache restart # but Apache will only be restarted once (at the end). for task in handlers: triggered_by = task.get('run', None) if type(triggered_by) == list: self.inventory.restrict_to(triggered_by) self._run_task(pattern=pattern, task=task, handlers=[], conditional=True, remote_user=user, sudo=sudo, sudo_user=sudo_user, transport=transport, port=port) self.inventory.lift_restriction()
def _winrm_connect(self): ''' Establish a WinRM connection over HTTP/HTTPS. ''' # get winrm-specific connection vars host_vars = self.runner.inventory._hosts_cache[ self.delegate].get_variables() port = self.port or 5986 vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \ (self.user, port, self.host), host=self.host) netloc = '%s:%d' % (self.host, port) exc = None for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']: if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self.user): continue if transport == 'kerberos': realm = self.user.split('@', 1)[1].strip() or None else: realm = None endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', '')) self._winrm_kwargs = dict(username=self.user, password=self.password, realm=realm) argspec = inspect.getargspec(Protocol.__init__) for arg in argspec.args: if arg in ('self', 'endpoint', 'transport', 'username', 'password', 'realm'): continue if 'ansible_winrm_%s' % arg in host_vars: self._winrm_kwargs[arg] = host_vars['ansible_winrm_%s' % arg] vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self.host) protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs) try: protocol.send_message('') return protocol except WinRMTransportError, exc: err_msg = str(exc) if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I): raise errors.AnsibleError( "the connection attempt timed out") m = re.search(r'Code\s+?(\d{3})', err_msg) if m: code = int(m.groups()[0]) if code == 401: raise errors.AnsibleError( "the username/password specified for this server was incorrect" ) elif code == 411: return protocol vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host) continue
def __init__(self, host_list=C.DEFAULT_HOST_LIST): # the host file file, or script path, or list of hosts # if a list, inventory data will NOT be loaded self.host_list = host_list # caching to avoid repeated calculations, particularly with # external inventory scripts. self._vars_per_host = {} self._vars_per_group = {} self._hosts_cache = {} self._groups_list = {} self._pattern_cache = {} # to be set by calling set_playbook_basedir by ansible-playbook self._playbook_basedir = None # the inventory object holds a list of groups self.groups = [] # a list of host(names) to contain current inquiries to self._restriction = None self._also_restriction = None self._subset = None if isinstance(host_list, basestring): if "," in host_list: host_list = host_list.split(",") host_list = [h for h in host_list if h and h.strip()] if host_list is None: self.parser = None elif isinstance(host_list, list): self.parser = None all = Group('all') self.groups = [all] ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?') for x in host_list: m = ipv6_re.match(x) if m: all.add_host(Host(m.groups()[0], m.groups()[1])) else: if ":" in x: tokens = x.rsplit(":", 1) # if there is ':' in the address, then this is a ipv6 if ':' in tokens[0]: all.add_host(Host(x)) else: all.add_host(Host(tokens[0], tokens[1])) else: all.add_host(Host(x)) elif os.path.exists(host_list): if os.path.isdir(host_list): # Ensure basedir is inside the directory self.host_list = os.path.join(self.host_list, "") self.parser = InventoryDirectory(filename=host_list) self.groups = self.parser.groups.values() elif utils.is_executable(host_list): self.parser = InventoryScript(filename=host_list) self.groups = self.parser.groups.values() else: self.parser = InventoryParser(filename=host_list) self.groups = self.parser.groups.values() utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True) else: raise errors.AnsibleError( "Unable to find an inventory file, specify one with -i ?") self._vars_plugins = [x for x in utils.plugins.vars_loader.all(self)]
def compile_when_to_only_if(expression): ''' when is a shorthand for writing only_if conditionals. It requires less quoting magic. only_if is retained for backwards compatibility. ''' # when: set $variable # when: unset $variable # when: failed $json_result # when: changed $json_result # when: int $x >= $z and $y < 3 # when: int $x in $alist # when: float $x > 2 and $y <= $z # when: str $x != $y # when: jinja2_compare asdf # implies {{ asdf }} if type(expression) not in [str, unicode]: raise errors.AnsibleError("invalid usage of when_ operator: %s" % expression) tokens = expression.split() if len(tokens) < 2: raise errors.AnsibleError("invalid usage of when_ operator: %s" % expression) # when_set / when_unset if tokens[0] in ['set', 'unset']: tcopy = tokens[1:] for (i, t) in enumerate(tokens[1:]): if t.find("$") != -1: tcopy[i] = "is_%s('''%s''')" % (tokens[0], t) else: tcopy[i] = t return " ".join(tcopy) # when_failed / when_changed elif tokens[0] in ['failed', 'changed']: tcopy = tokens[1:] for (i, t) in enumerate(tokens[1:]): if t.find("$") != -1: tcopy[i] = "is_%s(%s)" % (tokens[0], t) else: tcopy[i] = t return " ".join(tcopy) # when_integer / when_float / when_string elif tokens[0] in ['integer', 'float', 'string']: cast = None if tokens[0] == 'integer': cast = 'int' elif tokens[0] == 'string': cast = 'str' elif tokens[0] == 'float': cast = 'float' tcopy = tokens[1:] for (i, t) in enumerate(tokens[1:]): #if re.search(t, r"^\w"): # bare word will turn into Jinja2 so all the above # casting is really not needed #tcopy[i] = "%s('''%s''')" % (cast, t) t2 = t.strip() if (t2[0].isalpha() or t2[0] == '$') and cast == 'str' and t2 != 'in': tcopy[i] = "'%s'" % (t) else: tcopy[i] = t result = " ".join(tcopy) return result # when_boolean elif tokens[0] in ['bool', 'boolean']: tcopy = tokens[1:] for (i, t) in enumerate(tcopy): if t.find("$") != -1: tcopy[ i] = "(is_set('''%s''') and '''%s'''.lower() not in ('false', 'no', 'n', 'none', '0', ''))" % ( t, t) return " ".join(tcopy) # the stock 'when' without qualification (new in 1.2), assumes Jinja2 terms elif tokens[0] == 'jinja2_compare': # a Jinja2 evaluation that results in something Python can eval! presented = "{% if " + " ".join( tokens[1:]).strip() + " %} True {% else %} False {% endif %}" return presented else: raise errors.AnsibleError("invalid usage of when_ operator: %s" % expression)
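# Illustrative examples of the translation performed above (hypothetical variables):
#
#   compile_when_to_only_if("set $x")
#       -> "is_set('''$x''')"
#   compile_when_to_only_if("failed $result")
#       -> "is_failed($result)"
#   compile_when_to_only_if("integer $x >= 4")
#       -> "$x >= 4"                  # casting is commented out; tokens pass through
#   compile_when_to_only_if("string $x != foo")
#       -> "'$x' != 'foo'"            # bare words and $vars are quoted in the str case
#   compile_when_to_only_if("jinja2_compare ansible_os_family == 'Debian'")
#       -> "{% if ansible_os_family == 'Debian' %} True {% else %} False {% endif %}"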
def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): # the host file file, or script path, or list of hosts # if a list, inventory data will NOT be loaded self.host_list = host_list self._vault_password=vault_password # caching to avoid repeated calculations, particularly with # external inventory scripts. self._vars_per_host = {} self._vars_per_group = {} self._hosts_cache = {} self._groups_list = {} self._pattern_cache = {} # to be set by calling set_playbook_basedir by playbook code self._playbook_basedir = None # the inventory object holds a list of groups self.groups = [] # a list of host(names) to contain current inquiries to self._restriction = None self._also_restriction = None self._subset = None if isinstance(host_list, basestring): if "," in host_list: host_list = host_list.split(",") host_list = [ h for h in host_list if h and h.strip() ] if host_list is None: self.parser = None elif isinstance(host_list, list): self.parser = None all = Group('all') self.groups = [ all ] ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?') for x in host_list: m = ipv6_re.match(x) if m: all.add_host(Host(m.groups()[0], m.groups()[1])) else: if ":" in x: tokens = x.rsplit(":", 1) # if there is ':' in the address, then this is an ipv6 if ':' in tokens[0]: all.add_host(Host(x)) else: all.add_host(Host(tokens[0], tokens[1])) else: all.add_host(Host(x)) elif os.path.exists(host_list): if os.path.isdir(host_list): # Ensure basedir is inside the directory self.host_list = os.path.join(self.host_list, "") self.parser = InventoryDirectory(filename=host_list) self.groups = self.parser.groups.values() else: # check to see if the specified file starts with a # shebang (#!/), so if an error is raised by the parser # class we can show a more apropos error shebang_present = False try: inv_file = open(host_list) first_line = inv_file.readlines()[0] inv_file.close() if first_line.startswith('#!'): shebang_present = True except: pass if utils.is_executable(host_list): try: self.parser = InventoryScript(filename=host_list) self.groups = self.parser.groups.values() except: if not shebang_present: raise errors.AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \ "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list) else: raise else: try: self.parser = InventoryParser(filename=host_list) self.groups = self.parser.groups.values() except: if shebang_present: raise errors.AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \ "Perhaps you want to correct this with `chmod +x %s`?" % host_list) else: raise utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True) else: raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?") self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ] # get group vars from group_vars/ files and vars plugins for group in self.groups: group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password)) # get host vars from host_vars/ files and vars plugins for host in self.get_hosts(): host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password))
def _update_vars_files_for_host(self, host):

    if type(self.vars_files) != list:
        self.vars_files = [self.vars_files]

    if (host is not None):
        self.playbook.SETUP_CACHE[host].update(self.vars)

        inventory = self.playbook.inventory
        hostrec = inventory.get_host(host)
        groupz = sorted(inventory.groups_for_host(host), key=lambda g: g.depth)
        groups = [g.name for g in groupz]
        basedir = inventory.basedir()
        if basedir is not None:
            for x in groups:
                path = os.path.join(basedir, "group_vars/%s" % x)
                if os.path.exists(path):
                    data = utils.parse_yaml_from_file(path)
                    if type(data) != dict:
                        raise errors.AnsibleError("%s must be stored as a dictionary/hash" % path)
                    self.playbook.SETUP_CACHE[host].update(data)
            path = os.path.join(basedir, "host_vars/%s" % hostrec.name)
            if os.path.exists(path):
                data = utils.parse_yaml_from_file(path)
                if type(data) != dict:
                    raise errors.AnsibleError("%s must be stored as a dictionary/hash" % path)
                self.playbook.SETUP_CACHE[host].update(data)

    for filename in self.vars_files:

        if type(filename) == list:

            # loop over all filenames, loading the first one, and failing if
            # none found
            found = False
            sequence = []
            for real_filename in filename:
                filename2 = utils.template(real_filename, self.vars)
                filename3 = filename2
                if host is not None:
                    filename3 = utils.template(filename2, self.playbook.SETUP_CACHE[host])
                filename4 = utils.path_dwim(self.basedir, filename3)
                sequence.append(filename4)
                if os.path.exists(filename4):
                    found = True
                    data = utils.parse_yaml_from_file(filename4)
                    if type(data) != dict:
                        raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
                    if host is not None:
                        if self._has_vars_in(filename2) and not self._has_vars_in(filename3):
                            # this filename has variables in it that were fact specific
                            # so it needs to be loaded into the per host SETUP_CACHE
                            self.playbook.SETUP_CACHE[host].update(data)
                            self.playbook.callbacks.on_import_for_host(host, filename4)
                    elif not self._has_vars_in(filename4):
                        # found a non-host specific variable, load into vars and NOT
                        # the setup cache
                        self.vars.update(data)
                elif host is not None:
                    self.playbook.callbacks.on_not_import_for_host(host, filename4)
                if found:
                    break
            if not found:
                raise errors.AnsibleError(
                    "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
                )

        else:
            # just one filename supplied, load it!
            filename2 = utils.template(filename, self.vars)
            filename3 = filename2
            if host is not None:
                filename3 = utils.template(filename2, self.playbook.SETUP_CACHE[host])
            filename4 = utils.path_dwim(self.basedir, filename3)
            if self._has_vars_in(filename4):
                return
            new_vars = utils.parse_yaml_from_file(filename4)
            if new_vars:
                if type(new_vars) != dict:
                    raise errors.AnsibleError("%s must be stored as a dictionary/hash: %s" % (filename4, type(new_vars)))
                if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3):
                    # running a host specific pass and has host specific variables
                    # load into setup cache
                    self.playbook.SETUP_CACHE[host].update(new_vars)
                elif host is None:
                    # running a non-host specific pass and we can update the global vars instead
                    self.vars.update(new_vars)
def get_variables(self, hostname, update_cached=False, vault_password=None):
    host = self.get_host(hostname)
    if not host:
        raise errors.AnsibleError("host not found: %s" % hostname)
    return host.get_variables()
def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False,
                 executable='/bin/sh', in_data=None, su_user=None, su=False):
    ''' run a command on the remote host '''

    ssh_cmd = self._password_cmd()
    ssh_cmd += ["ssh", "-C"]
    if not in_data:
        # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
        # inside a tty automatically invokes the python interactive-mode but the modules are not
        # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
        ssh_cmd += ["-tt"]
    if utils.VERBOSITY > 3:
        ssh_cmd += ["-vvv"]
    else:
        ssh_cmd += ["-q"]
    ssh_cmd += self.common_args

    if self.ipv6:
        ssh_cmd += ['-6']
    ssh_cmd += [self.host]

    if su and su_user:
        sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)
        ssh_cmd.append(sudocmd)
    elif not self.runner.sudo or not sudoable:
        prompt = None
        if executable:
            ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
        else:
            ssh_cmd.append(cmd)
    else:
        sudocmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd)
        ssh_cmd.append(sudocmd)

    vvv("EXEC %s" % ssh_cmd, host=self.host)

    not_in_host_file = self.not_in_host_file(self.host)

    if C.HOST_KEY_CHECKING and not_in_host_file:
        # lock around the initial SSH connectivity so the user prompt about whether to add
        # the host to known hosts is not intermingled with multiprocess output.
        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
        fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)

    # create process
    (p, stdin) = self._run(ssh_cmd, in_data)

    self._send_password()

    if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
            (self.runner.su and su and self.runner.su_pass):
        # several cases are handled for sudo privileges with password
        # * NOPASSWD (tty & no-tty): detect success_key on stdout
        # * without NOPASSWD:
        #   * detect prompt on stdout (tty)
        #   * detect prompt on stderr (no-tty)
        fcntl.fcntl(p.stdout, fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        fcntl.fcntl(p.stderr, fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
        sudo_output = ''
        sudo_errput = ''

        while True:
            if success_key in sudo_output or \
                    (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
                    (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
                break

            rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self.runner.timeout)
            if p.stderr in rfd:
                chunk = p.stderr.read()
                if not chunk:
                    raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
                sudo_errput += chunk
                incorrect_password = gettext.dgettext("sudo", "Sorry, try again.")
                if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
                    raise errors.AnsibleError('Incorrect sudo password')
                elif sudo_errput.endswith(prompt):
                    stdin.write(self.runner.sudo_pass + '\n')

            if p.stdout in rfd:
                chunk = p.stdout.read()
                if not chunk:
                    raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
                sudo_output += chunk

            if not rfd:
                # timeout. wrap up process communication
                stdout = p.communicate()
                raise errors.AnsibleError('ssh connection error waiting for sudo or su password prompt')

        if success_key not in sudo_output:
            if sudoable:
                stdin.write(self.runner.sudo_pass + '\n')
            elif su:
                stdin.write(self.runner.su_pass + '\n')

    (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)

    if C.HOST_KEY_CHECKING and not_in_host_file:
        # lock around the initial SSH connectivity so the user prompt about whether to add
        # the host to known hosts is not intermingled with multiprocess output.
        fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)

    controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
                          'unknown configuration option: ControlPersist' in stderr

    if C.HOST_KEY_CHECKING:
        if ssh_cmd[0] == "sshpass" and p.returncode == 6:
            raise errors.AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')

    if p.returncode != 0 and controlpersisterror:
        raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
    if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
        raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')

    return (p.returncode, '', stdout, stderr)
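# Illustration (not part of the original source): a self-contained sketch of the
# non-blocking read pattern used above to watch a child's stdout/stderr for a password
# prompt while still honoring a select() timeout. The child command (`cat`), the prompt
# text, and the helper name are invented for the example; the real code drives ssh and
# sudo/su instead.
import fcntl
import os
import select
import subprocess

def _watch_for_prompt_example(prompt="Password:", timeout=5):
    p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # switch both pipes to non-blocking so partial output can be read as it arrives
    for pipe in (p.stdout, p.stderr):
        fcntl.fcntl(pipe, fcntl.F_SETFL, fcntl.fcntl(pipe, fcntl.F_GETFL) | os.O_NONBLOCK)
    # `cat` simply echoes stdin, standing in for a remote process that prints a prompt
    p.stdin.write(prompt.encode())
    p.stdin.flush()
    output = b''
    while prompt.encode() not in output:
        rfd, _, _ = select.select([p.stdout, p.stderr], [], [], timeout)
        if not rfd:
            # nothing became readable within the timeout: give up, as the loop above does
            p.kill()
            raise RuntimeError('timed out waiting for prompt')
        for pipe in rfd:
            chunk = os.read(pipe.fileno(), 4096)
            if chunk:
                output += chunk
    p.stdin.close()
    p.wait()
    return output

# _watch_for_prompt_example()  ->  b'Password:'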
def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
    # this number is arbitrary, but it seems sane
    if level > 20:
        raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
    for role in roles:
        role_path, role_vars = self._get_role_path(role)
        role_vars = utils.combine_vars(passed_vars, role_vars)
        vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
        vars_data = {}
        if os.path.isfile(vars):
            vars_data = utils.parse_yaml_from_file(vars)
            if vars_data:
                role_vars = utils.combine_vars(vars_data, role_vars)
        defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
        defaults_data = {}
        if os.path.isfile(defaults):
            defaults_data = utils.parse_yaml_from_file(defaults)
        # the meta directory contains the yaml that should
        # hold the list of dependencies (if any)
        meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
        if os.path.isfile(meta):
            data = utils.parse_yaml_from_file(meta)
            if data:
                dependencies = data.get('dependencies', [])
                for dep in dependencies:
                    allow_dupes = False
                    (dep_path, dep_vars) = self._get_role_path(dep)
                    meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
                    if os.path.isfile(meta):
                        meta_data = utils.parse_yaml_from_file(meta)
                        if meta_data:
                            allow_dupes = utils.boolean(meta_data.get('allow_duplicates', ''))
                    if not allow_dupes:
                        if dep in self.included_roles:
                            continue
                        else:
                            self.included_roles.append(dep)
                    dep_vars = utils.combine_vars(passed_vars, dep_vars)
                    dep_vars = utils.combine_vars(role_vars, dep_vars)
                    vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
                    vars_data = {}
                    if os.path.isfile(vars):
                        vars_data = utils.parse_yaml_from_file(vars)
                        if vars_data:
                            dep_vars = utils.combine_vars(vars_data, dep_vars)
                    defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
                    dep_defaults_data = {}
                    if os.path.isfile(defaults):
                        dep_defaults_data = utils.parse_yaml_from_file(defaults)
                    if 'role' in dep_vars:
                        del dep_vars['role']
                    self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level + 1)
                    dep_stack.append([dep, dep_path, dep_vars, dep_defaults_data])
        # only add the current role when we're at the top level,
        # otherwise we'll end up in a recursive loop
        if level == 0:
            self.included_roles.append(role)
            dep_stack.append([role, role_path, role_vars, defaults_data])
    return dep_stack
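# Illustration (not part of the original source): the dependency walk above reduced to an
# in-memory graph, so the ordering and duplicate suppression are easy to see. The role
# names, graph structure, and helper name are invented for the example; the real method
# also reads meta/vars/defaults files from disk and merges variables at every step.
def _resolve_deps_example(roles, graph, allow_dupes=(), _stack=None, _seen=None, _level=0):
    if _level > 20:
        raise RuntimeError("too many levels of recursion while resolving role dependencies")
    _stack = [] if _stack is None else _stack
    _seen = set() if _seen is None else _seen
    for role in roles:
        for dep in graph.get(role, []):
            if dep not in allow_dupes:
                # skip a dependency that was already pulled in, mirroring included_roles
                if dep in _seen:
                    continue
                _seen.add(dep)
            # a dependency's own dependencies are pushed before the dependency itself
            _resolve_deps_example([dep], graph, allow_dupes, _stack, _seen, _level + 1)
            _stack.append(dep)
        if _level == 0:
            _stack.append(role)
    return _stack

# graph = {'webserver': ['common', 'firewall'], 'firewall': ['common']}
# _resolve_deps_example(['webserver'], graph)
# -> ['common', 'firewall', 'webserver']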