def exec_command(self, cmd, in_data=None, sudoable=True):
    super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
    cmd_parts = shlex.split(to_bytes(cmd), posix=False)
    # list() so membership tests and indexing below also work on python3,
    # where map() returns an iterator
    cmd_parts = list(map(to_unicode, cmd_parts))
    script = None
    cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
    # Support running .ps1 files (via script/raw).
    if cmd_ext == '.ps1':
        script = '& %s' % cmd
    # Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
    elif cmd_ext in ('.bat', '.cmd'):
        script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
    # Encode the command if not already encoded; supports running simple PowerShell commands via raw.
    elif '-EncodedCommand' not in cmd_parts:
        script = cmd
    if script:
        cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
    if '-EncodedCommand' in cmd_parts:
        encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
        decoded_cmd = to_unicode(base64.b64decode(encoded_cmd).decode('utf-16-le'))
        self._display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
    else:
        self._display.vvv("EXEC %s" % cmd, host=self._winrm_host)
    try:
        result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
    except Exception as e:
        traceback.print_exc()
        raise AnsibleError("failed to exec cmd %s" % cmd)
    result.std_out = to_unicode(result.std_out)
    result.std_err = to_unicode(result.std_err)
    return (result.status_code, result.std_out, result.std_err)
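
# A minimal, self-contained sketch (independent of the shell plugin above) of the
# -EncodedCommand handling: PowerShell expects the script as base64-encoded
# UTF-16LE, and the EXEC debug message above simply reverses that transform.
# The helper names are illustrative, not part of Ansible.
import base64

def encode_ps_command(script):
    # PowerShell's -EncodedCommand takes the script bytes as UTF-16LE, base64-encoded.
    return base64.b64encode(script.encode('utf-16-le')).decode('ascii')

def decode_ps_command(encoded):
    # The reverse transform, mirroring the decode used for the EXEC debug message.
    return base64.b64decode(encoded).decode('utf-16-le')

encoded = encode_ps_command(u'Get-ChildItem C:\\Windows')
assert decode_ps_command(encoded) == u'Get-ChildItem C:\\Windows'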
def _winrm_connect(self):
    '''
    Establish a WinRM connection over HTTP/HTTPS.
    '''
    display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
                (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
    netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
    endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
    errors = []
    for transport in self._winrm_transport:
        if transport == 'kerberos' and not HAVE_KERBEROS:
            errors.append('kerberos: the python kerberos library is not installed')
            continue
        display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
        try:
            protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs)
            protocol.send_message('')
            return protocol
        except Exception as e:
            err_msg = to_unicode(e).strip()
            if re.search(to_unicode(r'Operation\s+?timed\s+?out'), err_msg, re.I):
                raise AnsibleError('the connection attempt timed out')
            m = re.search(to_unicode(r'Code\s+?(\d{3})'), err_msg)
            if m:
                code = int(m.groups()[0])
                if code == 401:
                    err_msg = 'the username/password specified for this server was incorrect'
                elif code == 411:
                    return protocol
            errors.append(u'%s: %s' % (transport, err_msg))
            display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_unicode(traceback.format_exc())), host=self._winrm_host)
    if errors:
        raise AnsibleError(', '.join(map(to_str, errors)))
    else:
        raise AnsibleError('No transport found for WinRM connection')
def modify_module(module_name, module_path, module_args, task_vars=dict(), module_compression='ZIP_STORED'):
    """
    Used to insert chunks of code into modules before transfer rather than
    doing regular python imports.  This allows for more efficient transfer in
    a non-bootstrapping scenario by not moving extra files over the wire and
    also takes care of embedding arguments in the transferred modules.

    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.

    Example:

    from ansible.module_utils.basic import *

        ... will result in the insertion of basic.py into the module
        from the module_utils/ directory in the source tree.

    All modules are required to import at least basic, though there will also
    be other snippets.

    For powershell, there's equivalent conventions like this:

    # POWERSHELL_COMMON

    which results in the inclusion of the common code from powershell.ps1
    """
    with open(module_path, 'rb') as f:
        # read in the module source
        module_data = f.read()

    (module_data, module_style, shebang) = _find_snippet_imports(module_name, module_data, module_path,
                                                                 module_args, task_vars, module_compression)

    if module_style == 'binary':
        return (module_data, module_style, to_unicode(shebang, nonstring='passthru'))
    elif shebang is None:
        lines = module_data.split(b"\n", 1)
        if lines[0].startswith(b"#!"):
            shebang = lines[0].strip()
            args = shlex.split(str(shebang[2:]))
            interpreter = args[0]
            interpreter = to_bytes(interpreter)

            new_shebang = to_bytes(_get_shebang(interpreter, task_vars, args[1:])[0],
                                   errors='strict', nonstring='passthru')
            if new_shebang:
                lines[0] = shebang = new_shebang

            if os.path.basename(interpreter).startswith(b'python'):
                lines.insert(1, to_bytes(ENCODING_STRING))
        else:
            # No shebang, assume a binary module?
            pass

        module_data = b"\n".join(lines)
    else:
        shebang = to_bytes(shebang, errors='strict')

    return (module_data, module_style, to_unicode(shebang, nonstring='passthru'))
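
# A standalone sketch of the shebang-parsing step above: split the first line
# into the interpreter and its arguments with shlex, the same way modify_module
# does before rewriting it. parse_shebang is an illustrative name, not an
# Ansible helper.
import shlex

def parse_shebang(first_line):
    # '#!/usr/bin/env python -u' -> ('/usr/bin/env', ['python', '-u'])
    if not first_line.startswith('#!'):
        raise ValueError('not a shebang line')
    parts = shlex.split(first_line[2:].strip())
    return parts[0], parts[1:]

interpreter, args = parse_shebang('#!/usr/bin/env python -u')
assert interpreter == '/usr/bin/env'
assert args == ['python', '-u']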
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=C.DEFAULT_EXECUTABLE, encoding_errors='replace'):
    '''
    This is the function which executes the low level shell command, which
    may be commands to create/remove directories for temporary files, or to
    run the module code or python directly when pipelining.

    :kwarg encoding_errors: If the value returned by the command isn't utf-8
        then we have to figure out how to transform it to unicode.
        If the value is just going to be displayed to the user (or
        discarded) then the default of 'replace' is fine.  If the data is
        used as a key or is going to be written back out to a file
        verbatim, then this won't work.  May have to use some sort of
        replacement strategy (python3 could use surrogateescape)
    '''

    display.debug("_low_level_execute_command(): starting")
    if not cmd:
        # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
        display.debug("_low_level_execute_command(): no command, exiting")
        return dict(stdout='', stderr='')

    allow_same_user = C.BECOME_ALLOW_SAME_USER
    same_user = self._play_context.become_user == self._play_context.remote_user
    if sudoable and self._play_context.become and (allow_same_user or not same_user):
        display.debug("_low_level_execute_command(): using become for this command")
        cmd = self._play_context.make_become_cmd(cmd, executable=executable)

    if executable is not None and self._connection.allow_executable:
        cmd = executable + ' -c ' + pipes.quote(cmd)

    display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
    rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)

    # stdout and stderr may be either a file-like or a bytes object.
    # Convert either one to a text type
    if isinstance(stdout, binary_type):
        out = to_unicode(stdout, errors=encoding_errors)
    elif not isinstance(stdout, text_type):
        out = to_unicode(b''.join(stdout.readlines()), errors=encoding_errors)
    else:
        out = stdout

    if isinstance(stderr, binary_type):
        err = to_unicode(stderr, errors=encoding_errors)
    elif not isinstance(stderr, text_type):
        err = to_unicode(b''.join(stderr.readlines()), errors=encoding_errors)
    else:
        err = stderr

    if rc is None:
        rc = 0

    # be sure to remove the BECOME-SUCCESS message now
    out = self._strip_success_message(out)

    display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr))
    return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
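
# A minimal sketch of the stdout/stderr normalization above, assuming UTF-8
# output. to_text_output is an illustrative name; it accepts bytes, a
# file-like object, or text, and always returns text.
import io

def to_text_output(value, errors='replace'):
    if isinstance(value, bytes):
        return value.decode('utf-8', errors)
    if hasattr(value, 'readlines'):
        # file-like: drain it first, then decode
        return b''.join(value.readlines()).decode('utf-8', errors)
    return value

assert to_text_output(b'ok\n') == u'ok\n'
assert to_text_output(io.BytesIO(b'ok\n')) == u'ok\n'
assert to_text_output(u'ok\n') == u'ok\n'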
def run(self):
    '''
    Called when the process is started.  Pushes the result onto the
    results queue. We also remove the host from the blocked hosts list, to
    signify that they are ready for their next task.
    '''

    #import cProfile, pstats, StringIO
    #pr = cProfile.Profile()
    #pr.enable()

    if HAS_ATFORK:
        atfork()

    try:
        # execute the task and build a TaskResult from the result
        display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
        executor_result = TaskExecutor(
            self._host,
            self._task,
            self._task_vars,
            self._play_context,
            self._new_stdin,
            self._loader,
            self._shared_loader_obj,
            self._rslt_q
        ).run()

        display.debug("done running TaskExecutor() for %s/%s" % (self._host, self._task))
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host.name, self._task._uuid, executor_result)

        # put the result on the result queue
        display.debug("sending task result")
        self._rslt_q.put(task_result)
        display.debug("done sending task result")

    except AnsibleConnectionFailure:
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host.name, self._task._uuid, dict(unreachable=True))
        self._rslt_q.put(task_result, block=False)

    except Exception as e:
        if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
            try:
                self._host.vars = dict()
                self._host.groups = []
                task_result = TaskResult(self._host.name, self._task._uuid, dict(failed=True, exception=to_unicode(traceback.format_exc()), stdout=''))
                self._rslt_q.put(task_result, block=False)
            except:
                display.debug(u"WORKER EXCEPTION: %s" % to_unicode(e))
                display.debug(u"WORKER TRACEBACK: %s" % to_unicode(traceback.format_exc()))

    display.debug("WORKER PROCESS EXITING")
def _get_diff(self, difflist):

    if not isinstance(difflist, list):
        difflist = [difflist]

    ret = []
    for diff in difflist:
        try:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                if 'dst_binary' in diff:
                    ret.append("diff skipped: destination file appears to be binary\n")
                if 'src_binary' in diff:
                    ret.append("diff skipped: source file appears to be binary\n")
                if 'dst_larger' in diff:
                    ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
                if 'src_larger' in diff:
                    ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
                if 'before' in diff and 'after' in diff:
                    # format complex structures into 'files'
                    for x in ['before', 'after']:
                        if isinstance(diff[x], dict):
                            diff[x] = json.dumps(diff[x], sort_keys=True, indent=4)
                    if 'before_header' in diff:
                        before_header = "before: %s" % diff['before_header']
                    else:
                        before_header = 'before'
                    if 'after_header' in diff:
                        after_header = "after: %s" % diff['after_header']
                    else:
                        after_header = 'after'
                    differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True),
                                                  to_unicode(diff['after']).splitlines(True),
                                                  fromfile=before_header,
                                                  tofile=after_header,
                                                  fromfiledate='',
                                                  tofiledate='',
                                                  n=C.DIFF_CONTEXT)
                    has_diff = False
                    for line in differ:
                        has_diff = True
                        if line.startswith('+'):
                            line = stringc(line, C.COLOR_DIFF_ADD)
                        elif line.startswith('-'):
                            line = stringc(line, C.COLOR_DIFF_REMOVE)
                        elif line.startswith('@@'):
                            line = stringc(line, C.COLOR_DIFF_LINES)
                        ret.append(line)
                    if has_diff:
                        ret.append('\n')
                if 'prepared' in diff:
                    ret.append(to_unicode(diff['prepared']))
        except UnicodeDecodeError:
            ret.append(">> the files are different, but the diff library cannot compare unicode strings\n\n")
    return u''.join(ret)
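
# A bare-bones sketch of the difflib call above, stripped of the coloring and
# header handling. splitlines(True) keeps line endings so the diff output
# renders verbatim. unified_diff_lines is an illustrative name.
import difflib
import sys

def unified_diff_lines(before, after, n=3):
    return list(difflib.unified_diff(before.splitlines(True), after.splitlines(True),
                                     fromfile='before', tofile='after', n=n))

for line in unified_diff_lines(u'a\nb\n', u'a\nc\n'):
    sys.stdout.write(line)  # ---/+++ headers, an @@ hunk, then ' a', '-b', '+c'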
def display(obj, result): msg = "" result = result._result display = obj._display.display wrap_width = 77 first = obj.first_host and obj.first_item failed = "failed" in result or "unreachable" in result # Only display msg if debug module or if failed (some modules have undesired 'msg' on 'ok') if "msg" in result and (failed or obj.action == "debug"): msg = result.pop("msg", "") # Disable Ansible's verbose setting for debug module to avoid the CallbackBase._dump_results() if "_ansible_verbose_always" in result: del result["_ansible_verbose_always"] # Display additional info when failed if failed: items = ( item for item in ["reason", "module_stderr", "module_stdout", "stderr"] if item in result and to_unicode(result[item]) != "" ) for item in items: msg = result[item] if msg == "" else "\n".join([msg, result.pop(item, "")]) # Add blank line between this fail message and the json dump Ansible displays next msg = "\n".join([msg, ""]) # Must pass unicode strings to Display.display() to prevent UnicodeError tracebacks if isinstance(msg, list): msg = "\n".join([to_unicode(x) for x in msg]) elif not isinstance(msg, unicode): msg = to_unicode(msg) # Wrap text msg = "\n".join([textwrap.fill(line, wrap_width, replace_whitespace=False) for line in msg.splitlines()]) # Display system info and msg, with horizontal rule between hosts/items hr = "-" * int(wrap_width * 0.67) if obj.task_failed and first: display(system(obj.vagrant_version), "bright gray") display(hr, "bright gray") if msg == "": if obj.task_failed and not first: display(hr, "bright gray") else: return else: if not first: display(hr, "bright gray") display(msg, "red" if failed else "bright purple")
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
    """ Display a message to the user

    Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
    """

    # FIXME: this needs to be implemented
    #msg = utils.sanitize_output(msg)
    nocolor = msg
    if color:
        msg = stringc(msg, color)

    if not log_only:
        if not msg.endswith(u'\n'):
            msg2 = msg + u'\n'
        else:
            msg2 = msg

        msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
        if sys.version_info >= (3,):
            # Convert back to text string on python3
            # We first convert to a byte string so that we get rid of
            # characters that are invalid in the user's locale
            msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr))

        if not stderr:
            fileobj = sys.stdout
        else:
            fileobj = sys.stderr

        fileobj.write(msg2)

        try:
            fileobj.flush()
        except IOError as e:
            # Ignore EPIPE in case fileobj has been prematurely closed, eg.
            # when piping to "head -n1"
            if e.errno != errno.EPIPE:
                raise

    if logger and not screen_only:
        msg2 = nocolor.lstrip(u'\n')

        msg2 = to_bytes(msg2)
        if sys.version_info >= (3,):
            # Convert back to text string on python3
            # We first convert to a byte string so that we get rid of
            # characters that are invalid in the user's locale
            msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr))

        if color == C.COLOR_ERROR:
            logger.error(msg2)
        else:
            logger.info(msg2)
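
# A small sketch of the encode-then-decode round trip used in the python3
# branch above: characters the output encoding cannot represent are replaced
# up front instead of raising UnicodeEncodeError at write time.
# scrub_for_terminal is an illustrative name.
import sys

def scrub_for_terminal(msg, encoding=None, errors='replace'):
    encoding = encoding or sys.stdout.encoding or 'utf-8'
    return msg.encode(encoding, errors).decode(encoding)

assert scrub_for_terminal(u'r\u00e9sum\u00e9', encoding='ascii') == u'r?sum?'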
def run(self):
    '''
    Called when the process is started, and loops indefinitely until an
    error is encountered (typically an IOerror from the queue pipe being
    disconnected). During the loop, we attempt to pull tasks off the job
    queue and run them, pushing the result onto the results queue. We
    also remove the host from the blocked hosts list, to signify that
    they are ready for their next task.
    '''

    if HAS_ATFORK:
        atfork()

    try:
        # execute the task and build a TaskResult from the result
        debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
        executor_result = TaskExecutor(
            self._host,
            self._task,
            self._task_vars,
            self._play_context,
            self._new_stdin,
            self._loader,
            self._shared_loader_obj,
        ).run()

        debug("done running TaskExecutor() for %s/%s" % (self._host, self._task))
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host, self._task, executor_result)

        # put the result on the result queue
        debug("sending task result")
        self._rslt_q.put(task_result)
        debug("done sending task result")

    except AnsibleConnectionFailure:
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host, self._task, dict(unreachable=True))
        self._rslt_q.put(task_result, block=False)

    except Exception as e:
        if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
            try:
                self._host.vars = dict()
                self._host.groups = []
                task_result = TaskResult(self._host, self._task, dict(failed=True, exception=to_unicode(traceback.format_exc()), stdout=''))
                self._rslt_q.put(task_result, block=False)
            except:
                debug(u"WORKER EXCEPTION: %s" % to_unicode(e))
                debug(u"WORKER TRACEBACK: %s" % to_unicode(traceback.format_exc()))

    debug("WORKER PROCESS EXITING")
def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False, return_results=False):
    """
    Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
    to the inventory base directory or in the same directory as the playbook.  Variables in the playbook
    dir will win over the inventory dir if files are in both.
    """

    results = {}
    scan_pass = 0
    _basedir = self._basedir
    _playbook_basedir = self._playbook_basedir

    # look in both the inventory base directory and the playbook base directory
    # unless we do an update for a new playbook base dir
    if not new_pb_basedir and _playbook_basedir:
        basedirs = [_basedir, _playbook_basedir]
    else:
        basedirs = [_basedir]

    for basedir in basedirs:
        # this can happen from particular API usages, particularly if not run
        # from /usr/bin/ansible-playbook
        if basedir in ('', None):
            basedir = './'

        scan_pass = scan_pass + 1

        # it's not an error if the directory does not exist, keep moving
        if not os.path.exists(basedir):
            continue

        # save work of second scan if the directories are the same
        if _basedir == _playbook_basedir and scan_pass != 1:
            continue

        # Before trying to load vars from file, check that the directory contains relevant file names
        if host is None and any(map(lambda ext: group.name + ext in self._group_vars_files, C.YAML_FILENAME_EXTENSIONS)):
            # load vars in dir/group_vars/name_of_group
            base_path = to_unicode(os.path.abspath(os.path.join(to_bytes(basedir), b"group_vars/" + to_bytes(group.name))), errors='strict')
            group_results = self._variable_manager.add_group_vars_file(base_path, self._loader)
            if return_results:
                results = combine_vars(results, group_results)
        elif group is None and any(map(lambda ext: host.name + ext in self._host_vars_files, C.YAML_FILENAME_EXTENSIONS)):
            # same for hostvars in dir/host_vars/name_of_host
            base_path = to_unicode(os.path.abspath(os.path.join(to_bytes(basedir), b"host_vars/" + to_bytes(host.name))), errors='strict')
            host_results = self._variable_manager.add_host_vars_file(base_path, self._loader)
            if return_results:
                results = combine_vars(results, host_results)

    # all done, results is a dictionary of variables for this particular host.
    return results
def display(obj, result):
    msg = ''
    result = result._result
    display = obj._display.display
    wrap_width = 77
    first = obj.first_host and obj.first_item
    failed = 'failed' in result or 'unreachable' in result

    # Only display msg if debug module or if failed (some modules have undesired 'msg' on 'ok')
    if 'msg' in result and (failed or obj.action == 'debug'):
        msg = result.pop('msg', '')

        # Disable Ansible's verbose setting for debug module to avoid the CallbackBase._dump_results()
        if '_ansible_verbose_always' in result:
            del result['_ansible_verbose_always']

    # Display additional info when failed
    if failed:
        items = (item for item in ['module_stderr', 'module_stdout', 'stderr']
                 if item in result and to_unicode(result[item]) != '')
        for item in items:
            msg = result[item] if msg == '' else '\n'.join([msg, result.pop(item, '')])
        # Add blank line between this fail message and the json dump Ansible displays next
        msg = '\n'.join([msg, ''])

    # Must pass unicode strings to Display.display() to prevent UnicodeError tracebacks
    if isinstance(msg, list):
        msg = '\n'.join([to_unicode(x) for x in msg])
    elif not isinstance(msg, unicode):
        msg = to_unicode(msg)

    # Wrap text
    msg = '\n'.join([textwrap.fill(line, wrap_width, replace_whitespace=False) for line in msg.splitlines()])

    # Display system info and msg, with horizontal rule between hosts/items
    hr = '-' * int(wrap_width * .67)
    if obj.task_failed and first:
        display(system(obj.vagrant_version), 'bright gray')
        display(hr, 'bright gray')

    if msg == '':
        if obj.task_failed and not first:
            display(hr, 'bright gray')
        else:
            return
    else:
        if not first:
            display(hr, 'bright gray')
        display(msg, 'red' if failed else 'bright purple')
def path_dwim(self, given):
    '''
    make relative paths work like folks expect.
    '''

    given = unquote(given)
    given = to_unicode(given, errors='strict')

    if given.startswith(u"/"):
        return os.path.abspath(given)
    elif given.startswith(u"~"):
        return os.path.abspath(os.path.expanduser(given))
    else:
        basedir = to_unicode(self._basedir, errors='strict')
        return os.path.abspath(os.path.join(basedir, given))
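
# A standalone restatement of the three resolution rules above (quoting and
# unicode conversion omitted), with a couple of worked examples:
import os

def path_dwim(basedir, given):
    if given.startswith(u'/'):
        return os.path.abspath(given)                          # absolute: as-is
    elif given.startswith(u'~'):
        return os.path.abspath(os.path.expanduser(given))      # home-relative
    else:
        return os.path.abspath(os.path.join(basedir, given))   # basedir-relative

assert path_dwim(u'/srv/playbooks', u'/etc/hosts') == u'/etc/hosts'
assert path_dwim(u'/srv/playbooks', u'roles/common') == u'/srv/playbooks/roles/common'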
def main(self, *args, **kwargs):
    """
    Main command-line execution loop.
    """
    display.debug("starting run")

    display.display(" ", log_only=True)
    display.display(" ".join(sys.argv), log_only=True)
    display.display(" ", log_only=True)

    try:
        kwargs['standalone_mode'] = False
        result = super(AbleCLI, self).main(*args, **kwargs)
        return result
    except AnsibleOptionsError as e:
        # cli.parser.print_help()
        display.error(to_unicode(e), wrap_text=False)
        sys.exit(5)
    except AnsibleParserError as e:
        display.error(to_unicode(e), wrap_text=False)
        sys.exit(4)
    # TQM takes care of these, but leaving comment to reserve the exit codes
    # except AnsibleHostUnreachable as e:
    #     display.error(str(e))
    #     sys.exit(3)
    # except AnsibleHostFailed as e:
    #     display.error(str(e))
    #     sys.exit(2)
    except AnsibleError as e:
        display.error(to_unicode(e), wrap_text=False)
        sys.exit(1)
    except ClickException as e:
        display.error(e.format_message())
        sys.exit(e.exit_code)
    except Abort:
        display.display("...aborting")
        display.error("User aborted")
        sys.exit(95)
    except KeyboardInterrupt:
        display.error("User interrupted execution")
        sys.exit(99)
    except Exception as e:
        # have_cli_options = cli is not None and cli.options is not None
        display.error("Unexpected Exception: %s" % to_unicode(e), wrap_text=False)
        # if not have_cli_options or have_cli_options and cli.options.verbosity > 2:
        display.display(u"the full traceback was:\n\n%s" % to_unicode(traceback.format_exc()))
        # else:
        #     display.display("to see the full traceback, use -vvv")
        sys.exit(250)
def _get_file_contents(self, file_name):
    '''
    Reads the file contents from the given file name, and will decrypt them
    if they are found to be vault-encrypted.
    '''
    if not file_name or not isinstance(file_name, string_types):
        raise AnsibleParserError("Invalid filename: '%s'" % str(file_name))

    b_file_name = to_bytes(file_name)
    if not self.path_exists(b_file_name) or not self.is_file(b_file_name):
        raise AnsibleFileNotFound("the file_name '%s' does not exist, or is not readable" % file_name)

    show_content = True
    try:
        with open(b_file_name, 'rb') as f:
            data = f.read()
            if self._vault.is_encrypted(data):
                data = self._vault.decrypt(data, filename=b_file_name)
                show_content = False

        data = to_unicode(data, errors='strict')
        return (data, show_content)

    except (IOError, OSError) as e:
        raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)))
def test_decrypt_1_1(self):
    if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
        raise SkipTest

    v11_file = tempfile.NamedTemporaryFile(delete=False)
    with v11_file as f:
        f.write(to_bytes(v11_data))

    ve = VaultEditor(None, "ansible", v11_file.name)

    # make sure the password functions for the cipher
    error_hit = False
    try:
        ve.decrypt_file()
    except errors.AnsibleError as e:
        error_hit = True

    # verify decrypted content
    f = open(v11_file.name, "rb")
    fdata = to_unicode(f.read())
    f.close()

    os.unlink(v11_file.name)

    assert error_hit == False, "error decrypting 1.1 file"
    assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
def decrypt(self, data, password):
    # SPLIT SALT, DIGEST, AND DATA
    data = b''.join(data.split(b"\n"))
    data = unhexlify(data)
    salt, cryptedHmac, cryptedData = data.split(b"\n", 2)
    salt = unhexlify(salt)
    cryptedData = unhexlify(cryptedData)

    key1, key2, iv = self.gen_key_initctr(password, salt)

    # EXIT EARLY IF DIGEST DOESN'T MATCH
    hmacDecrypt = HMAC.new(key2, cryptedData, SHA256)
    if not self.is_equal(cryptedHmac, to_bytes(hmacDecrypt.hexdigest())):
        return None

    # SET THE COUNTER AND THE CIPHER
    ctr = Counter.new(128, initial_value=int(iv, 16))
    cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)

    # DECRYPT PADDED DATA
    decryptedData = cipher.decrypt(cryptedData)

    # UNPAD DATA
    try:
        padding_length = ord(decryptedData[-1])
    except TypeError:
        padding_length = decryptedData[-1]
    decryptedData = decryptedData[:-padding_length]

    return to_unicode(decryptedData)
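
# A standalone restatement of the unpadding step above (PKCS#7-style: the last
# byte of the plaintext encodes the pad length). strip_padding is an
# illustrative name; the try/except matches the original, since indexing a
# byte string yields a one-character str on python2 but an int on python3.
def strip_padding(decrypted):
    try:
        padding_length = ord(decrypted[-1])
    except TypeError:
        padding_length = decrypted[-1]
    return decrypted[:-padding_length]

assert strip_padding(b'secret\x02\x02') == b'secret'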
def decrypt(self, data):
    data = to_bytes(data)

    if self.password is None:
        raise errors.AnsibleError("A vault password must be specified to decrypt data")

    if not self.is_encrypted(data):
        raise errors.AnsibleError("data is not encrypted")

    # clean out header
    data = self._split_header(data)

    # create the cipher object
    ciphername = to_unicode(self.cipher_name)
    if 'Vault' + ciphername in globals() and ciphername in CIPHER_WHITELIST:
        cipher = globals()['Vault' + ciphername]
        this_cipher = cipher()
    else:
        raise errors.AnsibleError("{} cipher could not be found".format(ciphername))

    # try to decrypt the data
    data = this_cipher.decrypt(data, self.password)
    if data is None:
        raise errors.AnsibleError("Decryption failed")

    return data
def _handle_template(self):
    src = self._task.args.get('src')
    working_path = self._get_working_path()

    if os.path.isabs(src) or urlparse.urlsplit(src).scheme:
        source = src
    else:
        source = self._loader.path_dwim_relative(working_path, 'templates', src)
        if not source:
            source = self._loader.path_dwim_relative(working_path, src)

    if not os.path.exists(source):
        return

    try:
        with open(source, 'r') as f:
            template_data = to_unicode(f.read())
    except IOError:
        return dict(failed=True, msg='unable to load src file')

    # Create a template search path in the following order:
    # [working_path, self_role_path, dependent_role_paths, dirname(source)]
    searchpath = [working_path]
    if self._task._role is not None:
        searchpath.append(self._task._role._role_path)
        dep_chain = self._task._block.get_dep_chain()
        if dep_chain is not None:
            for role in dep_chain:
                searchpath.append(role._role_path)
    searchpath.append(os.path.dirname(source))

    self._templar.environment.loader.searchpath = searchpath
    self._task.args['src'] = self._templar.template(template_data)
def run(self):
    '''
    The main executor entrypoint, where we determine if the specified
    task requires looping and either runs the task with the loop items
    or executes it directly.
    '''

    debug("in run()")

    try:
        # lookup plugins need to know if this task is executing from
        # a role, so that it can properly find files/templates/etc.
        roledir = None
        if self._task._role:
            roledir = self._task._role._role_path
        self._job_vars['roledir'] = roledir

        items = self._get_loop_items()
        if items is not None:
            if len(items) > 0:
                item_results = self._run_loop(items)

                # loop through the item results, and remember the changed/failed
                # result flags based on any item there.
                changed = False
                failed = False
                for item in item_results:
                    if 'changed' in item:
                        changed = True
                    if 'failed' in item:
                        failed = True

                # create the overall result item, and set the changed/failed
                # flags there to reflect the overall result of the loop
                res = dict(results=item_results)

                if changed:
                    res['changed'] = True

                if failed:
                    res['failed'] = True
                    res['msg'] = 'One or more items failed'
                else:
                    res['msg'] = 'All items completed'
            else:
                res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
        else:
            debug("calling self._execute()")
            res = self._execute()
            debug("_execute() done")

        # make sure changed is set in the result, if it's not present
        if 'changed' not in res:
            res['changed'] = False

        debug("dumping result to json")
        result = json.dumps(res)
        debug("done dumping result, returning")
        return result
    except AnsibleError as e:
        return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
def encrypt(self, data):
    data = to_unicode(data)

    if self.is_encrypted(data):
        raise errors.AnsibleError("data is already encrypted")

    if not self.cipher_name:
        self.cipher_name = "AES256"
        # raise errors.AnsibleError("the cipher must be set before encrypting data")

    if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
        cipher = globals()['Vault' + self.cipher_name]
        this_cipher = cipher()
    else:
        raise errors.AnsibleError("{} cipher could not be found".format(self.cipher_name))

    """
    # combine sha + data
    this_sha = sha256(data).hexdigest()
    tmp_data = this_sha + "\n" + data
    """

    # encrypt sha + data
    enc_data = this_cipher.encrypt(data, self.password)

    # add header
    tmp_data = self._add_header(enc_data)
    return tmp_data
def run(self, terms, variables, **kwargs):
    convert_data_p = kwargs.get('convert_data', True)
    basedir = self.get_basedir(variables)

    ret = []
    for term in terms:
        display.debug("File lookup term: %s" % term)

        lookupfile = self._loader.path_dwim_relative(basedir, 'templates', term)
        display.vvvv("File lookup using %s as file" % lookupfile)
        if lookupfile and os.path.exists(lookupfile):
            with open(lookupfile, 'r') as f:
                template_data = to_unicode(f.read())

                searchpath = [self._loader._basedir, os.path.dirname(lookupfile)]
                if 'role_path' in variables:
                    searchpath.insert(1, C.DEFAULT_ROLES_PATH)
                    searchpath.insert(1, variables['role_path'])

                self._templar.environment.loader.searchpath = searchpath
                res = self._templar.template(template_data, preserve_trailing_newlines=True, convert_data=convert_data_p)
                ret.append(res)
        else:
            raise AnsibleError("the template file %s could not be found for the lookup" % term)

    return ret
def _remote_checksum(self, path, all_vars, follow=False):
    '''
    Produces a remote checksum given a path. Returns a string '0'-'4' for
    specific errors instead of a checksum; these values are chosen so they
    can never collide with a real checksum:

    0 = unknown error
    1 = file does not exist, this might not be an error
    2 = permissions issue
    3 = its a directory, not a file
    4 = stat module failed, likely due to not finding python
    '''
    x = "0"  # unknown error has occurred
    try:
        remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
        if remote_stat['exists'] and remote_stat['isdir']:
            x = "3"  # its a directory not a file
        else:
            x = remote_stat['checksum']  # if 1, file is missing
    except AnsibleError as e:
        errormsg = to_unicode(e)
        if errormsg.endswith('Permission denied'):
            x = "2"  # cannot read file
        elif errormsg.endswith('MODULE FAILURE'):
            x = "4"  # python not found or module uncaught exception
    finally:
        return x
def run(self):
    if self.options.verbosity > 0:
        if C.CONFIG_FILE:
            display.display(u"Using %s as config file" % to_unicode(C.CONFIG_FILE))
        else:
            display.display(u"No config file found; using defaults")
def _split_ssh_args(argstring):
    """
    Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
    list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
    the argument list. The list will not contain any empty elements.
    """
    try:
        # Python 2.6.x shlex doesn't handle unicode type so we have to
        # convert args to byte string for that case.  More efficient to
        # try without conversion first but python2.6 doesn't throw an
        # exception, it merely mangles the output:
        # >>> shlex.split(u't e')
        # ['t\x00\x00\x00', '\x00\x00\x00e\x00\x00\x00']
        return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
    except AttributeError:
        return [to_unicode(x.strip()) for x in shlex.split(argstring) if x.strip()]
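
# The docstring's own example, verified with the same shlex-based split (the
# bytes round trip is omitted, since it only matters on python 2.6):
import shlex

args = [x.strip() for x in shlex.split('-o Foo=1 -o Bar="foo bar"') if x.strip()]
assert args == ['-o', 'Foo=1', '-o', 'Bar=foo bar']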
def _get_loop_items(self):
    '''
    Loads a lookup plugin to handle the with_* portion of a task (if specified),
    and returns the items result.
    '''

    # save the play context variables to a temporary dictionary,
    # so that we can modify the job vars without doing a full copy
    # and later restore them to avoid modifying things too early
    play_context_vars = dict()
    self._play_context.update_vars(play_context_vars)

    old_vars = dict()
    for k in play_context_vars.keys():
        if k in self._job_vars:
            old_vars[k] = self._job_vars[k]
        self._job_vars[k] = play_context_vars[k]

    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
    items = None
    if self._task.loop:
        if self._task.loop in self._shared_loader_obj.lookup_loader:
            # TODO: remove convert_bare true and deprecate this in with_
            if self._task.loop == 'first_found':
                # first_found loops are special. If the item is undefined
                # then we want to fall through to the next value rather
                # than failing.
                loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar,
                                                         loader=self._loader, fail_on_undefined=False, convert_bare=True)
                loop_terms = [t for t in loop_terms if not templar._contains_vars(t)]
            else:
                try:
                    loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar,
                                                             loader=self._loader, fail_on_undefined=True, convert_bare=True)
                except AnsibleUndefinedVariable as e:
                    if u'has no attribute' in to_unicode(e):
                        loop_terms = []
                        display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
                    else:
                        raise
            items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader,
                                                              templar=templar).run(terms=loop_terms, variables=self._job_vars)
        else:
            raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)

    # now we restore any old job variables that may have been modified,
    # and delete them if they were in the play context vars but not in
    # the old variables dictionary
    for k in play_context_vars.keys():
        if k in old_vars:
            self._job_vars[k] = old_vars[k]
        else:
            del self._job_vars[k]

    if items:
        from ansible.vars.unsafe_proxy import UnsafeProxy
        for idx, item in enumerate(items):
            if item is not None and not isinstance(item, UnsafeProxy):
                items[idx] = UnsafeProxy(item)

    return items
def test_decrypt_1_0(self):
    """
    Skip testing decrypting 1.0 files if we don't have access to AES, KDF or
    Counter, or we are running on python3 since VaultAES hasn't been backported.
    """
    if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or sys.version > '3':
        raise SkipTest

    v10_file = tempfile.NamedTemporaryFile(delete=False)
    with v10_file as f:
        f.write(to_bytes(v10_data))

    ve = VaultEditor(None, "ansible", v10_file.name)

    # make sure the password functions for the cipher
    error_hit = False
    try:
        ve.decrypt_file()
    except errors.AnsibleError as e:
        error_hit = True

    # verify decrypted content
    f = open(v10_file.name, "rb")
    fdata = to_unicode(f.read())
    f.close()

    os.unlink(v10_file.name)

    assert error_hit == False, "error decrypting 1.0 file"
    assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def _split_args(argstring):
    """
    Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
    list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
    the argument list. The list will not contain any empty elements.
    """
    return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
def run(self, terms, variables, **kwargs):
    convert_data_p = kwargs.get('convert_data', True)
    ret = []

    for term in terms:
        display.debug("File lookup term: %s" % term)

        lookupfile = self.find_file_in_search_path(variables, 'templates', term)
        display.vvvv("File lookup using %s as file" % lookupfile)
        if lookupfile:
            with open(lookupfile, 'r') as f:
                template_data = to_unicode(f.read())

                # set jinja2 internal search path for includes
                if 'ansible_search_path' in variables:
                    searchpath = variables['ansible_search_path']
                else:
                    searchpath = [self._loader._basedir, os.path.dirname(lookupfile)]
                self._templar.environment.loader.searchpath = searchpath

                # do the templating
                res = self._templar.template(template_data, preserve_trailing_newlines=True, convert_data=convert_data_p)
                ret.append(res)
        else:
            raise AnsibleError("the template file %s could not be found for the lookup" % term)

    return ret
def __getitem__(self, varname):
    if varname not in self._templar._available_variables:
        if varname in self._locals:
            return self._locals[varname]
        for i in self._extras:
            if varname in i:
                return i[varname]
        if varname in self._globals:
            return self._globals[varname]
        else:
            raise KeyError("undefined variable: %s" % varname)

    variable = self._templar._available_variables[varname]

    # HostVars is special, return it as-is, as is the special variable
    # 'vars', which contains the vars structure
    from ansible.vars.hostvars import HostVars
    if (isinstance(variable, dict) and varname == "vars") or isinstance(variable, HostVars):
        return variable
    else:
        value = None
        try:
            value = self._templar.template(variable)
        except Exception as e:
            raise type(e)(to_unicode(variable) + ': ' + e.message)
        return value
def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
    if groups is None:
        groups = dict()

    self._loader = loader
    self.groups = groups

    # Support inventory scripts that are not prefixed with some
    # path information but happen to be in the current working
    # directory when '.' is not in PATH.
    self.filename = os.path.abspath(filename)
    cmd = [self.filename, "--list"]
    try:
        sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as e:
        raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
    (stdout, stderr) = sp.communicate()

    if sp.returncode != 0:
        raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename, stderr))

    # make sure script output is unicode so that json loader will output
    # unicode strings itself
    try:
        self.data = to_unicode(stdout, errors="strict")
    except Exception as e:
        raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_str(self.filename), to_str(e)))

    # see comment about _meta below
    self.host_vars_from_top = None
    self._parse(stderr)
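
# A simplified sketch of the same pattern: run an executable inventory script
# with --list and parse the JSON it prints. load_inventory_script is an
# illustrative helper, not part of Ansible; _meta/host_vars handling and the
# unicode strictness above are omitted.
import json
import subprocess

def load_inventory_script(path):
    proc = subprocess.Popen([path, '--list'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError('inventory script %s failed: %s' % (path, stderr))
    return json.loads(stdout.decode('utf-8'))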
def _execute(self, variables=None):
    '''
    The primary workhorse of the executor system, this runs the task
    on the specified host (which may be the delegated_to host) and handles
    the retry/until and block rescue/always execution
    '''

    if variables is None:
        variables = self._job_vars

    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

    context_validation_error = None
    try:
        # apply the given task's information to the connection info,
        # which may override some fields already set by the play or
        # the options specified on the command line
        self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        self._play_context.post_validate(templar=templar)

        # now that the play context is finalized, if the remote_addr is not set
        # default to using the host's address field as the remote address
        if not self._play_context.remote_addr:
            self._play_context.remote_addr = self._host.address

        # We also add "magic" variables back into the variables dict to make sure
        # a certain subset of variables exist.
        self._play_context.update_vars(variables)
    except AnsibleError as e:
        # save the error, which we'll raise later if we don't end up
        # skipping this task during the conditional evaluation step
        context_validation_error = e

    # Evaluate the conditional (if any) for this task, which we do before running
    # the final task post-validation. We do this before the post validation due to
    # the fact that the conditional may specify that the task be skipped due to a
    # variable not being present which would otherwise cause validation to fail
    try:
        if not self._task.evaluate_conditional(templar, variables):
            display.debug("when evaluation failed, skipping this task")
            return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
    except AnsibleError:
        # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
        if self._task.action != 'include':
            raise

    # if we ran into an error while setting up the PlayContext, raise it now
    if context_validation_error is not None:
        raise context_validation_error

    # if this task is a TaskInclude, we just return now with a success code so the
    # main thread can expand the task list for the given host
    if self._task.action == 'include':
        include_variables = self._task.args.copy()
        include_file = include_variables.pop('_raw_params', None)
        if not include_file:
            return dict(failed=True, msg="No include file was specified to the include")

        include_file = templar.template(include_file)
        return dict(include=include_file, include_variables=include_variables)

    # Now we do final validation on the task, which sets all fields to their final values.
    self._task.post_validate(templar=templar)
    if '_variable_params' in self._task.args:
        variable_params = self._task.args.pop('_variable_params')
        if isinstance(variable_params, dict):
            display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
            variable_params.update(self._task.args)
            self._task.args = variable_params

    # get the connection and the handler for this execution
    if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
        self._connection = self._get_connection(variables=variables, templar=templar)
        self._connection.set_host_overrides(host=self._host)
    else:
        # if connection is reused, its _play_context is no longer valid and needs
        # to be replaced with the one templated above, in case other data changed
        self._connection._play_context = self._play_context

    self._handler = self._get_action_handler(connection=self._connection, templar=templar)

    # And filter out any fields which were set to default(omit), and got the omit token value
    omit_token = variables.get('omit')
    if omit_token is not None:
        self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)

    # Read some values from the task, so that we can modify them if need be
    if self._task.until:
        retries = self._task.retries
        if retries is None:
            retries = 3
    else:
        retries = 1

    delay = self._task.delay
    if delay < 0:
        delay = 1

    # make a copy of the job vars here, in case we need to update them
    # with the registered variable value later on when testing conditions
    vars_copy = variables.copy()

    display.debug("starting attempt loop")
    result = None
    for attempt in range(1, retries + 1):
        display.debug("running the handler")
        try:
            result = self._handler.run(task_vars=variables)
        except AnsibleConnectionFailure as e:
            return dict(unreachable=True, msg=to_unicode(e))
        display.debug("handler run complete")

        # preserve no log
        result["_ansible_no_log"] = self._play_context.no_log

        # update the local copy of vars with the registered value, if specified,
        # or any facts which may have been generated by the module execution
        if self._task.register:
            vars_copy[self._task.register] = wrap_var(result.copy())

        if self._task.async > 0:
            # the async_wrapper module returns dumped JSON via its stdout
            # response, so we parse it here and replace the result
            try:
                if 'skipped' in result and result['skipped'] or 'failed' in result and result['failed']:
                    return result
                result = json.loads(result.get('stdout'))
            except (TypeError, ValueError) as e:
                return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e))

            if self._task.poll > 0:
                result = self._poll_async_result(result=result, templar=templar)

            # ensure no log is preserved
            result["_ansible_no_log"] = self._play_context.no_log

        # helper methods for use below in evaluating changed/failed_when
        def _evaluate_changed_when_result(result):
            if self._task.changed_when is not None and self._task.changed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.changed_when
                result['changed'] = cond.evaluate_conditional(templar, vars_copy)

        def _evaluate_failed_when_result(result):
            if self._task.failed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.failed_when
                failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                result['failed_when_result'] = result['failed'] = failed_when_result
            else:
                failed_when_result = False
            return failed_when_result

        if 'ansible_facts' in result:
            vars_copy.update(result['ansible_facts'])

        # set the failed property if the result has a non-zero rc. This will be
        # overridden below if the failed_when property is set
        if result.get('rc', 0) != 0:
            result['failed'] = True

        # if we didn't skip this task, use the helpers to evaluate the changed/
        # failed_when properties
        if 'skipped' not in result:
            _evaluate_changed_when_result(result)
            _evaluate_failed_when_result(result)

        if retries > 1:
            cond = Conditional(loader=self._loader)
            cond.when = self._task.until
            if cond.evaluate_conditional(templar, vars_copy):
                break
            else:
                # no conditional check, or it failed, so sleep for the specified time
                if attempt < retries:
                    result['attempts'] = attempt
                    result['_ansible_retry'] = True
                    result['retries'] = retries
                    display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                    self._rslt_q.put(TaskResult(self._host, self._task, result), block=False)
                    time.sleep(delay)
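
# A generic restatement of the attempt loop above, stripped of the Ansible
# plumbing: run a handler, stop as soon as the 'until' condition holds,
# otherwise record the attempt and sleep before retrying. run_with_retries
# and its parameters are illustrative names, not Ansible APIs.
import time

def run_with_retries(run_once, until, retries=3, delay=1):
    result = None
    for attempt in range(1, retries + 1):
        result = run_once()
        if until(result):
            break
        if attempt < retries:
            result['attempts'] = attempt
            time.sleep(delay)
    return result

# e.g. retry until the handler reports rc == 0
assert run_with_retries(lambda: {'rc': 0}, lambda r: r.get('rc') == 0)['rc'] == 0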
def _get_connection(self, variables, templar):
    '''
    Reads the connection property for the host, and returns the
    correct connection object from the list of connection plugins
    '''

    if self._task.delegate_to is not None:
        # since we're delegating, we don't want to use interpreter values
        # which would have been set for the original target host
        for i in variables.keys():
            if isinstance(i, string_types) and i.startswith('ansible_') and i.endswith('_interpreter'):
                del variables[i]
        # now replace the interpreter values with those that may have come
        # from the delegated-to host
        delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict())
        if isinstance(delegated_vars, dict):
            for i in delegated_vars:
                if isinstance(i, string_types) and i.startswith("ansible_") and i.endswith("_interpreter"):
                    variables[i] = delegated_vars[i]

    conn_type = self._play_context.connection
    if conn_type == 'smart':
        conn_type = 'ssh'
        if sys.platform.startswith('darwin') and self._play_context.password:
            # due to a current bug in sshpass on OSX, which can trigger
            # a kernel panic even for non-privileged users, we revert to
            # paramiko on that OS when a SSH password is specified
            conn_type = "paramiko"
        else:
            # see if SSH can support ControlPersist if not use paramiko
            try:
                cmd = subprocess.Popen(['ssh', '-o', 'ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                (out, err) = cmd.communicate()
                err = to_unicode(err)
                if u"Bad configuration option" in err or u"Usage:" in err:
                    conn_type = "paramiko"
            except OSError:
                conn_type = "paramiko"

    connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin)
    if not connection:
        raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

    if self._play_context.accelerate:
        # accelerate is deprecated as of 2.1...
        display.deprecated('Accelerated mode is deprecated. Consider using SSH with ControlPersist and pipelining enabled instead')
        # launch the accelerated daemon here
        ssh_connection = connection
        handler = self._shared_loader_obj.action_loader.get(
            'normal',
            task=self._task,
            connection=ssh_connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=templar,
            shared_loader_obj=self._shared_loader_obj,
        )

        key = key_for_hostname(self._play_context.remote_addr)
        accelerate_args = dict(
            password=base64.b64encode(key.__str__()),
            port=self._play_context.accelerate_port,
            minutes=C.ACCELERATE_DAEMON_TIMEOUT,
            ipv6=self._play_context.accelerate_ipv6,
            debug=self._play_context.verbosity,
        )

        connection = self._shared_loader_obj.connection_loader.get('accelerate', self._play_context, self._new_stdin)
        if not connection:
            raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

        try:
            connection._connect()
        except AnsibleConnectionFailure:
            display.debug('connection failed, fallback to accelerate')
            res = handler._execute_module(module_name='accelerate', module_args=accelerate_args, task_vars=variables, delete_remote_tmp=False)
            display.debug(res)
            connection._connect()

    return connection
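
# A standalone sketch of the ControlPersist probe above, under the same
# assumption the original makes: asking the installed ssh client about an
# option it does not recognize puts "Bad configuration option" (or a usage
# message) on stderr. ssh_supports_controlpersist is an illustrative name.
import subprocess

def ssh_supports_controlpersist():
    try:
        proc = subprocess.Popen(['ssh', '-o', 'ControlPersist'],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, err = proc.communicate()
    except OSError:
        return False  # no ssh client installed at all
    err = err.decode('utf-8', 'replace')
    return u'Bad configuration option' not in err and u'Usage:' not in err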
def _load_included_file(self, included_file, iterator, is_handler=False):
    '''
    Loads an included YAML file of tasks, applying the optional set of variables.
    '''

    display.debug("loading included file: %s" % included_file._filename)
    try:
        data = self._loader.load_from_file(included_file._filename)
        if data is None:
            return []
        elif not isinstance(data, list):
            raise AnsibleError("included task files must contain a list of tasks")

        block_list = load_list_of_blocks(
            data,
            play=included_file._task._block._play,
            parent_block=included_file._task._block,
            task_include=included_file._task,
            role=included_file._task._role,
            use_handlers=is_handler,
            loader=self._loader
        )

        # since we skip incrementing the stats when the task result is
        # first processed, we do so now for each host in the list
        for host in included_file._hosts:
            self._tqm._stats.increment('ok', host.name)

    except AnsibleError as e:
        # mark all of the hosts including this file as failed, send callbacks,
        # and increment the stats for this host
        for host in included_file._hosts:
            tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_unicode(e)))
            iterator.mark_host_failed(host)
            self._tqm._failed_hosts[host.name] = True
            self._tqm._stats.increment('failures', host.name)
            self._tqm.send_callback('v2_runner_on_failed', tr)
        return []

    # set the vars for this task from those specified as params to the include
    for b in block_list:
        # first make a copy of the including task, so that each has a unique copy to modify
        # FIXME: not sure if this is the best way to fix this, as we might be losing
        #        information in the copy. Previously we assigned the include params to
        #        the block variables directly, which caused other problems, so we may
        #        need to figure out a third option if this also presents problems.
        b._task_include = b._task_include.copy(exclude_block=True)

        # then we create a temporary set of vars to ensure the variable reference is unique
        temp_vars = b._task_include.vars.copy()
        temp_vars.update(included_file._args.copy())

        # pop tags out of the include args, if they were specified there, and assign
        # them to the include. If the include already had tags specified, we raise an
        # error so that users know not to specify them both ways
        tags = temp_vars.pop('tags', [])
        if isinstance(tags, string_types):
            tags = [tags]
        if len(tags) > 0:
            if len(b._task_include.tags) > 0:
                raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
                                         "Mixing tag specification styles is prohibited for the whole import hierarchy, not only for a single import statement",
                                         obj=included_file._task._ds)
            display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
            b._task_include.tags = tags
        b._task_include.vars = temp_vars

    # finally, send the callback and return the list of blocks loaded
    self._tqm.send_callback('v2_playbook_on_include', included_file)
    display.debug("done processing included file")
    return block_list
def run(self):

    # Manage passwords
    sshpass = None
    becomepass = None
    vault_pass = None
    passwords = {}

    # don't deal with privilege escalation or passwords when we don't need to
    if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

    if self.options.vault_password_file:
        # read vault_pass from a file
        vault_pass = read_vault_file(self.options.vault_password_file)
    elif self.options.ask_vault_pass:
        vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]

    loader = DataLoader(vault_password=vault_pass)

    extra_vars = {}
    for extra_vars_opt in self.options.extra_vars:
        extra_vars_opt = to_unicode(extra_vars_opt, errors='strict')
        if extra_vars_opt.startswith(u"@"):
            # Argument is a YAML file (JSON is a subset of YAML)
            data = loader.load_from_file(extra_vars_opt[1:])
        elif extra_vars_opt and extra_vars_opt[0] in u'[{':
            # Arguments as YAML
            data = loader.load(extra_vars_opt)
        else:
            # Arguments as Key-value
            data = parse_kv(extra_vars_opt)
        extra_vars = combine_vars(extra_vars, data)

    # FIXME: this should be moved inside the playbook executor code
    only_tags = self.options.tags.split(",")
    skip_tags = self.options.skip_tags
    if self.options.skip_tags is not None:
        skip_tags = self.options.skip_tags.split(",")

    # initial error check, to make sure all specified playbooks are accessible
    # before we start running anything through the playbook executor
    for playbook in self.args:
        if not os.path.exists(playbook):
            raise AnsibleError("the playbook: %s could not be found" % playbook)
        if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
            raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)

    # create the variable manager, which will be shared throughout
    # the code, ensuring a consistent view of global variables
    variable_manager = VariableManager()
    variable_manager.extra_vars = extra_vars

    # create the inventory, and filter it based on the subset specified (if any)
    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
    variable_manager.set_inventory(inventory)

    # Note: slightly wrong, this is written so that implicit localhost
    # (which is not returned in list_hosts()) is taken into account for
    # warning if inventory is empty. But it can't be taken into account for
    # checking if limit doesn't match any hosts. Instead we don't worry about
    # limit if only implicit localhost was in inventory to start with.
    #
    # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
    no_hosts = False
    if len(inventory.list_hosts()) == 0:
        # Empty inventory
        self.display.warning("provided hosts list is empty, only localhost is available")
        no_hosts = True

    inventory.subset(self.options.subset)
    if len(inventory.list_hosts()) == 0 and no_hosts is False:
        # Invalid limit
        raise AnsibleError("Specified --limit does not match any hosts")

    # create the playbook executor, which manages running the plays via a task queue manager
    pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager,
                            loader=loader, display=self.display, options=self.options, passwords=passwords)

    results = pbex.run()

    if isinstance(results, list):
        for p in results:
            self.display.display('\nplaybook: %s\n' % p['playbook'])
            for play in p['plays']:
                if self.options.listhosts:
                    self.display.display("\n  %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts'])))
                    for host in play['hosts']:
                        self.display.display("    %s" % host)
                if self.options.listtasks:
                    # TODO: do we want to display block info?
                    self.display.display("\n  %s" % (play['name']))
                    for task in play['tasks']:
                        self.display.display("    %s" % task)
                if self.options.listtags:
                    # TODO: fix once we figure out block handling above
                    self.display.display("\n  %s: tags count=%d" % (play['name'], len(play['tags'])))
                    for tag in play['tags']:
                        self.display.display("    %s" % tag)
        return 0
    else:
        return results
def default(self, arg, forceshell=False):
    """ actually runs modules """
    if arg.startswith("#"):
        return False

    if not self.options.cwd:
        display.error("No host found")
        return False

    if arg.split()[0] in self.modules:
        module = arg.split()[0]
        module_args = ' '.join(arg.split()[1:])
    else:
        module = 'shell'
        module_args = arg

    if forceshell is True:
        module = 'shell'
        module_args = arg

    self.options.module_name = module

    result = None
    try:
        check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw')
        play_ds = dict(
            name="Ansible Shell",
            hosts=self.options.cwd,
            gather_facts='no',
            tasks=[dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))]
        )
        play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
    except Exception as e:
        display.error(u"Unable to build command: %s" % to_unicode(e))
        return False

    try:
        cb = 'minimal'  # FIXME: make callbacks configurable
        # now create a task queue manager to execute the play
        self._tqm = None
        try:
            self._tqm = TaskQueueManager(
                inventory=self.inventory,
                variable_manager=self.variable_manager,
                loader=self.loader,
                options=self.options,
                passwords=self.passwords,
                stdout_callback=cb,
                run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
                run_tree=False,
            )
            result = self._tqm.run(play)
        finally:
            if self._tqm:
                self._tqm.cleanup()

        if result is None:
            display.error("No hosts found")
            return False
    except KeyboardInterrupt:
        display.error('User interrupted execution')
        return False
    except Exception as e:
        display.error(to_unicode(e))
        # FIXME: add traceback in very very verbose mode
        return False
def run(self, tmp=None, task_vars=None): ''' handler for file transfer operations ''' if task_vars is None: task_vars = dict() result = super(ActionModule, self).run(tmp, task_vars) source = self._task.args.get('src', None) content = self._task.args.get('content', None) dest = self._task.args.get('dest', None) raw = boolean(self._task.args.get('raw', 'no')) force = boolean(self._task.args.get('force', 'yes')) faf = self._task.first_available_file remote_src = boolean(self._task.args.get('remote_src', False)) follow = boolean(self._task.args.get('follow', False)) if (source is None and content is None and faf is None) or dest is None: result['failed'] = True result['msg'] = "src (or content) and dest are required" return result elif (source is not None or faf is not None) and content is not None: result['failed'] = True result['msg'] = "src and content are mutually exclusive" return result elif content is not None and dest is not None and dest.endswith("/"): result['failed'] = True result['msg'] = "dest must be a file if content is defined" return result # Check if the source ends with a "/" source_trailing_slash = False if source: source_trailing_slash = self._connection._shell.path_has_trailing_slash( source) # Define content_tempfile in case we set it after finding content populated. content_tempfile = None # If content is defined make a temp file and write the content into it. if content is not None: try: # If content comes to us as a dict it should be decoded json. # We need to encode it back into a string to write it out. if isinstance(content, dict) or isinstance(content, list): content_tempfile = self._create_content_tempfile( json.dumps(content)) else: content_tempfile = self._create_content_tempfile(content) source = content_tempfile except Exception as err: result['failed'] = True result[ 'msg'] = "could not write content temp file: %s" % to_str( err) return result # if we have first_available_file in our vars # look up the files and use the first one we find as src elif faf: source = self._get_first_available_file( faf, task_vars.get('_original_file', None)) elif remote_src: result.update( self._execute_module(module_name='copy', module_args=self._task.args, task_vars=task_vars, delete_remote_tmp=False)) return result else: # find in expected paths try: source = self._find_needle('files', source) except AnsibleError as e: result['failed'] = True result['msg'] = to_unicode(e) return result # A list of source file tuples (full_path, relative_path) which will try to copy to the destination source_files = [] # If source is a directory populate our list else source is a file and translate it to a tuple. if os.path.isdir(to_bytes(source, errors='strict')): # Get the amount of spaces to remove to get the relative path. if source_trailing_slash: sz = len(source) else: sz = len(source.rsplit('/', 1)[0]) + 1 # Walk the directory and append the file tuples to source_files. for base_path, sub_folders, files in os.walk(to_bytes(source)): for file in files: full_path = os.path.join(base_path, file) rel_path = full_path[sz:] if rel_path.startswith('/'): rel_path = rel_path[1:] source_files.append((full_path, rel_path)) # If it's recursive copy, destination is always a dir, # explicitly mark it so (note - copy module relies on this). 
if not self._connection._shell.path_has_trailing_slash(dest): dest = self._connection._shell.join_path(dest, '') else: source_files.append((source, os.path.basename(source))) changed = False module_return = dict(changed=False) # A register for if we executed a module. # Used to cut down on command calls when not recursive. module_executed = False # Tell _execute_module to delete the file if there is one file. delete_remote_tmp = (len(source_files) == 1) # If this is a recursive action, create a tmp path that we can share, as the one created by _execute_module comes too late. remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user if not delete_remote_tmp: if tmp is None or "-tmp-" not in tmp: tmp = self._make_tmp_path(remote_user) self._cleanup_remote_tmp = True # expand any user home dir specifier dest = self._remote_expand_user(dest) diffs = [] for source_full, source_rel in source_files: source_full = self._loader.get_real_file(source_full) # Generate a hash of the local file. local_checksum = checksum(source_full) # If local_checksum is not defined we can't find the file so we should fail out. if local_checksum is None: result['failed'] = True result['msg'] = "could not find src=%s" % source_full self._remove_tmp_path(tmp) return result # This is a kind of optimization - if the user told us the destination is a dir, do the path manipulation right away, otherwise we still check for dest being a dir via the remote call below. if self._connection._shell.path_has_trailing_slash(dest): dest_file = self._connection._shell.join_path(dest, source_rel) else: dest_file = self._connection._shell.join_path(dest) # Attempt to get remote file info dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp) if dest_status['exists'] and dest_status['isdir']: # The dest is a directory. if content is not None: # If source was defined as content remove the temporary file and fail out. self._remove_tempfile_if_content_defined(content, content_tempfile) self._remove_tmp_path(tmp) result['failed'] = True result['msg'] = "can not use content with a dir as dest" return result else: # Append the relative source location to the destination and get remote stats again dest_file = self._connection._shell.join_path(dest, source_rel) dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp) if dest_status['exists'] and not force: # the remote file exists and force is off, so continue to the next iteration. continue if local_checksum != dest_status['checksum']: # The checksums don't match and we will change or error out. changed = True # Create a tmp path if missing only if this is not recursive. # If this is recursive we already have a tmp path. if delete_remote_tmp: if tmp is None or "-tmp-" not in tmp: tmp = self._make_tmp_path(remote_user) self._cleanup_remote_tmp = True if self._play_context.diff and not raw: diffs.append(self._get_diff_data(dest_file, source_full, task_vars)) if self._play_context.check_mode: self._remove_tempfile_if_content_defined(content, content_tempfile) changed = True module_return = dict(changed=True) continue # Define a remote directory that we will copy the file to.
tmp_src = self._connection._shell.join_path(tmp, 'source') remote_path = None if not raw: remote_path = self._transfer_file(source_full, tmp_src) else: self._transfer_file(source_full, dest_file) # We have copied the file remotely and no longer require our content_tempfile self._remove_tempfile_if_content_defined( content, content_tempfile) self._loader.cleanup_tmp_file(source_full) # fix file permissions when the copy is done as a different user if remote_path: self._fixup_perms((tmp, remote_path), remote_user) if raw: # Continue to next iteration if raw is defined. continue # Run the copy module # src and dest here come after original and override them # we pass dest only to make sure it includes trailing slash in case of recursive copy new_module_args = self._task.args.copy() new_module_args.update( dict( src=tmp_src, dest=dest, original_basename=source_rel, )) if 'content' in new_module_args: del new_module_args['content'] module_return = self._execute_module( module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=delete_remote_tmp) module_executed = True else: # no need to transfer the file, already correct hash, but still need to call # the file module in case we want to change attributes self._remove_tempfile_if_content_defined( content, content_tempfile) self._loader.cleanup_tmp_file(source_full) if raw: # Continue to next iteration if raw is defined. self._remove_tmp_path(tmp) continue # Build temporary module_args. new_module_args = self._task.args.copy() new_module_args.update( dict(src=source_rel, dest=dest, original_basename=source_rel)) # Execute the file module. module_return = self._execute_module( module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=delete_remote_tmp) module_executed = True if not module_return.get('checksum'): module_return['checksum'] = local_checksum if module_return.get('failed'): result.update(module_return) if not delete_remote_tmp: self._remove_tmp_path(tmp) return result if module_return.get('changed'): changed = True # the file module returns the file path as 'path', but # the copy module uses 'dest', so add it if it's not there if 'path' in module_return and 'dest' not in module_return: module_return['dest'] = module_return['path'] # Delete tmp path if we were recursive or if we did not execute a module. if not delete_remote_tmp or (delete_remote_tmp and not module_executed): self._remove_tmp_path(tmp) if module_executed and len(source_files) == 1: result.update(module_return) else: result.update(dict(dest=dest, src=source, changed=changed)) if diffs: result['diff'] = diffs return result
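A self-contained sketch of the (full_path, relative_path) computation used for the recursive-copy case above, assuming POSIX-style paths; walk_sources is a hypothetical helper, not part of the action plugin:

import os

def walk_sources(source, trailing_slash):
    # with a trailing slash, relative paths are rooted at `source` itself;
    # without one, the last path component of `source` is kept as a prefix
    sz = len(source) if trailing_slash else len(source.rsplit('/', 1)[0]) + 1
    results = []
    for base_path, _sub_folders, files in os.walk(source):
        for name in files:
            full_path = os.path.join(base_path, name)
            rel_path = full_path[sz:].lstrip('/')
            results.append((full_path, rel_path))
    return results

# walk_sources('/etc/app/', True) yields paths relative to /etc/app;
# walk_sources('/etc/app', False) yields paths that keep the 'app/' prefix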
def get_original_host(host_name): host_name = to_unicode(host_name) if host_name in self._inventory._hosts_cache: return self._inventory._hosts_cache[host_name] else: return self._inventory.get_host(host_name)
def _load_included_file(self, included_file, iterator, is_handler=False): ''' Loads an included YAML file of tasks, applying the optional set of variables. ''' display.debug("loading included file: %s" % included_file._filename) try: data = self._loader.load_from_file(included_file._filename) if data is None: return [] elif not isinstance(data, list): raise AnsibleError("included task files must contain a list of tasks") ti_copy = included_file._task.copy() temp_vars = ti_copy.vars.copy() temp_vars.update(included_file._args) # pop tags out of the include args, if they were specified there, and assign # them to the include. If the include already had tags specified, we raise an # error so that users know not to specify them both ways tags = included_file._task.vars.pop('tags', []) if isinstance(tags, string_types): tags = tags.split(',') if len(tags) > 0: if len(included_file._task.tags) > 0: raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). Mixing styles of tag specification is prohibited for the whole include hierarchy, not only for a single include statement", obj=included_file._task._ds) display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option") included_file._task.tags = tags ti_copy.vars = temp_vars block_list = load_list_of_blocks( data, play=iterator._play, parent_block=None, task_include=ti_copy, role=included_file._task._role, use_handlers=is_handler, loader=self._loader, variable_manager=self._variable_manager, ) # since we skip incrementing the stats when the task result is # first processed, we do so now for each host in the list for host in included_file._hosts: self._tqm._stats.increment('ok', host.name) except AnsibleError as e: # mark all of the hosts including this file as failed, send callbacks, # and increment the stats for this host for host in included_file._hosts: tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_unicode(e))) iterator.mark_host_failed(host) self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) self._tqm.send_callback('v2_runner_on_failed', tr) return [] # finally, send the callback and return the list of blocks loaded self._tqm.send_callback('v2_playbook_on_include', included_file) display.debug("done processing included file") return block_list
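The include-tags handling above accepts either a comma-separated string or a list. A small standalone sketch of that normalization (normalize_tags is a hypothetical name, and it adds a strip() the original does not perform):

def normalize_tags(tags):
    # string_types in the py2-era code above; plain str here
    if isinstance(tags, str):
        tags = tags.split(',')
    return [t.strip() for t in tags if t and t.strip()]

# normalize_tags('web, db') -> ['web', 'db']
# normalize_tags(['web'])   -> ['web']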
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='replace'): ''' This is the function which executes the low level shell command, which may be commands to create/remove directories for temporary files, or to run the module code or python directly when pipelining. :kwarg encoding_errors: If the value returned by the command isn't utf-8 then we have to figure out how to transform it to unicode. If the value is just going to be displayed to the user (or discarded) then the default of 'replace' is fine. If the data is used as a key or is going to be written back out to a file verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' if executable is not None: cmd = executable + ' -c ' + cmd self._display.debug("in _low_level_execute_command() (%s)" % (cmd, )) if not cmd: # this can happen with powershell modules when there is no analog to a Windows command (like chmod) self._display.debug( "no command, exiting _low_level_execute_command()") return dict(stdout='', stderr='') allow_same_user = C.BECOME_ALLOW_SAME_USER same_user = self._play_context.become_user == self._play_context.remote_user if sudoable and self._play_context.become and (allow_same_user or not same_user): self._display.debug("using become for this command") cmd = self._play_context.make_become_cmd(cmd, executable=executable) self._display.debug("executing the command %s through the connection" % cmd) rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) self._display.debug("command execution done") # stdout and stderr may be either a file-like or a bytes object. # Convert either one to a text type if isinstance(stdout, binary_type): out = to_unicode(stdout, errors=encoding_errors) elif not isinstance(stdout, text_type): out = to_unicode(b''.join(stdout.readlines()), errors=encoding_errors) else: out = stdout if isinstance(stderr, binary_type): err = to_unicode(stderr, errors=encoding_errors) elif not isinstance(stderr, text_type): err = to_unicode(b''.join(stderr.readlines()), errors=encoding_errors) else: err = stderr self._display.debug("done with _low_level_execute_command() (%s)" % (cmd, )) if rc is None: rc = 0 return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
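The stdout/stderr normalization above handles three shapes: bytes, text, or a file-like object yielding bytes. A sketch of the same decision using plain str/bytes in place of the to_unicode/binary_type helpers:

def to_text_stream(value, errors='replace'):
    if isinstance(value, bytes):
        return value.decode('utf-8', errors)
    if not isinstance(value, str):
        # assume a file-like object producing bytes lines, as exec_command may return
        return b''.join(value.readlines()).decode('utf-8', errors)
    return value

# to_text_stream(b'ok\n') -> 'ok\n'; an io.BytesIO(b'ok\n') works the same way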
def set_basedir(self, basedir): ''' sets the base directory, used to find files when a relative path is given ''' if basedir is not None: self._basedir = to_unicode(basedir)
def execute_ansible_adhoc(user, sudouser, hosts, environment, hostpattern, unix_command, output): """ Execute ansible adhoc command Example: python ansible_adhoc.py --user=exxxxx --sudouser=lampp --unix_command='uptime' --environment prod --hostpattern='rn2-lampp-lapp600*.rno.apple.com' python ansible_adhoc.py --user=exxxxx --sudouser=lampp --unix_command='uptime' --hosts=rn2-lampp-lapp6016.rno.apple.com python ansible_adhoc.py --user=exxxxx --sudouser=lampp --unix_command='uptime' --hosts='rn2-lampp-lapp6016.rno.apple.com,rn2-lampp-lapp6017.rno.apple.com' """ # Input checks if not unix_command: print("Error: You have not provided the unix command to run. Check help") sys.exit(1) if not user: print("Error: You have not provided the user id with which to access the remote server") sys.exit(1) use_hosts_list = True if part_of_ext_git: if hosts is None: if environment is None or hostpattern is None: print("Error: You have to provide a hosts list, or an environment for the inventory file and a host pattern to run commands on") sys.exit(1) else: use_hosts_list = False inventory_file = os.path.join(project_dir, 'environs', environment, 'ansible', 'inventory.cfg') print(command_template.format(user=user, sudouser=sudouser, hosts=hostpattern, unix_command=unix_command, inventoryfile=inventory_file)) else: if hosts is None: print("Error: Not a part of the external git repo. Please provide a comma separated hosts list") sys.exit(1) else: print(command_template.format(user=user, sudouser=sudouser, hosts=hosts, unix_command=unix_command, inventoryfile='')) variable_manager = VariableManager() loader = DataLoader() options = None if not sudouser: options = Options(connection='ssh', module_path='', forks=100, become=False, become_method=None, become_user='', check=False) else: options = Options(connection='ssh', module_path='', forks=100, become=True, become_method='sudo', become_user=sudouser, check=False) passwords = dict(vault_pass='******') inventory = None run_on_hosts = None if use_hosts_list: host_list = hosts.split(',') run_on_hosts = host_list inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=host_list) else: inventory_file = os.path.join(project_dir, 'environs', environment, 'ansible', 'inventory.cfg') inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=inventory_file) run_on_hosts = to_unicode(hostpattern, errors='strict') variable_manager.set_inventory(inventory) results_callback = ResultsCollector() # create play with tasks play_source = dict( name="Ansible Play", hosts=run_on_hosts, gather_facts='no', tasks=[ dict(action=dict(module='shell', args=unix_command), register='shell_out'), dict(action=dict(module='debug', args=dict(msg='{{shell_out.stdout}}'))) ]) tqm = None try: play = Play().load(play_source, variable_manager=variable_manager, loader=loader) tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords, stdout_callback=results_callback, ) result = tqm.run(play) finally: if tqm is not None: tqm.cleanup() results_raw = {'success': {}, 'failed': {}, 'unreachable': {}} for host, result in results_callback.host_ok.items(): if result: results_raw['success'][host] = result._result['msg'] for host, result in results_callback.host_failed.items(): if result: results_raw['failed'][host] = result._result['msg'] for host, result in results_callback.host_unreachable.items(): if result: results_raw['unreachable'][host] = result._result['msg'] if output: with open(output, 'w') as f: results_raw_json = json.dumps(results_raw) f.write(results_raw_json) else: print("Results from all hosts:\n") pprint(results_raw, width=1, indent=2)
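A standalone sketch of the final reporting step above, with the redundant explicit close dropped since the with-block already closes the file; dump_results is a hypothetical name and the data is made up:

import json
from pprint import pprint

def dump_results(results_raw, output=None):
    if output:
        with open(output, 'w') as f:
            f.write(json.dumps(results_raw))  # no f.close() needed inside `with`
    else:
        print("Results from all hosts:\n")
        pprint(results_raw, width=1, indent=2)

# dump_results({'success': {'host1': 'up 3 days'}, 'failed': {}, 'unreachable': {}})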
def send_callback(self, method_name, *args, **kwargs): for callback_plugin in [self._stdout_callback] + self._callback_plugins: # a plugin that set self.disabled to True will not be called # see osx_say.py example for such a plugin if getattr(callback_plugin, 'disabled', False): continue # try to find v2 method, fallback to v1 method, ignore callback if no method found methods = [] for possible in [method_name, 'v2_on_any']: gotit = getattr(callback_plugin, possible, None) if gotit is None: gotit = getattr(callback_plugin, possible.replace('v2_',''), None) if gotit is not None: methods.append(gotit) for method in methods: try: # temporary hack, required due to a change in the callback API, so # we don't break backwards compatibility with callbacks which were # designed to use the original API # FIXME: target for removal and revert to the original code here after a year (2017-01-14) if method_name == 'v2_playbook_on_start': import inspect (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method) if 'playbook' in f_args: method(*args, **kwargs) else: method() else: method(*args, **kwargs) except Exception as e: #TODO: add config toggle to make this fatal or not? display.warning(u"Failure when attempting to use callback plugin (%s): %s" % (to_unicode(callback_plugin), to_unicode(e)))
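A compact sketch of the v2-with-v1-fallback method resolution used above, exercised against a dummy legacy plugin:

def resolve_callback_methods(plugin, method_name):
    methods = []
    for possible in (method_name, 'v2_on_any'):
        method = getattr(plugin, possible, None)
        if method is None:
            # fall back to the pre-v2 name, e.g. v2_runner_on_ok -> runner_on_ok
            method = getattr(plugin, possible.replace('v2_', ''), None)
        if method is not None:
            methods.append(method)
    return methods

class _LegacyPlugin:
    def runner_on_ok(self, *args, **kwargs):
        print('legacy handler called')

# resolve_callback_methods(_LegacyPlugin(), 'v2_runner_on_ok') finds runner_on_ok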
def is_encrypted(self, data): data = to_unicode(data) if data.startswith(HEADER): return True else: return False
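is_encrypted() only checks for a leading vault header. A self-contained sketch, assuming the usual '$ANSIBLE_VAULT' marker (the actual HEADER constant may differ by version):

def looks_vault_encrypted(data, header=u'$ANSIBLE_VAULT'):
    # assumption: header matches the vault HEADER constant in use
    if isinstance(data, bytes):
        data = data.decode('utf-8', 'replace')
    return data.startswith(header)

# looks_vault_encrypted('$ANSIBLE_VAULT;1.1;AES256\n6162...') -> True
# looks_vault_encrypted('plain: yaml')                        -> False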
def run(self, tmp=None, task_vars=None): ''' handler for template operations ''' if task_vars is None: task_vars = dict() result = super(ActionModule, self).run(tmp, task_vars) source = self._task.args.get('src', None) dest = self._task.args.get('dest', None) faf = self._task.first_available_file force = boolean(self._task.args.get('force', True)) state = self._task.args.get('state', None) if state is not None: result['failed'] = True result['msg'] = "'state' cannot be specified on a template" return result elif (source is None and faf is not None) or dest is None: result['failed'] = True result['msg'] = "src and dest are required" return result if tmp is None: tmp = self._make_tmp_path() if faf: source = self._get_first_available_file(faf, task_vars.get('_original_file', None), 'templates') if source is None: result['failed'] = True result['msg'] = "could not find src in first_available_file list" return result else: if self._task._role is not None: source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source) else: source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'templates', source) # Expand any user home dir specification dest = self._remote_expand_user(dest) directory_prepended = False if dest.endswith(os.sep): directory_prepended = True base = os.path.basename(source) dest = os.path.join(dest, base) # template the source data locally & get ready to transfer try: with open(source, 'r') as f: template_data = to_unicode(f.read()) try: template_uid = pwd.getpwuid(os.stat(source).st_uid).pw_name except Exception: template_uid = os.stat(source).st_uid temp_vars = task_vars.copy() temp_vars['template_host'] = os.uname()[1] temp_vars['template_path'] = source temp_vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(source)) temp_vars['template_uid'] = template_uid temp_vars['template_fullpath'] = os.path.abspath(source) temp_vars['template_run_date'] = datetime.datetime.now() managed_default = C.DEFAULT_MANAGED_STR managed_str = managed_default.format( host=temp_vars['template_host'], uid=temp_vars['template_uid'], file=to_bytes(temp_vars['template_path'])) temp_vars['ansible_managed'] = time.strftime(managed_str, time.localtime(os.path.getmtime(source))) # Create a new searchpath list to assign to the templar environment's file # loader, so that it knows about the other paths to find template files searchpath = [self._loader._basedir, os.path.dirname(source)] if self._task._role is not None: searchpath.insert(1, C.DEFAULT_ROLES_PATH) searchpath.insert(1, self._task._role._role_path) self._templar.environment.loader.searchpath = searchpath old_vars = self._templar._available_variables self._templar.set_available_variables(temp_vars) resultant = self._templar.template(template_data, preserve_trailing_newlines=True, escape_backslashes=False, convert_data=False) self._templar.set_available_variables(old_vars) except Exception as e: result['failed'] = True result['msg'] = type(e).__name__ + ": " + str(e) return result local_checksum = checksum_s(resultant) remote_checksum = self.get_checksum(dest, task_vars, not directory_prepended, source=source) if isinstance(remote_checksum, dict): # An error from remote_checksum is a dict; a valid return is a str result.update(remote_checksum) return result diff = {} new_module_args = self._task.args.copy() if (remote_checksum == '1') or (force and local_checksum != remote_checksum): result['changed'] = True # if showing diffs, we need to get the remote value if self._play_context.diff: diff = self._get_diff_data(dest, resultant, task_vars, source_file=False) if not self._play_context.check_mode: # do the actual work through copy xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user if self._play_context.become and self._play_context.become_user != 'root': self._remote_chmod('a+r', xfered) # run the copy module new_module_args.update( dict( src=xfered, dest=dest, original_basename=os.path.basename(source), follow=True, ), ) result.update(self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars)) if result.get('changed', False) and self._play_context.diff: result['diff'] = diff return result else: # when running the file module based on the template data, we do # not want the source filename (the name of the template) to be used, # since this would mess up links, so we clear the src param and tell # the module to follow links. When doing that, we have to set # original_basename to the template just in case the dest is # a directory. new_module_args.update( dict( src=None, original_basename=os.path.basename(source), follow=True, ), ) result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars)) return result
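The ansible_managed value above is built in two passes: str.format() fills in host/uid/file, then time.strftime() is applied against the template's mtime. A worked example with a made-up managed string (the real C.DEFAULT_MANAGED_STR differs):

import time

managed = 'Ansible managed: {file} on {host}'.format(file='motd.j2', host='web1')
stamp = time.strftime(managed + ', mtime %Y-%m-%d %H:%M', time.localtime(0))
# -> 'Ansible managed: motd.j2 on web1, mtime 1970-01-01 ...' (timezone-dependent)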
def post_validate(self, templar): ''' we can't tell that everything is of the right type until we have all the variables. Run basic types (from isa) as well as any _post_validate_<foo> functions. ''' # save the omit value for later checking omit_value = templar._available_variables.get('omit') for (name, attribute) in iteritems(self._get_base_attributes()): if getattr(self, name) is None: if not attribute.required: continue else: raise AnsibleParserError("the field '%s' is required but was not set" % name) elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'): # Intermediate objects like Play() won't have their fields validated by # default, as their values are often inherited by other objects and validated # later, so we don't want them to fail out early continue try: # Run the post-validator if present. These methods are responsible for # using the given templar to template the values, if required. method = getattr(self, '_post_validate_%s' % name, None) if method: value = method(attribute, getattr(self, name), templar) else: # if the attribute contains a variable, template it now value = templar.template(getattr(self, name)) # if this evaluated to the omit value, set the value back to # the default specified in the FieldAttribute and move on if omit_value is not None and value == omit_value: setattr(self, name, attribute.default) continue # and make sure the attribute is of the type it should be if value is not None: if attribute.isa == 'string': value = to_unicode(value) elif attribute.isa == 'int': value = int(value) elif attribute.isa == 'float': value = float(value) elif attribute.isa == 'bool': value = boolean(value) elif attribute.isa == 'percent': # special value, which may be an integer or float # with an optional '%' at the end if isinstance(value, string_types) and '%' in value: value = value.replace('%', '') value = float(value) elif attribute.isa == 'list': if value is None: value = [] elif not isinstance(value, list): value = [ value ] if attribute.listof is not None: for item in value: if not isinstance(item, attribute.listof): raise AnsibleParserError("the field '%s' should be a list of %s," " but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds()) elif attribute.required and attribute.listof == string_types: if item is None or item.strip() == "": raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds()) elif attribute.isa == 'set': if value is None: value = set() else: if not isinstance(value, (list, set)): value = [ value ] if not isinstance(value, set): value = set(value) elif attribute.isa == 'dict': if value is None: value = dict() elif not isinstance(value, dict): raise TypeError("%s is not a dictionary" % value) # and assign the massaged value back to the attribute field setattr(self, name, value) except (TypeError, ValueError) as e: raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s." " Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds()) except UndefinedError as e: if templar._fail_on_undefined_errors and name != 'name': raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined." " The error was: %s" % (name,e), obj=self.get_ds())
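A reduced sketch of the isa-driven coercion in post_validate(), covering a few of the simple types; the bool handling here is an approximation of the boolean() helper, and the templating, omit, and listof checks are left out:

def coerce_isa(value, isa):
    if isa == 'string':
        return str(value)
    if isa == 'int':
        return int(value)
    if isa == 'bool':
        return value if isinstance(value, bool) else str(value).lower() in ('yes', 'true', '1')
    if isa == 'percent':
        # may be an integer or float, with an optional trailing '%'
        if isinstance(value, str) and '%' in value:
            value = value.replace('%', '')
        return float(value)
    if isa == 'list':
        return value if isinstance(value, list) else [value]
    return value

# coerce_isa('50%', 'percent') -> 50.0; coerce_isa('x', 'list') -> ['x']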
def run(self, tmp=None, task_vars=None): """Run the method""" try: remote_user = task_vars.get( 'ansible_ssh_user') or self._play_context.remote_user if not tmp: tmp = self._make_tmp_path(remote_user) except TypeError: if not tmp: tmp = self._make_tmp_path() _status, _vars = self._load_options_and_status(task_vars=task_vars) if not _status: return _vars temp_vars = task_vars.copy() template_host = temp_vars['template_host'] = os.uname()[1] source = temp_vars['template_path'] = _vars['source'] temp_vars['template_mtime'] = datetime.datetime.fromtimestamp( os.path.getmtime(source)) try: template_uid = temp_vars['template_uid'] = pwd.getpwuid( os.stat(source).st_uid).pw_name except Exception: template_uid = temp_vars['template_uid'] = os.stat(source).st_uid managed_default = C.DEFAULT_MANAGED_STR managed_str = managed_default.format(host=template_host, uid=template_uid, file=to_bytes(source)) temp_vars['ansible_managed'] = time.strftime( managed_str, time.localtime(os.path.getmtime(source))) temp_vars['template_fullpath'] = os.path.abspath(source) temp_vars['template_run_date'] = datetime.datetime.now() with open(source, 'r') as f: template_data = to_unicode(f.read()) self._templar.environment.loader.searchpath = _vars['searchpath'] self._templar.set_available_variables(temp_vars) resultant = self._templar.template(template_data, preserve_trailing_newlines=True, escape_backslashes=False, convert_data=False) # Access to protected method is unavoidable in Ansible self._templar.set_available_variables( self._templar._available_variables) if _vars['config_overrides']: type_merger = getattr(self, CONFIG_TYPES.get(_vars['config_type'])) resultant = type_merger(config_overrides=_vars['config_overrides'], resultant=resultant, list_extend=_vars.get('list_extend', True)) # Re-template the resultant object as it may have new data within it # as provided by an override variable. resultant = self._templar.template(resultant, preserve_trailing_newlines=True, escape_backslashes=False, convert_data=False) # run the copy module new_module_args = self._task.args.copy() # Access to protected method is unavoidable in Ansible transferred_data = self._transfer_data( self._connection._shell.join_path(tmp, 'source'), resultant) new_module_args.update( dict( src=transferred_data, dest=_vars['dest'], original_basename=os.path.basename(source), follow=True, ), ) # Remove data types that are not available to the copy module new_module_args.pop('config_overrides', None) new_module_args.pop('config_type', None) new_module_args.pop('list_extend', None) # Run the copy module return self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars)
def to_yaml(a, *args, **kw): '''Make verbose, human readable yaml''' transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw) return to_unicode(transformed)
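to_yaml() defers to an AnsibleDumper; roughly the same effect with the stock PyYAML dumper, for illustration (requires PyYAML; the Ansible dumper adds handling for its own wrapped types):

import yaml

def to_yaml_plain(data, **kw):
    return yaml.dump(data, allow_unicode=True, default_flow_style=False, **kw)

# print(to_yaml_plain({'hosts': ['web1', 'web2']}))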
def _get_diff(self, difflist): if not isinstance(difflist, list): difflist = [difflist] ret = [] for diff in difflist: try: with warnings.catch_warnings(): warnings.simplefilter('ignore') if 'dst_binary' in diff: ret.append( "diff skipped: destination file appears to be binary\n" ) if 'src_binary' in diff: ret.append( "diff skipped: source file appears to be binary\n") if 'dst_larger' in diff: ret.append( "diff skipped: destination file size is greater than %d\n" % diff['dst_larger']) if 'src_larger' in diff: ret.append( "diff skipped: source file size is greater than %d\n" % diff['src_larger']) if 'before' in diff and 'after' in diff: # format complex structures into 'files' for x in ['before', 'after']: if isinstance(diff[x], dict): diff[x] = json.dumps(diff[x], sort_keys=True, indent=4) if 'before_header' in diff: before_header = "before: %s" % diff['before_header'] else: before_header = 'before' if 'after_header' in diff: after_header = "after: %s" % diff['after_header'] else: after_header = 'after' differ = difflib.unified_diff( to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), fromfile=before_header, tofile=after_header, fromfiledate='', tofiledate='', n=C.DIFF_CONTEXT) has_diff = False for line in differ: has_diff = True if line.startswith('+'): line = stringc(line, C.COLOR_DIFF_ADD) elif line.startswith('-'): line = stringc(line, C.COLOR_DIFF_REMOVE) elif line.startswith('@@'): line = stringc(line, C.COLOR_DIFF_LINES) ret.append(line) if has_diff: ret.append('\n') if 'prepared' in diff: ret.append(to_unicode(diff['prepared'])) except UnicodeDecodeError: ret.append( ">> the files are different, but the diff library cannot compare unicode strings\n\n" ) return u''.join(ret)
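The heart of _get_diff() is difflib.unified_diff over the before/after text. A self-contained sketch without the color handling and binary/size guards:

import difflib

def simple_unified_diff(before, after, before_header='before', after_header='after'):
    return ''.join(difflib.unified_diff(
        before.splitlines(True), after.splitlines(True),
        fromfile=before_header, tofile=after_header))

# print(simple_unified_diff('a\nb\n', 'a\nc\n'))  # shows -b / +c hunks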
def run(self): ''' The main executor entrypoint, where we determine if the specified task requires looping and either runs the task with self._run_loop() or self._execute(). After that, the returned results are parsed and returned as a dict. ''' display.debug("in run()") try: # lookup plugins need to know if this task is executing from # a role, so that it can properly find files/templates/etc. roledir = None if self._task._role: roledir = self._task._role._role_path self._job_vars['roledir'] = roledir items = self._get_loop_items() if items is not None: if len(items) > 0: item_results = self._run_loop(items) # loop through the item results, and remember the changed/failed # result flags based on any item there. changed = False failed = False for item in item_results: if 'changed' in item and item['changed']: changed = True if 'failed' in item and item['failed']: failed = True # create the overall result item, and set the changed/failed # flags there to reflect the overall result of the loop res = dict(results=item_results) if changed: res['changed'] = True if failed: res['failed'] = True res['msg'] = 'One or more items failed' else: res['msg'] = 'All items completed' else: res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[]) else: display.debug("calling self._execute()") res = self._execute() display.debug("_execute() done") # make sure changed is set in the result, if it's not present if 'changed' not in res: res['changed'] = False def _clean_res(res): if isinstance(res, dict): for k in res.keys(): res[k] = _clean_res(res[k]) elif isinstance(res, list): for idx, item in enumerate(res): res[idx] = _clean_res(item) elif isinstance(res, UnsafeProxy): return res._obj elif isinstance(res, binary_type): return to_unicode(res, errors='strict') return res display.debug("dumping result to json") res = _clean_res(res) display.debug("done dumping result, returning") return res except AnsibleError as e: return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr')) except Exception as e: return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_unicode(traceback.format_exc()), stdout='') finally: try: self._connection.close() except AttributeError: pass except Exception as e: display.debug(u"error closing connection: %s" % to_unicode(e))
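_clean_res() above walks a result structure and decodes any bytes values. A standalone version of the same walk (the UnsafeProxy unwrapping is omitted):

def clean_result(res):
    if isinstance(res, dict):
        return dict((k, clean_result(v)) for k, v in res.items())
    if isinstance(res, list):
        return [clean_result(item) for item in res]
    if isinstance(res, bytes):
        return res.decode('utf-8', 'strict')
    return res

# clean_result({'stdout': b'ok', 'results': [b'a', 'b']})
#   -> {'stdout': 'ok', 'results': ['a', 'b']}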
def parse_inventory(self, host_list): if isinstance(host_list, string_types): if "," in host_list: host_list = host_list.split(",") host_list = [h for h in host_list if h and h.strip()] self.parser = None # Always create the 'all' and 'ungrouped' groups, even if host_list is # empty: in this case we will subsequently add the implicit 'localhost' to it. ungrouped = Group('ungrouped') all = Group('all') all.add_child_group(ungrouped) self.groups = dict(all=all, ungrouped=ungrouped) if host_list is None: pass elif isinstance(host_list, list): for h in host_list: try: (host, port) = parse_address(h, allow_ranges=False) except AnsibleError as e: display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e)) host = h port = None new_host = Host(host, port) all.add_host(new_host) if new_host.name in C.LOCALHOST: if self.localhost is None: self.localhost = new_host else: display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (new_host.name, self.localhost.name)) elif self._loader.path_exists(host_list): #TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins' if self.is_directory(host_list): # Ensure basedir is inside the directory host_list = os.path.join(self.host_list, "") self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list) else: self.parser = get_file_parser(host_list, self.groups, self._loader) vars_loader.add_directory(self._basedir, with_subdir=True) if not self.parser: # should never happen, but JIC raise AnsibleError("Unable to parse %s as an inventory source" % host_list) else: display.warning("Host file not found: %s" % to_unicode(host_list)) self._vars_plugins = [x for x in vars_loader.all(self)] # set group vars from group_vars/ files and vars plugins for g in self.groups: group = self.groups[g] group.vars = combine_vars(group.vars, self.get_group_variables(group.name)) self.get_group_vars(group) # set host vars from host_vars/ files and vars plugins for host in self.get_hosts(): host.vars = combine_vars(host.vars, self.get_host_variables(host.name)) self.get_host_vars(host)
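parse_inventory() accepts either a comma-separated string or a list of host names. A sketch of that normalization plus the optional :port suffix; note the real parse_address() also validates ranges and handles IPv6, which this naive partition does not, and unlike the method above this treats a single comma-free string as one host rather than as an inventory file path:

def split_host_list(host_list):
    if isinstance(host_list, str):
        host_list = host_list.split(',')
    hosts = []
    for h in host_list:
        h = h.strip()
        if not h:
            continue
        name, _, port = h.partition(':')  # crude; breaks on IPv6 literals
        hosts.append((name, int(port) if port.isdigit() else None))
    return hosts

# split_host_list('web1,web2:2222') -> [('web1', None), ('web2', 2222)]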
def run(self, iterator, play_context): ''' The linear strategy is simple - get the next task and queue it for all hosts, then wait for the queue to drain before moving on to the next task ''' # iterate over each task, while there is one left to run result = True work_to_do = True while work_to_do and not self._tqm._terminated: try: display.debug("getting the remaining hosts for this loop") hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] display.debug("done getting the remaining hosts for this loop") # queue up this task for each host in the inventory callback_sent = False work_to_do = False host_results = [] host_tasks = self._get_next_task_lockstep(hosts_left, iterator) # skip control skip_rest = False choose_step = True # flag set if task is set to any_errors_fatal any_errors_fatal = False results = [] for (host, task) in host_tasks: if not task: continue if self._tqm._terminated: break run_once = False work_to_do = True # test to see if the task across all hosts points to an action plugin which # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we # will only send this task to the first host in the list. try: action = action_loader.get(task.action, class_only=True) except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin action = None # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) if task._role and task._role.has_run(host): # If there is no metadata, the default behavior is to not allow duplicates, # if there is metadata, check to see if the allow_duplicates flag was set to true if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates: display.debug("'%s' skipped because role has already run" % task) continue if task.action == 'meta': self._execute_meta(task, play_context, iterator) else: # handle step if needed, skip meta actions as they are used internally if self._step and choose_step: if self._take_step(task): choose_step = False else: skip_rest = True break display.debug("getting variables") task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) self.add_tqm_variables(task_vars, play=iterator._play) templar = Templar(loader=self._loader, variables=task_vars) display.debug("done getting variables") run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False) if (task.any_errors_fatal or run_once) and not task.ignore_errors: any_errors_fatal = True if not callback_sent: display.debug("sending task start callback, copying the task so we can template it temporarily") saved_name = task.name display.debug("done copying, going to template now") try: task.name = text_type(templar.template(task.name, fail_on_undefined=False)) display.debug("done templating") except: # just ignore any errors during task name templating, # we don't care if it just shows the raw name display.debug("templating failed for some reason") pass display.debug("here goes the callback...") self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False) task.name = saved_name callback_sent = True display.debug("sending task start callback") self._blocked_hosts[host.get_name()] = True self._queue_task(host, task, task_vars, play_context) # if we're bypassing the host loop, break out now if run_once: break results += self._process_pending_results(iterator, one_pass=True) # go to next host/task group if skip_rest: continue display.debug("done queuing things up, now waiting for results queue to drain") results += self._wait_on_pending_results(iterator) host_results.extend(results) if not work_to_do and len(iterator.get_failed_hosts()) > 0: display.debug("out of hosts to run on") self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') result = False break try: included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, inventory=self._inventory, loader=self._loader, variable_manager=self._variable_manager) except AnsibleError as e: return False include_failure = False if len(included_files) > 0: display.debug("we have included files to process") noop_task = Task() noop_task.action = 'meta' noop_task.args['_raw_params'] = 'noop' noop_task.set_loader(iterator._play._loader) display.debug("generating all_blocks data") all_blocks = dict((host, []) for host in hosts_left) display.debug("done generating all_blocks data") for included_file in included_files: display.debug("processing included file: %s" % included_file._filename) # included hosts get the task list while those excluded get an equal-length # list of noop tasks, to make sure that they continue running in lock-step try: new_blocks = self._load_included_file(included_file, iterator=iterator) display.debug("iterating over new_blocks loaded from include file") for new_block in new_blocks: task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=included_file._task) display.debug("filtering new block on tags") final_block = new_block.filter_tagged_tasks(play_context, task_vars) display.debug("done filtering new block on tags") noop_block = Block(parent_block=task._block) noop_block.block = [noop_task for t in new_block.block] noop_block.always = [noop_task for t in new_block.always] noop_block.rescue = [noop_task for t in new_block.rescue] for host in hosts_left: if host in included_file._hosts: all_blocks[host].append(final_block) else: all_blocks[host].append(noop_block) display.debug("done iterating over new_blocks loaded from include file") except AnsibleError as e: for host in included_file._hosts: self._tqm._failed_hosts[host.name] = True iterator.mark_host_failed(host) display.error(to_unicode(e), wrap_text=False) include_failure = True continue # finally go through all of the hosts and append the # accumulated blocks to their list of tasks display.debug("extending task lists for all hosts with included blocks") for host in hosts_left: iterator.add_tasks(host, all_blocks[host]) display.debug("done extending task lists") display.debug("done processing included files") display.debug("results queue empty") display.debug("checking for any_errors_fatal") failed_hosts = [] for res in results: if res.is_failed(): failed_hosts.append(res._host.name) # if any_errors_fatal and we had an error, mark all hosts as failed if any_errors_fatal and len(failed_hosts) > 0: for host in hosts_left: # don't double-mark hosts, or the iterator will potentially # fail them out of the rescue/always states if host.name not in failed_hosts: self._tqm._failed_hosts[host.name] = True iterator.mark_host_failed(host) display.debug("done checking for any_errors_fatal") display.debug("checking for max_fail_percentage") if iterator._play.max_fail_percentage is not None and len(results) > 0: percentage = iterator._play.max_fail_percentage / 100.0 if (len(self._tqm._failed_hosts) / len(results)) > percentage: for host in hosts_left: # don't double-mark hosts, or the iterator will potentially # fail them out of the rescue/always states if host.name not in failed_hosts: self._tqm._failed_hosts[host.name] = True iterator.mark_host_failed(host) display.debug("done checking for max_fail_percentage") except (IOError, EOFError) as e: display.debug("got IOError/EOFError in task loop: %s" % e) # most likely an abort, return failed return False # run the base class run() method, which executes the cleanup function # and runs any outstanding handlers which have been triggered return super(StrategyModule, self).run(iterator, play_context, result)
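A sketch of the max_fail_percentage arithmetic that closes the loop above; note the code divides by the number of results in this batch, not by the total host count:

def exceeds_max_fail(failed_count, result_count, max_fail_percentage):
    if max_fail_percentage is None or result_count == 0:
        return False
    # float() guards against py2 integer division
    return (float(failed_count) / result_count) > (max_fail_percentage / 100.0)

# exceeds_max_fail(3, 10, 25) -> True   (30% failed exceeds the 25% allowed)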