def run(self):
    """Run the ansible command

    Subclasses must implement this method.  It does the actual work of
    running an Ansible command.
    """
    display.vv(to_text(self.parser.get_version()))

    if C.CONFIG_FILE:
        display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
    else:
        display.v(u"No config file found; using defaults")

    # warn about deprecated config options
    for deprecated in C.config.DEPRECATED:
        name = deprecated[0]
        why = deprecated[1]['why']
        if 'alternatives' in deprecated[1]:
            alt = ', use %s instead' % deprecated[1]['alternatives']
        else:
            alt = ''
        ver = deprecated[1]['version']
        display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)

    # warn about typing issues with configuration entries
    for unable in C.config.UNABLE:
        display.warning("Unable to set correct type for configuration entry: %s" % unable)

def on_become(self, passwd=None):
    if self._get_prompt().endswith(b'enable#'):
        return

    out = self._exec_cli_command('show privilege')
    out = to_text(out, errors='surrogate_then_replace').strip()
    if 'Disabled' in out:
        raise AnsibleConnectionFailure('Feature privilege is not enabled')

    # if already at privilege level 15 return
    if '15' in out:
        return

    cmd = {u'command': u'enable'}
    if passwd:
        cmd[u'prompt'] = to_text(r"(?i)[\r\n]?Password: $", errors='surrogate_or_strict')
        cmd[u'answer'] = passwd
        cmd[u'prompt_retry_check'] = True

    try:
        self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
        prompt = self._get_prompt()
        if prompt is None or not prompt.strip().endswith(b'enable#'):
            raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)
    except AnsibleConnectionFailure as e:
        prompt = self._get_prompt()
        raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))

def run_command(module, commands):
    conn = get_connection(module)
    responses = list()
    for cmd in to_list(commands):
        try:
            if isinstance(cmd, str):
                cmd = json.loads(cmd)
            command = cmd.get('command', None)
            prompt = cmd.get('prompt', None)
            answer = cmd.get('answer', None)
            sendonly = cmd.get('sendonly', False)
            newline = cmd.get('newline', True)
        except Exception:
            # cmd is a plain string command with no prompt/answer metadata
            command = cmd
            prompt = None
            answer = None
            sendonly = False
            newline = True

        out = conn.get(command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline)

        try:
            responses.append(to_text(out, errors='surrogate_or_strict'))
        except UnicodeError:
            module.fail_json(msg=u'failed to decode output from {0}: {1}'.format(cmd, to_text(out)))
    return responses

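# Hedged usage sketch for run_command() above: each entry in `commands` may be
# a plain string or a JSON-encoded dict carrying prompt/answer metadata.  The
# module/connection objects come from the calling Ansible module, so this
# standalone snippet only exercises the json.loads() branch and its defaults.
import json

_cmd = json.dumps({'command': 'reload', 'prompt': r'\[confirm\]', 'answer': 'y'})
_parsed = json.loads(_cmd)
assert _parsed.get('command') == 'reload'
assert _parsed.get('sendonly', False) is False   # same default run_command() applies
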
def do_encrypt(result, encrypt, salt_size=None, salt=None):
    if PASSLIB_AVAILABLE:
        try:
            crypt = getattr(passlib.hash, encrypt)
        except AttributeError:
            raise AnsibleError("passlib does not support '%s' algorithm" % encrypt)

        if salt_size:
            result = crypt.encrypt(result, salt_size=salt_size)
        elif salt:
            if crypt._salt_is_bytes:
                salt = to_bytes(salt, encoding='ascii', errors='strict')
            else:
                salt = to_text(salt, encoding='ascii', errors='strict')
            result = crypt.encrypt(result, salt=salt)
        else:
            result = crypt.encrypt(result)
    else:
        raise AnsibleError("passlib must be installed to encrypt vars_prompt values")

    # Hashes from passlib.hash should be represented as ascii strings of hex
    # digits so this should not traceback.  If it's not representable as such
    # we need to traceback and then blacklist such algorithms because it may
    # impact calling code.
    return to_text(result, errors='strict')

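# Hedged usage sketch for do_encrypt() above, assuming passlib is installed:
# it shows the underlying passlib call that getattr(passlib.hash, encrypt)
# resolves for encrypt='sha512_crypt'.
import passlib.hash

_hashed = passlib.hash.sha512_crypt.encrypt(u'pa55w0rd', salt='0123456789abcdef')
assert _hashed.startswith('$6$')   # sha512_crypt output carries the $6$ prefix
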
def parse_rpc_error(self, rpc_error):
    if self.check_rc:
        try:
            error_root = fromstring(rpc_error)
            root = Element('root')
            root.append(error_root)

            error_list = root.findall('.//nc:rpc-error', NS_MAP)
            if not error_list:
                raise ConnectionError(to_text(rpc_error, errors='surrogate_then_replace'))

            warnings = []
            for error in error_list:
                message_ele = error.find('./nc:error-message', NS_MAP)

                if message_ele is None:
                    message_ele = error.find('./nc:error-info', NS_MAP)

                message = message_ele.text if message_ele is not None else None

                severity = error.find('./nc:error-severity', NS_MAP).text

                if severity == 'warning' and self.ignore_warning and message is not None:
                    warnings.append(message)
                else:
                    raise ConnectionError(to_text(rpc_error, errors='surrogate_then_replace'))
            return warnings
        except XMLSyntaxError:
            raise ConnectionError(rpc_error)

def run(self, terms, variables=None, **kwargs):
    validate_certs = kwargs.get('validate_certs', True)
    split_lines = kwargs.get('split_lines', True)
    use_proxy = kwargs.get('use_proxy', True)

    ret = []
    for term in terms:
        display.vvvv("url lookup connecting to %s" % term)
        try:
            response = open_url(term, validate_certs=validate_certs, use_proxy=use_proxy)
        except HTTPError as e:
            raise AnsibleError("Received HTTP error for %s : %s" % (term, str(e)))
        except URLError as e:
            raise AnsibleError("Failed lookup url for %s : %s" % (term, str(e)))
        except SSLValidationError as e:
            raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, str(e)))
        except ConnectionError as e:
            raise AnsibleError("Error connecting to %s: %s" % (term, str(e)))

        if split_lines:
            for line in response.read().splitlines():
                ret.append(to_text(line))
        else:
            ret.append(to_text(response.read()))
    return ret

def _start_connection(self):
    '''
    Starts the persistent connection
    '''
    master, slave = pty.openpty()

    python = sys.executable

    def find_file_in_path(filename):
        # Check $PATH first, followed by same directory as sys.argv[0]
        paths = os.environ['PATH'].split(os.pathsep) + [os.path.dirname(sys.argv[0])]
        for dirname in paths:
            fullpath = os.path.join(dirname, filename)
            if os.path.isfile(fullpath):
                return fullpath

        raise AnsibleError("Unable to find location of '%s'" % filename)

    p = subprocess.Popen(
        [python, find_file_in_path('ansible-connection'), to_text(os.getppid())],
        stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdin = os.fdopen(master, 'wb', 0)
    os.close(slave)

    # Need to force a protocol that is compatible with both py2 and py3.
    # That would be protocol=2 or less.
    # Also need to force a protocol that excludes certain control chars as
    # stdin in this case is a pty and control chars will cause problems.
    # that means only protocol=0 will work.
    src = cPickle.dumps(self._play_context.serialize(), protocol=0)
    stdin.write(src)

    stdin.write(b'\n#END_INIT#\n')
    stdin.flush()

    (stdout, stderr) = p.communicate()
    stdin.close()

    if p.returncode == 0:
        result = json.loads(to_text(stdout, errors='surrogate_then_replace'))
    else:
        try:
            result = json.loads(to_text(stderr, errors='surrogate_then_replace'))
        except getattr(json.decoder, 'JSONDecodeError', ValueError):
            # JSONDecodeError only available on Python 3.5+
            result = {'error': to_text(stderr, errors='surrogate_then_replace')}

    if 'messages' in result:
        for msg in result.get('messages'):
            display.vvvv('%s' % msg, host=self._play_context.remote_addr)

    if 'error' in result:
        if self._play_context.verbosity > 2:
            if result.get('exception'):
                msg = "The full traceback is:\n" + result['exception']
                display.display(msg, color=C.COLOR_ERROR)
        raise AnsibleError(result['error'])

    return result['socket_path']

def get_device_info(self):
    device_info = {}

    device_info['network_os'] = 'slxos'
    reply = self.get(b'show version')
    data = to_text(reply, errors='surrogate_or_strict').strip()

    match = re.search(r'SLX\-OS Operating System Version: (\S+)', data)
    if match:
        device_info['network_os_version'] = match.group(1)

    reply = self.get(b'show chassis')
    data = to_text(reply, errors='surrogate_or_strict').strip()

    match = re.search(r'^Chassis Name:(\s+)(\S+)', data, re.M)
    if match:
        device_info['network_os_model'] = match.group(2)

    reply = self.get(b'show running-config | inc "switch-attributes host-name"')
    data = to_text(reply, errors='surrogate_or_strict').strip()

    match = re.search(r'switch-attributes host-name (\S+)', data, re.M)
    if match:
        device_info['network_os_hostname'] = match.group(1)

    return device_info

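# Standalone check of the version regex in get_device_info() above; the
# sample output line is an assumption for illustration, not captured device
# output.
import re

_data = 'SLX-OS Operating System Version: 17r.1.01a'
_match = re.search(r'SLX\-OS Operating System Version: (\S+)', _data)
assert _match and _match.group(1) == '17r.1.01a'
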
def v2_runner_on_ok(self, result, **kwargs):
    """Run when a task finishes correctly."""

    failed = 'failed' in result._result
    unreachable = 'unreachable' in result._result

    if 'print_action' in result._task.tags or failed or unreachable or \
            self._display.verbosity > 1:
        self._print_task()
        self.last_skipped = False
        msg = to_text(result._result.get('msg', '')) or \
            to_text(result._result.get('reason', ''))
        self._print_host_or_item(result._host,
                                 result._result.get('changed', False),
                                 msg,
                                 result._result.get('diff', None),
                                 is_host=True,
                                 error=failed or unreachable,
                                 stdout=result._result.get('module_stdout', None),
                                 stderr=result._result.get('exception', None),
                                 )
        if 'results' in result._result:
            for r in result._result['results']:
                failed = 'failed' in r
                self._print_host_or_item(r['item'],
                                         r.get('changed', False),
                                         to_text(r.get('msg', '')),
                                         r.get('diff', None),
                                         is_host=False,
                                         error=failed,
                                         stdout=r.get('module_stdout', None),
                                         stderr=r.get('exception', None),
                                         )
    else:
        self.last_skipped = True
        print('.', end="")

def __init__(self, loader, groups, filename=C.DEFAULT_HOST_LIST):
    self.filename = filename

    # Start with an empty host list and whatever groups we're passed in
    # (which should include the default 'all' and 'ungrouped' groups).

    self.hosts = {}
    self.patterns = {}
    self.groups = groups

    # Read in the hosts, groups, and variables defined in the
    # inventory file.

    with open(filename, 'rb') as fh:
        data = fh.read()
        try:
            # Faster to do to_text once on a long string than many
            # times on smaller strings
            data = to_text(data, errors='surrogate_or_strict')
            data = [line for line in data.splitlines()
                    if not (line.startswith(u';') or line.startswith(u'#'))]
        except UnicodeError:
            # Skip comment lines here to avoid potential undecodable
            # errors in comments: https://github.com/ansible/ansible/issues/17593
            data = [to_text(line, errors='surrogate_or_strict')
                    for line in data.splitlines()
                    if not (line.startswith(b';') or line.startswith(b'#'))]
        self._parse(data)

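# Minimal sketch of the decode-then-filter step in __init__() above, with
# bytes.decode standing in for to_text(errors='surrogate_or_strict').
_data = b'# a comment\nhost1\n; another comment\nhost2\n'.decode('utf-8')
_lines = [line for line in _data.splitlines()
          if not (line.startswith(u';') or line.startswith(u'#'))]
assert _lines == [u'host1', u'host2']
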
def load_config(module, commands, commit=False, comment=None):
    connection = get_connection(module)

    out = connection.edit_config(commands)

    diff = None
    if module._diff:
        out = connection.get('compare')
        out = to_text(out, errors='surrogate_or_strict')

        if not out.startswith('No changes'):
            out = connection.get('show')
            diff = to_text(out, errors='surrogate_or_strict').strip()

    if commit:
        try:
            out = connection.commit(comment)
        except ConnectionError:
            connection.discard_changes()
            module.fail_json(msg='commit failed: %s' % out)
        else:
            connection.get('exit')
    else:
        connection.discard_changes()

    if diff:
        return diff

def run_commands(module, commands, check_rc=True):
    responses = list()
    connection = get_connection(module)

    for cmd in to_list(commands):
        try:
            cmd = json.loads(cmd)
            command = cmd['command']
            prompt = cmd['prompt']
            answer = cmd['answer']
        except Exception:
            # cmd is a plain string command with no prompt/answer metadata
            command = cmd
            prompt = None
            answer = None

        out = connection.get(command, prompt, answer)

        try:
            out = to_text(out, errors='surrogate_or_strict')
        except UnicodeError:
            module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))

        responses.append(out)

    return responses

def run_commands(self, commands, check_rc=True):
    """Run list of commands on remote device and return results
    """
    responses = list()

    for item in to_list(commands):
        if item['output'] == 'json' and not is_json(item['command']):
            cmd = '%s | json' % item['command']
        elif item['output'] == 'text' and is_json(item['command']):
            cmd = item['command'].split('|')[0]
        else:
            cmd = item['command']

        rc, out, err = self.exec_command(cmd)
        out = to_text(out, errors='surrogate_then_replace')
        if check_rc and rc != 0:
            self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))

        try:
            out = self._module.from_json(out)
        except ValueError:
            out = str(out).strip()

        responses.append(out)
    return responses

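# Sketch of the '| json' munging in run_commands() above.  is_json() is not
# shown in this excerpt; the stand-in below assumes it simply detects a
# trailing '| json' filter on the command string.
def _is_json(cmd):
    return str(cmd).rsplit('|', 1)[-1].strip() == 'json'

_item = {'command': 'show version', 'output': 'json'}
_cmd = '%s | json' % _item['command'] if not _is_json(_item['command']) else _item['command']
assert _cmd == 'show version | json'
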
def parse(self, inventory, loader, path, cache=True):
    super(InventoryModule, self).parse(inventory, loader, path)

    self._filename = path

    try:
        # Read in the hosts, groups, and variables defined in the inventory file.
        if self.loader:
            (b_data, private) = self.loader._get_file_contents(path)
        else:
            b_path = to_bytes(path, errors='surrogate_or_strict')
            with open(b_path, 'rb') as fh:
                b_data = fh.read()

        try:
            # Faster to do to_text once on a long string than many
            # times on smaller strings
            data = to_text(b_data, errors='surrogate_or_strict').splitlines()
        except UnicodeError:
            # Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
            data = []
            for line in b_data.splitlines():
                if line and line[0] in self.b_COMMENT_MARKERS:
                    # Replace is okay for comment lines
                    # data.append(to_text(line, errors='surrogate_then_replace'))
                    # Currently we only need these lines for accurate lineno in errors
                    data.append(u'')
                else:
                    # Non-comment lines still have to be valid utf-8
                    data.append(to_text(line, errors='surrogate_or_strict'))

        self._parse(path, data)
    except Exception as e:
        raise AnsibleParserError(e)

def find_connection(connection, module_params, vpn_connection_id=None):
    ''' Looks for a unique VPN connection. Uses find_connection_response() to return the
        connection found, None, or raise an error if there were multiple viable connections. '''

    filters = module_params.get('filters')

    # vpn_connection_id may be provided via module option; takes precedence over any filter values
    if not vpn_connection_id and module_params.get('vpn_connection_id'):
        vpn_connection_id = module_params.get('vpn_connection_id')

    if not isinstance(vpn_connection_id, list) and vpn_connection_id:
        vpn_connection_id = [to_text(vpn_connection_id)]
    elif isinstance(vpn_connection_id, list):
        vpn_connection_id = [to_text(connection) for connection in vpn_connection_id]

    formatted_filter = []
    # if vpn_connection_id is provided it will take precedence over any filters since it is a unique identifier
    if not vpn_connection_id:
        formatted_filter = create_filter(module_params, provided_filters=filters)

    # see if there is a unique matching connection
    try:
        if vpn_connection_id:
            existing_conn = connection.describe_vpn_connections(VpnConnectionIds=vpn_connection_id,
                                                                Filters=formatted_filter)
        else:
            existing_conn = connection.describe_vpn_connections(Filters=formatted_filter)
    except (BotoCoreError, ClientError) as e:
        raise VPNConnectionException(msg="Failed while describing VPN connection.",
                                     exception=e)

    return find_connection_response(connections=existing_conn)

def modify_module(module_name, module_path, module_args, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
                  become_method=None, become_user=None, become_password=None, environment=None):
    """
    Used to insert chunks of code into modules before transfer rather than
    doing regular python imports.  This allows for more efficient transfer in
    a non-bootstrapping scenario by not moving extra files over the wire and
    also takes care of embedding arguments in the transferred modules.

    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.

    Example:

    from ansible.module_utils.basic import *

       ... will result in the insertion of basic.py into the module
       from the module_utils/ directory in the source tree.

    For powershell, this code effectively no-ops, as the exec wrapper requires access to
    a number of properties not available here.
    """
    task_vars = {} if task_vars is None else task_vars
    environment = {} if environment is None else environment

    with open(module_path, 'rb') as f:

        # read in the module source
        b_module_data = f.read()

    (b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, module_compression,
                                                                async_timeout=async_timeout, become=become, become_method=become_method,
                                                                become_user=become_user, become_password=become_password,
                                                                environment=environment)

    if module_style == 'binary':
        return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
    elif shebang is None:
        lines = b_module_data.split(b"\n", 1)
        if lines[0].startswith(b"#!"):
            shebang = lines[0].strip()
            args = shlex.split(str(shebang[2:]))
            interpreter = args[0]
            interpreter = to_bytes(interpreter)

            new_shebang = to_bytes(_get_shebang(interpreter, task_vars, args[1:])[0],
                                   errors='surrogate_or_strict', nonstring='passthru')

            if new_shebang:
                lines[0] = shebang = new_shebang

            if os.path.basename(interpreter).startswith(b'python'):
                lines.insert(1, to_bytes(ENCODING_STRING))
        else:
            # No shebang, assume a binary module?
            pass

        b_module_data = b"\n".join(lines)
    else:
        shebang = to_bytes(shebang, errors='surrogate_or_strict')

    return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))

def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
    """Retrieve information about the Vault and clean the data

    When data is saved, it has a header prepended and is formatted into 80
    character lines.  This method extracts the information from the header
    and then removes the header and the inserted newlines.  The string
    returned is suitable for processing by the Cipher classes.

    :arg b_vaulttext_envelope: byte str containing the data from a save file
    :arg default_vault_id: vault id to use when the envelope does not carry one
    :returns: a tuple of (ciphertext byte str suitable for passing to a
        Cipher class's decrypt() function, envelope version, cipher name,
        vault id)
    """
    # used by decrypt
    default_vault_id = default_vault_id or C.DEFAULT_VAULT_IDENTITY

    b_tmpdata = b_vaulttext_envelope.splitlines()
    b_tmpheader = b_tmpdata[0].strip().split(b';')

    b_version = b_tmpheader[1].strip()
    cipher_name = to_text(b_tmpheader[2].strip())
    vault_id = default_vault_id

    # Only attempt to find vault_id if the vault file is version 1.2 or newer
    if len(b_tmpheader) >= 4:
        vault_id = to_text(b_tmpheader[3].strip())

    b_ciphertext = b''.join(b_tmpdata[1:])

    return b_ciphertext, b_version, cipher_name, vault_id

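# Hedged sketch of the envelope format parse_vaulttext_envelope() consumes: a
# '$ANSIBLE_VAULT;<version>;<cipher>[;<vault-id>]' header line followed by hex
# payload lines.  The split below mirrors the b';' header handling above.
_b_envelope = b'$ANSIBLE_VAULT;1.2;AES256;dev\n6162636465666768\n'
_b_header = _b_envelope.splitlines()[0].strip().split(b';')
assert _b_header[1] == b'1.2'      # b_version
assert _b_header[2] == b'AES256'   # cipher_name
assert _b_header[3] == b'dev'      # vault_id, present from envelope version 1.2 on
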
def path_dwim_relative_stack(self, paths, dirname, source):
    '''
    find one file in first path in stack taking roles into account and adding play basedir as fallback

    :arg paths: A list of text strings which are the paths to look for the filename in.
    :arg dirname: A text string representing a directory.  The directory
        is prepended to the source to form the path to search for.
    :arg source: A text string which is the filename to search for
    :rtype: A text string
    :returns: An absolute path to the filename ``source``
    '''
    b_dirname = to_bytes(dirname)
    b_source = to_bytes(source)

    result = None
    if source is None:
        display.warning('Invalid request to find a file that matches a "null" value')
    elif source and (source.startswith('~') or source.startswith(os.path.sep)):
        # path is absolute, no relative needed, check existence and return source
        test_path = unfrackpath(b_source)
        if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')):
            result = test_path
    else:
        search = []
        for path in paths:
            upath = unfrackpath(path)
            b_upath = to_bytes(upath, errors='surrogate_or_strict')
            b_mydir = os.path.dirname(b_upath)

            # if path is in role and 'tasks' not there already, add it into the search
            if b_upath.endswith(b'tasks') and os.path.exists(os.path.join(b_upath, b'main.yml')) \
                    or os.path.exists(os.path.join(b_upath, b'tasks/main.yml')) \
                    or os.path.exists(os.path.join(b_mydir, b'tasks/main.yml')):
                if b_mydir.endswith(b'tasks'):
                    search.append(os.path.join(os.path.dirname(b_mydir), b_dirname, b_source))
                    search.append(os.path.join(b_mydir, b_source))
                else:
                    # don't add dirname if user already is using it in source
                    if b_source.split(b'/')[0] == b_dirname:
                        search.append(os.path.join(b_upath, b_source))
                    else:
                        search.append(os.path.join(b_upath, b_dirname, b_source))
                    search.append(os.path.join(b_upath, b'tasks', b_source))
            elif b_dirname not in b_source.split(b'/'):
                # don't add dirname if user already is using it in source
                search.append(os.path.join(b_upath, b_dirname, b_source))
                search.append(os.path.join(b_upath, b_source))

        # always append basedir as last resort
        search.append(os.path.join(to_bytes(self.get_basedir()), b_dirname, b_source))
        search.append(os.path.join(to_bytes(self.get_basedir()), b_source))

        display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search)))
        for b_candidate in search:
            display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate)))
            if os.path.exists(b_candidate):
                result = to_text(b_candidate)
                break

    return result

def _clean_data(self, orig_data):
    ''' remove jinja2 template tags from data '''

    if hasattr(orig_data, '__ENCRYPTED__'):
        ret = orig_data

    elif isinstance(orig_data, list):
        clean_list = []
        for list_item in orig_data:
            clean_list.append(self._clean_data(list_item))
        ret = clean_list

    elif isinstance(orig_data, (dict, Mapping)):
        clean_dict = {}
        for k in orig_data:
            clean_dict[self._clean_data(k)] = self._clean_data(orig_data[k])
        ret = clean_dict

    elif isinstance(orig_data, string_types):
        # This will error with str data (needs unicode), but all strings should
        # already be converted.  If you get an exception, the problem is at the
        # data origin; do not add to_text here.
        with contextlib.closing(StringIO(orig_data)) as data:
            # these variables keep track of opening block locations, as we only
            # want to replace matched pairs of print/block tags
            print_openings = []
            block_openings = []
            for mo in self._clean_regex.finditer(orig_data):
                token = mo.group(0)
                token_start = mo.start(0)

                if token[0] == self.environment.variable_start_string[0]:
                    if token == self.environment.block_start_string:
                        block_openings.append(token_start)
                    elif token == self.environment.variable_start_string:
                        print_openings.append(token_start)

                elif token[1] == self.environment.variable_end_string[1]:
                    prev_idx = None
                    if token == self.environment.block_end_string and block_openings:
                        prev_idx = block_openings.pop()
                    elif token == self.environment.variable_end_string and print_openings:
                        prev_idx = print_openings.pop()

                    if prev_idx is not None:
                        # replace the opening
                        data.seek(prev_idx, os.SEEK_SET)
                        data.write(to_text(self.environment.comment_start_string))
                        # replace the closing
                        data.seek(token_start, os.SEEK_SET)
                        data.write(to_text(self.environment.comment_end_string))

                else:
                    raise AnsibleError("Error while cleaning data for safety: unhandled regex match")

            ret = data.getvalue()

    else:
        ret = orig_data

    return ret

def run_commands(module, commands, check_rc=True):
    responses = list()
    for cmd in to_list(commands):
        rc, out, err = exec_command(module, cmd)
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
        responses.append(to_text(out, errors='surrogate_or_strict'))
    return responses

def _winrm_connect(self):
    '''
    Establish a WinRM connection over HTTP/HTTPS.
    '''
    display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
                (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)

    winrm_host = self._winrm_host
    if HAS_IPADDRESS:
        display.vvvv("checking if winrm_host %s is an IPv6 address" % winrm_host)
        try:
            ipaddress.IPv6Address(winrm_host)
        except ipaddress.AddressValueError:
            pass
        else:
            winrm_host = "[%s]" % winrm_host

    netloc = '%s:%d' % (winrm_host, self._winrm_port)
    endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
    errors = []
    for transport in self._winrm_transport:
        if transport == 'kerberos':
            if not HAVE_KERBEROS:
                errors.append('kerberos: the python kerberos library is not installed')
                continue
            if self._kerb_managed:
                self._kerb_auth(self._winrm_user, self._winrm_pass)
        display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
        try:
            winrm_kwargs = self._winrm_kwargs.copy()
            if self._winrm_connection_timeout:
                winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
                winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1
            protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)

            # open the shell from connect so we know we're able to talk to the server
            if not self.shell_id:
                self.shell_id = protocol.open_shell(codepage=65001)  # UTF-8
                display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)

            return protocol
        except Exception as e:
            err_msg = to_text(e).strip()
            if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
                raise AnsibleError('the connection attempt timed out')
            m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
            if m:
                code = int(m.groups()[0])
                if code == 401:
                    err_msg = 'the specified credentials were rejected by the server'
                elif code == 411:
                    return protocol
            errors.append(u'%s: %s' % (transport, err_msg))
            display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
    if errors:
        raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
    else:
        raise AnsibleError('No transport found for WinRM connection')

def fabric_ansible_display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
    """ Display a message to the user

    Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
    """
    nocolor = msg
    if color:
        msg = stringc(msg, color)

    if not log_only:
        if not msg.endswith(u'\n'):
            msg2 = msg + u'\n'
        else:
            msg2 = msg

        msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
        if sys.version_info >= (3,):
            # Convert back to text string on python3
            # We first convert to a byte string so that we get rid of
            # characters that are invalid in the user's locale
            msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')

        # Note: After Display() class is refactored need to update the
        # log capture code in 'bin/ansible-connection' (and other
        # relevant places).
        if not stderr:
            fileobj = sys.stdout
        else:
            fileobj = sys.stderr

        fileobj.write(msg2)

        try:
            fileobj.flush()
        except IOError as e:
            # Ignore EPIPE in case fileobj has been prematurely closed, eg.
            # when piping to "head -n1"
            if e.errno != errno.EPIPE:
                raise

    if logger:
        msg2 = nocolor.lstrip(u'\n')
        msg2 = to_bytes(msg2)
        if sys.version_info >= (3,):
            # Convert back to text string on python3
            # We first convert to a byte string so that we get rid of
            # characters that are invalid in the user's locale
            msg2 = to_text(msg2, self._output_encoding(stderr=stderr))

        if color == CONST.COLOR_ERROR:
            logger.error(msg2)
        else:
            logger.info(msg2)

def user_add_to_group(module):
    # Get username and group_name from module parameters, and api base url
    # along with validate_certs from the cyberark_session established
    username = module.params["username"]
    group_name = module.params["group_name"]
    cyberark_session = module.params["cyberark_session"]
    api_base_url = cyberark_session["api_base_url"]
    validate_certs = cyberark_session["validate_certs"]

    # Prepare result, end_point, headers and payload
    result = {}
    end_point = "/PasswordVault/WebServices/PIMServices.svc/Groups/{0}/Users".format(group_name)

    headers = {'Content-Type': 'application/json'}
    headers["Authorization"] = cyberark_session["token"]
    payload = {"UserName": username}

    try:
        # execute REST action
        response = open_url(
            api_base_url + end_point,
            method="POST",
            headers=headers,
            data=json.dumps(payload),
            validate_certs=validate_certs)

        result = {"result": {}}

        return (True, result, response.getcode())

    except (HTTPError, httplib.HTTPException) as http_exception:

        exception_text = to_text(http_exception)
        if http_exception.code == 409 and "ITATS262E" in exception_text:
            # User is already a member of the group
            return (False, None, http_exception.code)
        else:
            module.fail_json(
                msg=("Error while performing user_add_to_group. "
                     "Please validate parameters provided."
                     "\n*** end_point=%s%s\n ==> %s" % (api_base_url, end_point, exception_text)),
                payload=payload,
                headers=headers,
                exception=traceback.format_exc(),
                status_code=http_exception.code)

    except Exception as unknown_exception:

        module.fail_json(
            msg=("Unknown error while performing user_add_to_group."
                 "\n*** end_point=%s%s\n%s" % (api_base_url, end_point, to_text(unknown_exception))),
            payload=payload,
            headers=headers,
            status_code=-1)

def json(self):
    if not self.body:
        if "body" in self.info:
            return json.loads(to_text(self.info["body"]))
        return None
    try:
        return json.loads(to_text(self.body))
    except ValueError:
        return None

def modify_module(module_name, module_path, module_args, task_vars=None, module_compression='ZIP_STORED'):
    """
    Used to insert chunks of code into modules before transfer rather than
    doing regular python imports.  This allows for more efficient transfer in
    a non-bootstrapping scenario by not moving extra files over the wire and
    also takes care of embedding arguments in the transferred modules.

    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.

    Example:

    from ansible.module_utils.basic import *

       ... will result in the insertion of basic.py into the module
       from the module_utils/ directory in the source tree.

    For powershell, there's equivalent conventions like this:

    # POWERSHELL_COMMON

    which results in the inclusion of the common code from powershell.ps1
    """
    # avoid a mutable default argument
    task_vars = {} if task_vars is None else task_vars

    with open(module_path, 'rb') as f:

        # read in the module source
        module_data = f.read()

    (module_data, module_style, shebang) = _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression)

    if module_style == 'binary':
        return (module_data, module_style, to_text(shebang, nonstring='passthru'))
    elif shebang is None:
        lines = module_data.split(b"\n", 1)
        if lines[0].startswith(b"#!"):
            shebang = lines[0].strip()
            args = shlex.split(str(shebang[2:]))
            interpreter = args[0]
            interpreter = to_bytes(interpreter)

            new_shebang = to_bytes(_get_shebang(interpreter, task_vars, args[1:])[0],
                                   errors='surrogate_or_strict', nonstring='passthru')

            if new_shebang:
                lines[0] = shebang = new_shebang

            if os.path.basename(interpreter).startswith(b'python'):
                lines.insert(1, to_bytes(ENCODING_STRING))
        else:
            # No shebang, assume a binary module?
            pass

        module_data = b"\n".join(lines)
    else:
        shebang = to_bytes(shebang, errors='surrogate_or_strict')

    return (module_data, module_style, to_text(shebang, nonstring='passthru'))

def run(self):
    '''
    Called when the process is started.  Pushes the result onto the
    results queue.  We also remove the host from the blocked hosts list, to
    signify that they are ready for their next task.
    '''

    # import cProfile, pstats, StringIO
    # pr = cProfile.Profile()
    # pr.enable()

    if HAS_ATFORK:
        atfork()

    try:
        # execute the task and build a TaskResult from the result
        display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
        executor_result = TaskExecutor(
            self._host,
            self._task,
            self._task_vars,
            self._play_context,
            self._new_stdin,
            self._loader,
            self._shared_loader_obj,
            self._rslt_q
        ).run()

        display.debug("done running TaskExecutor() for %s/%s" % (self._host, self._task))
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host.name, self._task._uuid, executor_result)

        # put the result on the result queue
        display.debug("sending task result")
        self._rslt_q.put(task_result)
        display.debug("done sending task result")

    except AnsibleConnectionFailure:
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host.name, self._task._uuid, dict(unreachable=True))
        self._rslt_q.put(task_result, block=False)

    except Exception as e:
        if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
            try:
                self._host.vars = dict()
                self._host.groups = []
                task_result = TaskResult(self._host.name, self._task._uuid, dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''))
                self._rslt_q.put(task_result, block=False)
            except Exception:
                display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
                display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))

    display.debug("WORKER PROCESS EXITING")

def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
    if msg and msg != '\n':
        nowTime = time.time()
        runTime = nowTime - self._lastTime
        self._lastTime = nowTime
        if msg[0] == '\n':
            msg = '\n(%06.1f) ' % runTime + msg[1:]
        else:
            msg = '(%06.1f) ' % runTime + msg

    nocolor = msg
    if color:
        msg = stringc(msg, color)

    if not log_only and self._isPrint:
        if not msg.endswith(u'\n'):
            msg2 = msg + u'\n'
        else:
            msg2 = msg

        msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
        if sys.version_info >= (3,):
            # Convert back to text string on python3
            # We first convert to a byte string so that we get rid of
            # characters that are invalid in the user's locale
            msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')

        if not stderr:
            fileobj = sys.stdout
        else:
            fileobj = sys.stderr

        fileobj.write(msg2)

        try:
            fileobj.flush()
        except IOError as e:
            # Ignore EPIPE in case fileobj has been prematurely closed, eg.
            # when piping to "head -n1"
            if e.errno != errno.EPIPE:
                raise

    if not screen_only:
        msg2 = nocolor.lstrip(u'\n')
        msg2 = to_bytes(msg2)
        if sys.version_info >= (3,):
            # Convert back to text string on python3
            # We first convert to a byte string so that we get rid of
            # characters that are invalid in the user's locale
            msg2 = to_text(msg2, self._output_encoding(stderr=stderr))

        if color == C.COLOR_ERROR:
            self.append('error : ' + msg2)
        else:
            self.append(msg2)

def run_commands(module, commands, check_rc=True):
    responses = list()
    commands = to_commands(module, to_list(commands))
    for cmd in commands:
        cmd = module.jsonify(cmd)
        rc, out, err = exec_command(module, cmd)
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
        responses.append(to_text(out, errors='surrogate_then_replace'))
    return responses

def execute_import(self):
    """ used to import a role into Ansible Galaxy """

    colors = {
        'INFO': 'normal',
        'WARNING': C.COLOR_WARN,
        'ERROR': C.COLOR_ERROR,
        'SUCCESS': C.COLOR_OK,
        'FAILED': C.COLOR_ERROR,
    }

    if len(self.args) < 2:
        raise AnsibleError("Expected a github_username and github_repository. Use --help.")

    github_repo = to_text(self.args.pop(), errors='surrogate_or_strict')
    github_user = to_text(self.args.pop(), errors='surrogate_or_strict')

    if self.options.check_status:
        task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
    else:
        # Submit an import request
        task = self.api.create_import_task(github_user, github_repo,
                                           reference=self.options.reference,
                                           role_name=self.options.role_name)

        if len(task) > 1:
            # found multiple roles associated with github_user/github_repo
            display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
                            color='yellow')
            display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
            for t in task:
                display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
            display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
                            color=C.COLOR_CHANGED)
            return 0
        # found a single role as expected
        display.display("Successfully submitted import request %d" % task[0]['id'])
        if not self.options.wait:
            display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
            display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))

    if self.options.check_status or self.options.wait:
        # Get the status of the import
        msg_list = []
        finished = False
        while not finished:
            task = self.api.get_import_task(task_id=task[0]['id'])
            for msg in task[0]['summary_fields']['task_messages']:
                if msg['id'] not in msg_list:
                    display.display(msg['message_text'], color=colors[msg['message_type']])
                    msg_list.append(msg['id'])
            if task[0]['state'] in ['SUCCESS', 'FAILED']:
                finished = True
            else:
                time.sleep(10)

    return 0

def _get_diff(self, difflist):

    if not isinstance(difflist, list):
        difflist = [difflist]

    ret = []
    for diff in difflist:
        try:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                if 'dst_binary' in diff:
                    ret.append("diff skipped: destination file appears to be binary\n")
                if 'src_binary' in diff:
                    ret.append("diff skipped: source file appears to be binary\n")
                if 'dst_larger' in diff:
                    ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
                if 'src_larger' in diff:
                    ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
                if 'before' in diff and 'after' in diff:
                    # format complex structures into 'files'
                    for x in ['before', 'after']:
                        if isinstance(diff[x], dict):
                            diff[x] = json.dumps(diff[x], sort_keys=True, indent=4)
                    if 'before_header' in diff:
                        before_header = "before: %s" % diff['before_header']
                    else:
                        before_header = 'before'
                    if 'after_header' in diff:
                        after_header = "after: %s" % diff['after_header']
                    else:
                        after_header = 'after'
                    differ = difflib.unified_diff(to_text(diff['before']).splitlines(True),
                                                  to_text(diff['after']).splitlines(True),
                                                  fromfile=before_header,
                                                  tofile=after_header,
                                                  fromfiledate='',
                                                  tofiledate='',
                                                  n=C.DIFF_CONTEXT)
                    has_diff = False
                    for line in differ:
                        has_diff = True
                        if line.startswith('+'):
                            line = stringc(line, C.COLOR_DIFF_ADD)
                        elif line.startswith('-'):
                            line = stringc(line, C.COLOR_DIFF_REMOVE)
                        elif line.startswith('@@'):
                            line = stringc(line, C.COLOR_DIFF_LINES)
                        ret.append(line)
                    if has_diff:
                        ret.append('\n')
                if 'prepared' in diff:
                    ret.append(to_text(diff['prepared']))
        except UnicodeDecodeError:
            ret.append(">> the files are different, but the diff library cannot compare unicode strings\n\n")
    return u''.join(ret)

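# Standalone sketch of the difflib call _get_diff() builds on; C.DIFF_CONTEXT
# is Ansible configuration, so a literal context size stands in here.
import difflib

_before = 'line one\nline two\n'
_after = 'line one\nline 2\n'
_diff = list(difflib.unified_diff(_before.splitlines(True), _after.splitlines(True),
                                  fromfile='before', tofile='after', n=3))
assert _diff[0].startswith('--- before')
assert any(line.startswith('+line 2') for line in _diff)
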
def set_vm_power_state(content, vm, state, force, timeout=0):
    """
    Set the power status for a VM determined by the current and
    requested states. force is forceful
    """
    facts = gather_vm_facts(content, vm)
    expected_state = state.replace('_', '').replace('-', '').lower()
    current_state = facts['hw_power_status'].lower()
    result = dict(
        changed=False,
        failed=False,
    )

    # Need Force
    if not force and current_state not in ['poweredon', 'poweredoff']:
        result['failed'] = True
        result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state
        return result

    # State is not already true
    if current_state != expected_state:
        task = None
        try:
            if expected_state == 'poweredoff':
                task = vm.PowerOff()
            elif expected_state == 'poweredon':
                task = vm.PowerOn()
            elif expected_state == 'restarted':
                if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
                    task = vm.Reset()
                else:
                    result['failed'] = True
                    result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state
            elif expected_state == 'suspended':
                if current_state in ('poweredon', 'poweringon'):
                    task = vm.Suspend()
                else:
                    result['failed'] = True
                    result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state
            elif expected_state in ['shutdownguest', 'rebootguest']:
                if current_state == 'poweredon':
                    if vm.guest.toolsRunningStatus == 'guestToolsRunning':
                        if expected_state == 'shutdownguest':
                            task = vm.ShutdownGuest()
                            if timeout > 0:
                                result.update(wait_for_poweroff(vm, timeout))
                        else:
                            task = vm.RebootGuest()
                        # Set result['changed'] immediately because
                        # shutdown and reboot return None.
                        result['changed'] = True
                    else:
                        result['failed'] = True
                        result['msg'] = "VMware tools should be installed for guest shutdown/reboot"
                else:
                    result['failed'] = True
                    result['msg'] = "Virtual machine %s must be in poweredon state for guest shutdown/reboot" % vm.name
            else:
                result['failed'] = True
                result['msg'] = "Unsupported expected state provided: %s" % expected_state

        except Exception as e:
            result['failed'] = True
            result['msg'] = to_text(e)

        if task:
            wait_for_task(task)
            if task.info.state == 'error':
                result['failed'] = True
                result['msg'] = task.info.error.msg
            else:
                result['changed'] = True

    # need to get new metadata if changed
    result['instance'] = gather_vm_facts(content, vm)

    return result

def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False,
                convert_data=False):
    if self.jinja2_native and not isinstance(data, string_types):
        return data

    # For preserving the number of input newlines in the output (used
    # later in this method)
    data_newlines = _count_newlines_from_end(data)

    if fail_on_undefined is None:
        fail_on_undefined = self._fail_on_undefined_errors

    has_template_overrides = data.startswith(JINJA2_OVERRIDE)

    try:
        # NOTE Creating an overlay that lives only inside do_template means that overrides are not applied
        # when templating nested variables in AnsibleJ2Vars where Templar.environment is used, not the overlay.
        # This is historic behavior that is kept for backwards compatibility.
        if overrides:
            myenv = self.environment.overlay(overrides)
        elif has_template_overrides:
            myenv = self.environment.overlay()
        else:
            myenv = self.environment

        # Get jinja env overrides from template
        if has_template_overrides:
            eol = data.find('\n')
            line = data[len(JINJA2_OVERRIDE):eol]
            data = data[eol + 1:]
            for pair in line.split(','):
                (key, val) = pair.split(':')
                key = key.strip()
                setattr(myenv, key, ast.literal_eval(val.strip()))

        if escape_backslashes:
            # Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
            data = _escape_backslashes(data, myenv)

        try:
            t = myenv.from_string(data)
        except TemplateSyntaxError as e:
            raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)))
        except Exception as e:
            if 'recursion' in to_native(e):
                raise AnsibleError("recursive loop detected in template string: %s" % to_native(data))
            else:
                return data

        if disable_lookups:
            t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup

        jvars = AnsibleJ2Vars(self, t.globals)

        self.cur_context = new_context = t.new_context(jvars, shared=True)
        rf = t.root_render_func(new_context)

        try:
            if self.jinja2_native:
                res = ansible_native_concat(rf)
            else:
                res = ansible_concat(rf, convert_data, myenv.variable_start_string)

            unsafe = getattr(new_context, 'unsafe', False)
            if unsafe:
                res = wrap_var(res)
        except TypeError as te:
            if 'AnsibleUndefined' in to_native(te):
                errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
                errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
                raise AnsibleUndefinedVariable(errmsg)
            else:
                display.debug("failing because of a type error, template data is: %s" % to_text(data))
                raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))

        if isinstance(res, string_types) and preserve_trailing_newlines:
            # The low level calls above do not preserve the newline
            # characters at the end of the input data, so we calculate the
            # difference in newlines and append them to the resulting output
            # for parity
            #
            # Using Environment's keep_trailing_newline instead would
            # result in change in behavior when trailing newlines
            # would be kept also for included templates, for example:
            # "Hello {% include 'world.txt' %}!" would render as
            # "Hello world\n!\n" instead of "Hello world!\n".
            res_newlines = _count_newlines_from_end(res)
            if data_newlines > res_newlines:
                res += self.environment.newline_sequence * (data_newlines - res_newlines)
                if unsafe:
                    res = wrap_var(res)
        return res
    except (UndefinedError, AnsibleUndefinedVariable) as e:
        if fail_on_undefined:
            raise AnsibleUndefinedVariable(e)
        else:
            display.debug("Ignoring undefined failure: %s" % to_text(e))
            return data

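# Sketch of the '#jinja2:' override-header parsing in do_template() above;
# JINJA2_OVERRIDE is '#jinja2:' in ansible.template.
import ast

JINJA2_OVERRIDE = '#jinja2:'
_data = "#jinja2: trim_blocks:True, lstrip_blocks:False\nHello {{ name }}"
_eol = _data.find('\n')
_line = _data[len(JINJA2_OVERRIDE):_eol]
_overrides = {}
for _pair in _line.split(','):
    _key, _val = _pair.split(':')
    _overrides[_key.strip()] = ast.literal_eval(_val.strip())
assert _overrides == {'trim_blocks': True, 'lstrip_blocks': False}
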
def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
    if not self.protocol:
        self.protocol = self._winrm_connect()
        self._connected = True
    if from_exec:
        display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
    else:
        display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
    command_id = None
    try:
        stdin_push_failed = False
        command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args),
                                               console_mode_stdin=(stdin_iterator is None))

        # TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that
        # comes from this)
        try:
            if stdin_iterator:
                for (data, is_last) in stdin_iterator:
                    self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
        except Exception:
            # format_exc() renders the current exception; it takes no exception argument
            from traceback import format_exc
            display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % format_exc())
            stdin_push_failed = True

        if stdin_push_failed:
            raise AnsibleError('winrm send_input failed')

        # NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
        # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
        response = Response(self.protocol.get_command_output(self.shell_id, command_id))

        # TODO: check result from response and set stdin_push_failed if we have nonzero
        if from_exec:
            display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
        else:
            display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)

        display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
        display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)

        if stdin_push_failed:
            raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err))

        return response
    finally:
        if command_id:
            self.protocol.cleanup_command(self.shell_id, command_id)

def _lookup(self, name, *args, **kwargs):
    instance = lookup_loader.get(name, loader=self._loader, templar=self)

    if instance is None:
        raise AnsibleError("lookup plugin (%s) not found" % name)

    wantlist = kwargs.pop('wantlist', False)
    allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
    errors = kwargs.pop('errors', 'strict')

    loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
    # safely catch run failures per #5059
    try:
        ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
    except (AnsibleUndefinedVariable, UndefinedError) as e:
        raise AnsibleUndefinedVariable(e)
    except AnsibleOptionsError as e:
        # invalid options given to lookup, just reraise
        raise e
    except AnsibleLookupError as e:
        # lookup handled error but still decided to bail
        msg = 'Lookup failed but the error is being ignored: %s' % to_native(e)
        if errors == 'warn':
            display.warning(msg)
        elif errors == 'ignore':
            display.display(msg, log_only=True)
        else:
            raise e
        return [] if wantlist else None
    except Exception as e:
        # errors not handled by lookup
        msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
              (name, type(e), to_text(e))
        if errors == 'warn':
            display.warning(msg)
        elif errors == 'ignore':
            display.display(msg, log_only=True)
        else:
            display.vvv('exception during Jinja2 execution: {0}'.format(format_exc()))
            raise AnsibleError(to_native(msg), orig_exc=e)
        return [] if wantlist else None

    if ran and allow_unsafe is False:
        if self.cur_context:
            self.cur_context.unsafe = True

        if wantlist:
            return wrap_var(ran)

        try:
            if isinstance(ran[0], NativeJinjaText):
                ran = wrap_var(NativeJinjaText(",".join(ran)))
            else:
                ran = wrap_var(",".join(ran))
        except TypeError:
            # Lookup Plugins should always return lists.  Throw an error if that's not
            # the case:
            if not isinstance(ran, Sequence):
                raise AnsibleError("The lookup plugin '%s' did not return a list." % name)

            # The TypeError we can recover from is when the value *inside* of the list
            # is not a string
            if len(ran) == 1:
                ran = wrap_var(ran[0])
            else:
                ran = wrap_var(ran)

    return ran

def get(self):
    response_data = self.get_resource().data
    return to_text(response_data)

def quote(a):
    ''' return its argument quoted for shell usage '''
    return shlex_quote(to_text(a))

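# Hedged example of the quoting quote() performs; shlex_quote resolves to the
# stdlib shlex.quote on Python 3 (six.moves.shlex_quote on Python 2).
from shlex import quote as shlex_quote

assert shlex_quote(u'a b') == u"'a b'"
assert shlex_quote(u'safe') == u'safe'   # no quoting needed
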
def _lookup(self, name, *args, **kwargs):
    instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)

    if instance is not None:
        wantlist = kwargs.pop('wantlist', False)
        allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
        errors = kwargs.pop('errors', 'strict')

        from ansible.utils.listify import listify_lookup_plugin_terms
        loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
        # safely catch run failures per #5059
        try:
            ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            raise AnsibleUndefinedVariable(e)
        except Exception as e:
            if self._fail_on_lookup_errors:
                msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
                      (name, type(e), to_text(e))
                if errors == 'warn':
                    display.warning(msg)
                elif errors == 'ignore':
                    display.display(msg, log_only=True)
                else:
                    raise AnsibleError(to_native(msg))
            ran = None

        if ran and not allow_unsafe:
            if wantlist:
                ran = wrap_var(ran)
            else:
                try:
                    ran = UnsafeProxy(",".join(ran))
                except TypeError:
                    # Lookup Plugins should always return lists.  Throw an error if that's not
                    # the case:
                    if not isinstance(ran, Sequence):
                        raise AnsibleError("The lookup plugin '%s' did not return a list." % name)

                    # The TypeError we can recover from is when the value *inside* of the list
                    # is not a string
                    if len(ran) == 1:
                        ran = wrap_var(ran[0])
                    else:
                        ran = wrap_var(ran)

            if self.cur_context:
                self.cur_context.unsafe = True

        return ran
    else:
        raise AnsibleError("lookup plugin (%s) not found" % name)

def run(self, tmp=None, task_vars=None):
    del tmp  # tmp no longer has any effect

    socket_path = None

    if self._play_context.connection == 'network_cli':
        provider = self._task.args.get('provider', {})
        if any(provider.values()):
            display.warning('provider is unnecessary when using network_cli and will be ignored')
    elif self._play_context.connection == 'local':
        provider = load_provider(dellos10_provider_spec, self._task.args)
        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'dellos10'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        pc.port = int(provider['port'] or self._play_context.port or 22)
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
        pc.become = provider['authorize'] or False
        if pc.become:
            pc.become_method = 'enable'
        pc.become_pass = provider['auth_pass']

        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: '
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

        task_vars['ansible_socket'] = socket_path

    # make sure we are in the right cli context which should be
    # enable mode and not config module
    if socket_path is None:
        socket_path = self._connection.socket_path

    conn = Connection(socket_path)
    out = conn.get_prompt()
    while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
        display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
        conn.send_command('exit')
        out = conn.get_prompt()

    result = super(ActionModule, self).run(task_vars=task_vars)
    return result

def b64decode(string, encoding='utf-8'):
    return to_text(base64.b64decode(to_bytes(string, errors='surrogate_or_strict')), encoding=encoding)

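# Round-trip sketch for b64decode() above, with plain codec calls standing in
# for to_text/to_bytes from ansible.module_utils._text.
import base64

_encoded = base64.b64encode(u'h\xe9llo'.encode('utf-8'))
assert base64.b64decode(_encoded).decode('utf-8') == u'h\xe9llo'
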
def map_obj_to_ele(module, want, top, value_map=None, param=None):
    if not HAS_LXML:
        module.fail_json(msg="lxml is not installed.")

    if not param:
        param = module.params

    root = Element("root")
    top_ele = top.split("/")
    ele = SubElement(root, top_ele[0])

    if len(top_ele) > 1:
        for item in top_ele[1:-1]:
            ele = SubElement(ele, item)
    container = ele
    state = param.get("state")
    active = param.get("active")
    if active:
        oper = "active"
    else:
        oper = "inactive"

    # build xml subtree
    if container.tag != top_ele[-1]:
        node = SubElement(container, top_ele[-1])
    else:
        node = container

    for fxpath, attributes in want.items():
        for attr in attributes:
            tag_only = attr.get("tag_only", False)
            leaf_only = attr.get("leaf_only", False)
            value_req = attr.get("value_req", False)
            is_key = attr.get("is_key", False)
            parent_attrib = attr.get("parent_attrib", True)
            value = attr.get("value")
            field_top = attr.get("top")

            # operation 'delete' is added as element attribute
            # only if it is key or leaf only node
            if state == "absent" and not (is_key or leaf_only):
                continue

            # convert param value to device specific value
            if value_map and fxpath in value_map:
                value = value_map[fxpath].get(value)

            if (value is not None) or tag_only or leaf_only:
                ele = node
                if field_top:
                    # eg: top = 'system/syslog/file'
                    #     field_top = 'system/syslog/file/contents'
                    # <file>
                    #   <name>test</name>
                    #   <contents>
                    #   </contents>
                    # </file>
                    ele_list = root.xpath(top + "/" + field_top)

                    if not len(ele_list):
                        fields = field_top.split("/")
                        ele = node
                        for item in fields:
                            inner_ele = root.xpath(top + "/" + item)
                            if len(inner_ele):
                                ele = inner_ele[0]
                            else:
                                ele = SubElement(ele, item)
                    else:
                        ele = ele_list[0]

                if value is not None and not isinstance(value, bool):
                    value = to_text(value, errors="surrogate_then_replace")

                if fxpath:
                    tags = fxpath.split("/")
                    for item in tags:
                        ele = SubElement(ele, item)

                if tag_only:
                    if state == "present":
                        if not value:
                            # if value of tag_only node is false, delete the node
                            ele.set("delete", "delete")
                elif leaf_only:
                    if state == "present":
                        ele.set(oper, oper)
                        ele.text = value
                    else:
                        ele.set("delete", "delete")
                        # Add value of leaf node if required while deleting.
                        # in some cases if value is present while deleting, it
                        # can result in error, hence the check
                        if value_req:
                            ele.text = value
                        if is_key:
                            par = ele.getparent()
                            par.set("delete", "delete")
                else:
                    ele.text = value
                    par = ele.getparent()
                    if parent_attrib:
                        if state == "present":
                            # set replace attribute at parent node
                            if not par.attrib.get("replace"):
                                par.set("replace", "replace")

                            # set active/inactive at parent node
                            if not par.attrib.get(oper):
                                par.set(oper, oper)
                        else:
                            par.set("delete", "delete")

    return root.getchildren()[0]

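# Minimal lxml sketch of the root/top handling at the start of
# map_obj_to_ele(), assuming lxml is installed (the HAS_LXML check above).
from lxml.etree import Element, SubElement, tostring

_root = Element('root')
_ele = SubElement(_root, 'configuration')
SubElement(_ele, 'system')
assert tostring(_root) == b'<root><configuration><system/></configuration></root>'
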
def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False, return_results=False):
    """
    Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
    to the inventory base directory or in the same directory as the playbook.  Variables in the playbook
    dir will win over the inventory dir if files are in both.
    """

    results = {}
    scan_pass = 0
    _basedir = self._basedir
    _playbook_basedir = self._playbook_basedir

    # look in both the inventory base directory and the playbook base directory
    # unless we do an update for a new playbook base dir
    if not new_pb_basedir and _playbook_basedir:
        basedirs = [_basedir, _playbook_basedir]
    else:
        basedirs = [_basedir]

    for basedir in basedirs:
        # this can happen from particular API usages, particularly if not run
        # from /usr/bin/ansible-playbook
        if basedir in ('', None):
            basedir = './'

        scan_pass = scan_pass + 1

        # it's not an error if the directory does not exist, keep moving
        if not os.path.exists(basedir):
            continue

        # save work of second scan if the directories are the same
        if _basedir == _playbook_basedir and scan_pass != 1:
            continue

        # Before trying to load vars from file, check that the directory contains relevant file names
        if host is None and any(map(lambda ext: group.name + ext in self._group_vars_files, C.YAML_FILENAME_EXTENSIONS)):
            # load vars in dir/group_vars/name_of_group
            base_path = to_text(os.path.abspath(os.path.join(to_bytes(basedir), b"group_vars/" + to_bytes(group.name))),
                                errors='surrogate_or_strict')
            host_results = self._variable_manager.add_group_vars_file(base_path, self._loader)
            if return_results:
                results = combine_vars(results, host_results)
        elif group is None and any(map(lambda ext: host.name + ext in self._host_vars_files, C.YAML_FILENAME_EXTENSIONS)):
            # same for hostvars in dir/host_vars/name_of_host
            base_path = to_text(os.path.abspath(os.path.join(to_bytes(basedir), b"host_vars/" + to_bytes(host.name))),
                                errors='surrogate_or_strict')
            group_results = self._variable_manager.add_host_vars_file(base_path, self._loader)
            if return_results:
                results = combine_vars(results, group_results)

    # all done, results is a dictionary of variables for this particular host.
    return results

def tostring(element, encoding="UTF-8"):
    if HAS_LXML:
        return xml_to_string(element, encoding="unicode")
    else:
        return to_text(xml_to_string(element, encoding), encoding=encoding)

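# Sketch of the non-lxml branch of tostring() using the stdlib ElementTree:
# with an explicit encoding, xml_to_string returns bytes, hence the decode.
from xml.etree.ElementTree import Element, tostring as xml_to_string

_elem = Element('config')
assert xml_to_string(_elem, encoding='UTF-8').decode('UTF-8').endswith('<config />')
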
def main(): """main entry point for module execution""" backup_spec = dict(filename=dict(), dir_path=dict(type="path")) argument_spec = dict( src=dict(type="str"), lines=dict(aliases=["commands"], type="list", elements="str"), parents=dict(type="list", elements="str"), before=dict(type="list", elements="str"), after=dict(type="list", elements="str"), match=dict( default="line", choices=["line", "strict", "exact", "none"] ), replace=dict(default="line", choices=["line", "block"]), multiline_delimiter=dict(default="@"), running_config=dict(aliases=["config"]), intended_config=dict(), defaults=dict(type="bool", default=False), backup=dict(type="bool", default=False), backup_options=dict(type="dict", options=backup_spec), save_when=dict( choices=["always", "never", "modified", "changed"], default="never" ), diff_against=dict(choices=["startup", "intended", "running"]), diff_ignore_lines=dict(type="list", elements="str"), ) argument_spec.update(ios_argument_spec) mutually_exclusive = [("lines", "src"), ("parents", "src")] required_if = [ ("match", "strict", ["lines"]), ("match", "exact", ["lines"]), ("replace", "block", ["lines"]), ("diff_against", "intended", ["intended_config"]), ] module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, required_if=required_if, supports_check_mode=True, ) result = {"changed": False} warnings = list() check_args(module, warnings) result["warnings"] = warnings diff_ignore_lines = module.params["diff_ignore_lines"] config = None contents = None flags = get_defaults_flag(module) if module.params["defaults"] else [] connection = get_connection(module) if ( module.params["backup"] or module._diff and module.params["diff_against"] == "running" ): contents = get_config(module, flags=flags) config = NetworkConfig(indent=1, contents=contents) if module.params["backup"]: result["__backup__"] = contents if any((module.params["lines"], module.params["src"])): match = module.params["match"] replace = module.params["replace"] path = module.params["parents"] candidate = get_candidate_config(module) running = get_running_config(module, contents, flags=flags) try: response = connection.get_diff( candidate=candidate, running=running, diff_match=match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=replace, ) except ConnectionError as exc: module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) config_diff = response["config_diff"] banner_diff = response["banner_diff"] if config_diff or banner_diff: commands = config_diff.split("\n") if module.params["before"]: commands[:0] = module.params["before"] if module.params["after"]: commands.extend(module.params["after"]) result["commands"] = commands result["updates"] = commands result["banners"] = banner_diff # send the configuration commands to the device and merge # them with the current running config if not module.check_mode: if commands: edit_config_or_macro(connection, commands) if banner_diff: connection.edit_banner( candidate=json.dumps(banner_diff), multiline_delimiter=module.params[ "multiline_delimiter" ], ) result["changed"] = True running_config = module.params["running_config"] startup_config = None if module.params["save_when"] == "always": save_config(module, result) elif module.params["save_when"] == "modified": output = run_commands( module, ["show running-config", "show startup-config"] ) running_config = NetworkConfig( indent=1, contents=output[0], ignore_lines=diff_ignore_lines ) startup_config = NetworkConfig( indent=1, contents=output[1], 
ignore_lines=diff_ignore_lines ) if running_config.sha1 != startup_config.sha1: save_config(module, result) elif module.params["save_when"] == "changed" and result["changed"]: save_config(module, result) if module._diff: if not running_config: output = run_commands(module, "show running-config") contents = output[0] else: contents = running_config # recreate the object in order to process diff_ignore_lines running_config = NetworkConfig( indent=1, contents=contents, ignore_lines=diff_ignore_lines ) if module.params["diff_against"] == "running": if module.check_mode: module.warn( "unable to perform diff against running-config due to check mode" ) contents = None else: contents = config.config_text elif module.params["diff_against"] == "startup": if not startup_config: output = run_commands(module, "show startup-config") contents = output[0] else: contents = startup_config.config_text elif module.params["diff_against"] == "intended": contents = module.params["intended_config"] if contents is not None: base_config = NetworkConfig( indent=1, contents=contents, ignore_lines=diff_ignore_lines ) if running_config.sha1 != base_config.sha1: if module.params["diff_against"] == "intended": before = running_config after = base_config elif module.params["diff_against"] in ("startup", "running"): before = base_config after = running_config result.update( { "changed": True, "diff": {"before": str(before), "after": str(after)}, } ) if result.get("changed") and any( (module.params["src"], module.params["lines"]) ): msg = ( "To ensure idempotency and correct diff the input configuration lines should be" " similar to how they appear if present in" " the running configuration on device" ) if module.params["src"]: msg += " including the indentation" if "warnings" in result: result["warnings"].append(msg) else: result["warnings"] = msg module.exit_json(**result)
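# A minimal sketch (added for illustration) of the save_when="modified"
# comparison performed above, assuming the pre-collections import path
# ansible.module_utils.network.common.config for NetworkConfig; the two
# config strings are hypothetical stand-ins for "show running-config" and
# "show startup-config" output.
from ansible.module_utils.network.common.config import NetworkConfig

running = NetworkConfig(indent=1, contents='hostname router1\n')
startup = NetworkConfig(indent=1, contents='hostname router0\n')

# The module only calls save_config() when the two SHA1 digests differ.
if running.sha1 != startup.sha1:
    print('configs differ; module would save running-config to startup-config')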
def run(self, iterator, play_context):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    # iterate over each task, while there is one left to run
    result = self._tqm.RUN_OK
    work_to_do = True

    self._set_hosts_cache(iterator._play)

    while work_to_do and not self._tqm._terminated:

        try:
            display.debug("getting the remaining hosts for this loop")
            hosts_left = self.get_hosts_left(iterator)
            display.debug("done getting the remaining hosts for this loop")

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False

            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

            # skip control
            skip_rest = False
            choose_step = True

            # flag set if task is set to any_errors_fatal
            any_errors_fatal = False

            results = []
            for (host, task) in host_tasks:
                if not task:
                    continue

                if self._tqm._terminated:
                    break

                run_once = False
                work_to_do = True

                # check to see if this task should be skipped, due to it being a member of a
                # role which has already run (and whether that role allows duplicate execution)
                if task._role and task._role.has_run(host):
                    # If there is no metadata, the default behavior is to not allow duplicates,
                    # if there is metadata, check to see if the allow_duplicates flag was set to true
                    if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                        display.debug("'%s' skipped because role has already run" % task)
                        continue

                display.debug("getting variables")
                task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
                                                            _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
                self.add_tqm_variables(task_vars, play=iterator._play)
                templar = Templar(loader=self._loader, variables=task_vars)
                display.debug("done getting variables")

                # test to see if the task across all hosts points to an action plugin which
                # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                # will only send this task to the first host in the list.

                task_action = templar.template(task.action)

                try:
                    action = action_loader.get(task_action, class_only=True, collection_list=task.collections)
                except KeyError:
                    # we don't care here, because the action may simply not have a
                    # corresponding action plugin
                    action = None

                if task_action in C._ACTION_META:
                    # for the linear strategy, we run meta tasks just once and for
                    # all hosts currently being iterated over rather than one host
                    results.extend(self._execute_meta(task, play_context, iterator, host))
                    if task.args.get('_raw_params', None) not in ('noop', 'reset_connection', 'end_host', 'role_complete'):
                        run_once = True
                    if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                        any_errors_fatal = True
                else:
                    # handle step if needed, skip meta actions as they are used internally
                    if self._step and choose_step:
                        if self._take_step(task):
                            choose_step = False
                        else:
                            skip_rest = True
                            break

                    run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

                    if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                        any_errors_fatal = True

                    if not callback_sent:
                        display.debug("sending task start callback, copying the task so we can template it temporarily")
                        saved_name = task.name
                        display.debug("done copying, going to template now")
                        try:
                            task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                            display.debug("done templating")
                        except Exception:
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
                        display.debug("here goes the callback...")
                        self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                        task.name = saved_name
                        callback_sent = True
                        display.debug("sending task start callback")

                    self._blocked_hosts[host.get_name()] = True
                    self._queue_task(host, task, task_vars, play_context)
                    del task_vars

                # if we're bypassing the host loop, break out now
                if run_once:
                    break

                results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))

            # go to next host/task group
            if skip_rest:
                continue

            display.debug("done queuing things up, now waiting for results queue to drain")
            if self._pending_results > 0:
                results += self._wait_on_pending_results(iterator)

            host_results.extend(results)

            self.update_active_connections(results)

            included_files = IncludedFile.process_include_results(
                host_results,
                iterator=iterator,
                loader=self._loader,
                variable_manager=self._variable_manager
            )

            if len(included_files) > 0:
                display.debug("we have included files to process")

                display.debug("generating all_blocks data")
                all_blocks = dict((host, []) for host in hosts_left)
                display.debug("done generating all_blocks data")
                for included_file in included_files:
                    display.debug("processing included file: %s" % included_file._filename)
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        if included_file._is_role:
                            new_ir = self._copy_included_file(included_file)

                            new_blocks, handler_blocks = new_ir.get_block_list(
                                play=iterator._play,
                                variable_manager=self._variable_manager,
                                loader=self._loader,
                            )
                        else:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)

                        display.debug("iterating over new_blocks loaded from include file")
                        for new_block in new_blocks:
                            task_vars = self._variable_manager.get_vars(
                                play=iterator._play,
                                task=new_block.get_first_parent_include(),
                                _hosts=self._hosts_cache,
                                _hosts_all=self._hosts_cache_all,
                            )
                            display.debug("filtering new block on tags")
                            final_block = new_block.filter_tagged_tasks(task_vars)
                            display.debug("done filtering new block on tags")

                            noop_block = self._prepare_and_create_noop_block_from(final_block, task._parent, iterator)

                            for host in hosts_left:
                                if host in included_file._hosts:
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)
                        display.debug("done iterating over new_blocks loaded from include file")
                    except AnsibleParserError:
                        raise
                    except AnsibleError as e:
                        for r in included_file._results:
                            r._result['failed'] = True

                        for host in included_file._hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                        display.error(to_text(e), wrap_text=False)
                        continue

                # finally go through all of the hosts and append the
                # accumulated blocks to their list of tasks
                display.debug("extending task lists for all hosts with included blocks")

                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])

                display.debug("done extending task lists")
                display.debug("done processing included files")

            display.debug("results queue empty")

            display.debug("checking for any_errors_fatal")
            failed_hosts = []
            unreachable_hosts = []
            for res in results:
                # execute_meta() does not set 'failed' in the TaskResult
                # so we skip checking it with the meta tasks and look just at the iterator
                if (res.is_failed() or res._task.action in C._ACTION_META) and iterator.is_failed(res._host):
                    failed_hosts.append(res._host.name)
                elif res.is_unreachable():
                    unreachable_hosts.append(res._host.name)

            # if any_errors_fatal and we had an error, mark all hosts as failed
            if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
                dont_fail_states = frozenset([IteratingStates.RESCUE, IteratingStates.ALWAYS])
                for host in hosts_left:
                    (s, _) = iterator.get_next_task_for_host(host, peek=True)
                    # the state may actually be in a child state, use the get_active_state()
                    # method in the iterator to figure out the true active state
                    s = iterator.get_active_state(s)
                    if s.run_state not in dont_fail_states or \
                       s.run_state == IteratingStates.RESCUE and s.fail_state & FailedStates.RESCUE != 0:
                        self._tqm._failed_hosts[host.name] = True
                        result |= self._tqm.RUN_FAILED_BREAK_PLAY
            display.debug("done checking for any_errors_fatal")

            display.debug("checking for max_fail_percentage")
            if iterator._play.max_fail_percentage is not None and len(results) > 0:
                percentage = iterator._play.max_fail_percentage / 100.0

                if (len(self._tqm._failed_hosts) / iterator.batch_size) > percentage:
                    for host in hosts_left:
                        # don't double-mark hosts, or the iterator will potentially
                        # fail them out of the rescue/always states
                        if host.name not in failed_hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug('(%s failed / %s total )> %s max fail' % (len(self._tqm._failed_hosts), iterator.batch_size, percentage))
            display.debug("done checking for max_fail_percentage")

            display.debug("checking to see if all hosts have failed and the running result is not ok")
            if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
                display.debug("^ not ok, so returning result now")
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                return result
            display.debug("done checking to see if all hosts have failed")

        except (IOError, EOFError) as e:
            display.debug("got IOError/EOFError in task loop: %s" % e)
            # most likely an abort, return failed
            return self._tqm.RUN_UNKNOWN_ERROR

    # run the base class run() method, which executes the cleanup function
    # and runs any outstanding handlers which have been triggered
    return super(StrategyModule, self).run(iterator, play_context, result)
def main():
    argument_spec = aci_argument_spec()
    argument_spec.update(
        path=dict(type='str', required=True, aliases=['uri']),
        method=dict(type='str', default='get', choices=['delete', 'get', 'post'], aliases=['action']),
        src=dict(type='path', aliases=['config_file']),
        content=dict(type='raw'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['content', 'src']],
    )

    content = module.params.get('content')
    path = module.params.get('path')
    src = module.params.get('src')

    # Report missing file
    file_exists = False
    if src:
        if os.path.isfile(src):
            file_exists = True
        else:
            module.fail_json(msg="Cannot find/access src '%s'" % src)

    # Find request type
    if path.find('.xml') != -1:
        rest_type = 'xml'
        if not HAS_LXML_ETREE:
            module.fail_json(msg='The lxml python library is missing, or lacks etree support.')
        if not HAS_XMLJSON_COBRA:
            module.fail_json(msg='The xmljson python library is missing, or lacks cobra support.')
    elif path.find('.json') != -1:
        rest_type = 'json'
    else:
        module.fail_json(msg='Failed to find REST API payload type (neither .xml nor .json).')

    aci = ACIRESTModule(module)
    aci.result['status'] = -1  # Ensure we always return a status

    # We include the payload as it may be templated
    payload = content
    if file_exists:
        with open(src, 'r') as config_object:
            # TODO: Would be nice to template this, requires action-plugin
            payload = config_object.read()

    # Validate payload
    if rest_type == 'json':
        if content and isinstance(content, dict):
            # Validate inline YAML/JSON
            payload = json.dumps(payload)
        elif payload and isinstance(payload, str) and HAS_YAML:
            try:
                # Validate YAML/JSON string
                payload = json.dumps(yaml.safe_load(payload))
            except Exception as e:
                module.fail_json(msg='Failed to parse provided JSON/YAML payload: %s' % to_text(e),
                                 exception=to_text(e), payload=payload)
    elif rest_type == 'xml' and HAS_LXML_ETREE:
        if content and isinstance(content, dict) and HAS_XMLJSON_COBRA:
            # Validate inline YAML/JSON
            # FIXME: Converting from a dictionary to XML is unsupported at this time
            # payload = etree.tostring(payload)
            pass
        elif payload and isinstance(payload, str):
            try:
                # Validate XML string
                payload = lxml.etree.tostring(lxml.etree.fromstring(payload))
            except Exception as e:
                module.fail_json(msg='Failed to parse provided XML payload: %s' % to_text(e), payload=payload)

    # Perform actual request using auth cookie (Same as aci.request(), but also supports XML)
    if 'port' in aci.params and aci.params.get('port') is not None:
        aci.url = '%(protocol)s://%(host)s:%(port)s/' % aci.params + path.lstrip('/')
    else:
        aci.url = '%(protocol)s://%(host)s/' % aci.params + path.lstrip('/')
    if aci.params.get('method') != 'get':
        path += '?rsp-subtree=modified'
        aci.url = update_qsl(aci.url, {'rsp-subtree': 'modified'})

    # Sign and encode request as to APIC's wishes
    if aci.params.get('private_key') is not None:
        aci.cert_auth(path=path, payload=payload)

    aci.method = aci.params.get('method').upper()

    # Perform request
    resp, info = fetch_url(module, aci.url,
                           data=payload,
                           headers=aci.headers,
                           method=aci.method,
                           timeout=aci.params.get('timeout'),
                           use_proxy=aci.params.get('use_proxy'))

    aci.response = info.get('msg')
    aci.status = info.get('status')

    # Report failure
    if info.get('status') != 200:
        try:
            # APIC error
            aci.response_type(info.get('body'), rest_type)
            aci.fail_json(msg='APIC Error %(code)s: %(text)s' % aci.error)
        except KeyError:
            # Connection error
            aci.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)

    aci.response_type(resp.read(), rest_type)

    aci.result['imdata'] = aci.imdata
    aci.result['totalCount'] = aci.totalCount

    # Report success
    aci.exit_json(**aci.result)
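# A short standalone sketch (added for illustration) of the JSON payload
# normalization performed above: an inline YAML/JSON string is parsed with
# yaml.safe_load() and re-serialized with json.dumps() so the APIC always
# receives canonical JSON. Requires PyYAML; the payload content is
# hypothetical.
import json
import yaml

inline = 'fvTenant: {attributes: {name: demo}}'
payload = json.dumps(yaml.safe_load(inline))
print(payload)  # {"fvTenant": {"attributes": {"name": "demo"}}}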
def processAuthentication(module):

    # Getting parameters from module
    api_base_url = module.params["api_base_url"]
    validate_certs = module.params["validate_certs"]

    username = module.params["username"]
    password = module.params["password"]
    new_password = module.params["new_password"]

    use_radius = module.params["use_radius_authentication"]
    use_ldap = module.params["use_ldap_authentication"]
    use_windows = module.params["use_windows_authentication"]
    use_cyberark = module.params["use_cyberark_authentication"]

    connection_number = module.params["connection_number"]
    state = module.params["state"]
    cyberark_session = module.params["cyberark_session"]

    concurrentSessions = module.params["concurrentSessions"]

    # if in check mode it will not perform password changes
    if module.check_mode and new_password is not None:
        new_password = None

    # Defining initial values for open_url call
    headers = {"Content-Type": "application/json"}

    payload = ""

    if state == "present":  # Logon Action

        # Different end_points based on the desired method of auth
        if use_ldap:
            end_point = "/PasswordVault/API/Auth/LDAP/Logon"
        elif use_radius:
            end_point = "/PasswordVault/API/Auth/radius/Logon"
        elif use_windows:
            end_point = "/PasswordVault/API/auth/Windows/Logon"
        else:
            use_cyberark = True
            end_point = "/PasswordVault/API/Auth/CyberArk/Logon"

        # The payload will contain username, password
        # and optionally use_radius_authentication and new_password
        payload_dict = {"username": username, "password": password}

        if new_password is not None and use_cyberark:
            payload_dict["newPassword"] = new_password

        # COMMENT: the purpose of connection_number is unclear and the old API
        # does not appear to have had this field
        # if connection_number is not None:
        #     payload_dict["connectionNumber"] = connection_number

        if concurrentSessions:
            payload_dict["concurrentSessions"] = True

        payload = json.dumps(payload_dict)

    else:  # Logoff Action

        # Get values from cyberark_session already established
        api_base_url = cyberark_session["api_base_url"]
        validate_certs = cyberark_session["validate_certs"]
        headers["Authorization"] = cyberark_session["token"]

        # All logoffs use the same endpoint
        end_point = "/PasswordVault/API/Auth/Logoff"

    result = None
    changed = False
    response = None

    try:
        response = open_url(
            api_base_url + end_point,
            method="POST",
            headers=headers,
            data=payload,
            validate_certs=validate_certs,
        )
    except (HTTPError, http.client.HTTPException) as http_exception:
        module.fail_json(
            msg=(
                "Error while performing authentication. "
                "Please validate parameters provided, and ability to logon to "
                "CyberArk.\n*** end_point=%s%s\n ==> %s"
            )
            % (api_base_url, end_point, to_text(http_exception)),
            payload=payload,
            headers=headers,
            status_code=http_exception.code,
        )
    except Exception as unknown_exception:
        module.fail_json(
            msg=(
                "Unknown error while performing authentication."
                "\n*** end_point=%s%s\n%s"
                % (api_base_url, end_point, to_text(unknown_exception))
            ),
            payload=payload,
            headers=headers,
            status_code=-1,
        )

    if response.getcode() == 200:  # Success

        if state == "present":  # Logon Action

            # Result token from the REST API uses a different key based on
            # the use of shared logon authentication
            token = ""
            try:
                token = str(json.loads(response.read()))

                # the new API just returns a token
                # if use:
                #     token = json.loads(response.read())["LogonResult"]
                # else:
                #     token = json.loads(response.read())["CyberArkLogonResult"]
            except Exception as e:
                module.fail_json(
                    msg="Error obtaining token\n%s" % (to_text(e)),
                    payload=payload,
                    headers=headers,
                    status_code=-1,
                )

            # Preparing result of the module
            result = {
                "cyberark_session": {
                    "token": token,
                    "api_base_url": api_base_url,
                    "validate_certs": validate_certs,
                }
            }

            if new_password is not None:
                # Only marks change if new_password was received resulting
                # in a password change
                changed = True

        else:  # Logoff Action clears cyberark_session

            result = {"cyberark_session": {}}

        return (changed, result, response.getcode())
    else:
        module.fail_json(msg="error in end_point=>" + end_point, headers=headers)
def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False):
    if USE_JINJA2_NATIVE and not isinstance(data, string_types):
        return data

    # For preserving the number of input newlines in the output (used
    # later in this method)
    data_newlines = _count_newlines_from_end(data)

    if fail_on_undefined is None:
        fail_on_undefined = self._fail_on_undefined_errors

    try:
        # allows template header overrides to change jinja2 options.
        if overrides is None:
            myenv = self.environment.overlay()
        else:
            myenv = self.environment.overlay(overrides)

        # Get jinja env overrides from template
        if hasattr(data, 'startswith') and data.startswith(JINJA2_OVERRIDE):
            eol = data.find('\n')
            line = data[len(JINJA2_OVERRIDE):eol]
            data = data[eol + 1:]
            for pair in line.split(','):
                (key, val) = pair.split(':')
                key = key.strip()
                setattr(myenv, key, ast.literal_eval(val.strip()))

        # Adds Ansible custom filters and tests
        myenv.filters.update(self._get_filters(myenv.filters))
        myenv.tests.update(self._get_tests())

        if escape_backslashes:
            # Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
            data = _escape_backslashes(data, myenv)

        try:
            t = myenv.from_string(data)
        except TemplateSyntaxError as e:
            raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)))
        except Exception as e:
            if 'recursion' in to_native(e):
                raise AnsibleError("recursive loop detected in template string: %s" % to_native(data))
            else:
                return data

        if disable_lookups:
            t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup
        else:
            t.globals['lookup'] = self._lookup
            t.globals['query'] = t.globals['q'] = self._query_lookup

        t.globals['finalize'] = self._finalize

        jvars = AnsibleJ2Vars(self, t.globals)

        self.cur_context = new_context = t.new_context(jvars, shared=True)
        rf = t.root_render_func(new_context)

        try:
            res = j2_concat(rf)
            if getattr(new_context, 'unsafe', False):
                res = wrap_var(res)
        except TypeError as te:
            if 'StrictUndefined' in to_native(te):
                errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
                errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
                raise AnsibleUndefinedVariable(errmsg)
            else:
                display.debug("failing because of a type error, template data is: %s" % to_native(data))
                raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))

        if USE_JINJA2_NATIVE and not isinstance(res, string_types):
            return res

        if preserve_trailing_newlines:
            # The low level calls above do not preserve the newline
            # characters at the end of the input data, so we calculate the
            # difference in newlines and append them to the resulting output
            # for parity
            #
            # jinja2 added a keep_trailing_newline option in 2.7 when
            # creating an Environment. That would let us make this code
            # better (remove a single newline if preserve_trailing_newlines
            # is False). Once we can depend on that version being present,
            # modify our code to set that when initializing self.environment
            # and remove a single trailing newline here if
            # preserve_newlines is False.
            res_newlines = _count_newlines_from_end(res)
            if data_newlines > res_newlines:
                res += self.environment.newline_sequence * (data_newlines - res_newlines)
        return res
    except (UndefinedError, AnsibleUndefinedVariable) as e:
        if fail_on_undefined:
            raise AnsibleUndefinedVariable(e)
        else:
            display.debug("Ignoring undefined failure: %s" % to_text(e))
            return data
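# A minimal standalone sketch (added for illustration) of the "#jinja2:"
# override header parsed in do_template() above, assuming JINJA2_OVERRIDE
# equals '#jinja2:'; the template text is hypothetical. Each key:value pair
# on the header line is applied to the environment overlay via
# ast.literal_eval(). Note the sketch uses split(':', 1), slightly more
# permissive than the original's split(':').
import ast

JINJA2_OVERRIDE = '#jinja2:'
data = '#jinja2: trim_blocks: False, variable_start_string: "[%"\n{{ msg }}'

if data.startswith(JINJA2_OVERRIDE):
    eol = data.find('\n')
    line = data[len(JINJA2_OVERRIDE):eol]
    data = data[eol + 1:]
    for pair in line.split(','):
        key, val = pair.split(':', 1)
        # prints: trim_blocks = False, then variable_start_string = [%
        print(key.strip(), '=', ast.literal_eval(val.strip()))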
def get_config(p, section, key, env_var, default, value_type=None, expand_relative_paths=False):
    ''' return a configuration variable with casting

    :arg p: A ConfigParser object to look for the configuration in
    :arg section: A section of the ini config that should be examined for this key.
    :arg key: The config key to get this config from
    :arg env_var: An Environment variable to check for the config var.  If
        this is set to None then no environment variable will be used.
    :arg default: A default value to assign to the config var if nothing else sets it.
    :kwarg value_type: The type of the value.  This can be any of the following strings:
        :boolean: sets the value to a True or False value
        :integer: Sets the value to an integer or raises a ValueError
        :float: Sets the value to a float or raises a ValueError
        :list: Treats the value as a comma separated list.  Split the value
            and return it as a python list.
        :none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
        :tmppath: Create a unique temporary directory inside of the directory
            specified by value and return its path.
        :pathlist: Treat the value as a typical PATH string.  (On POSIX, this
            means colon separated strings.)  Split the value and then expand
            each part for environment variables and tildes.
    :kwarg expand_relative_paths: for pathlist and path types, if this is set
        to True then also change any relative paths into absolute paths.  The
        default is False.
    '''
    value = _get_config(p, section, key, env_var, default)
    if value_type == 'boolean':
        value = mk_boolean(value)

    elif value:
        if value_type == 'integer':
            value = int(value)

        elif value_type == 'float':
            value = float(value)

        elif value_type == 'list':
            if isinstance(value, string_types):
                value = [x.strip() for x in value.split(',')]

        elif value_type == 'none':
            if value == "None":
                value = None

        elif value_type == 'path':
            value = shell_expand(value, expand_relative_paths=expand_relative_paths)

        elif value_type == 'tmppath':
            value = shell_expand(value)
            if not os.path.exists(value):
                makedirs_safe(value, 0o700)
            prefix = 'ansible-local-%s' % os.getpid()
            value = tempfile.mkdtemp(prefix=prefix, dir=value)

        elif value_type == 'pathlist':
            if isinstance(value, string_types):
                value = [shell_expand(x, expand_relative_paths=expand_relative_paths) for x in value.split(os.pathsep)]

        elif isinstance(value, string_types):
            value = unquote(value)

    return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
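# A brief usage sketch (added for illustration) for get_config() above,
# assuming the surrounding constants module provides _get_config() with
# env-var > ini > default precedence; the ini content and option names
# here are hypothetical.
import configparser

p = configparser.ConfigParser()
p.read_string('[defaults]\nforks = 10\nmodule_paths = ./library, /usr/share/modules\n')

forks = get_config(p, 'defaults', 'forks', 'ANSIBLE_FORKS', 5, value_type='integer')
# -> 10 (cast from the ini string)
paths = get_config(p, 'defaults', 'module_paths', 'ANSIBLE_MODULE_PATHS', '', value_type='list')
# -> ['./library', '/usr/share/modules']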
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
                        ask_vault_pass=None, create_new_password=False,
                        auto_prompt=True):
    # list of tuples
    vault_secrets = []

    # Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
    # we need to show different prompts. This is for compat with older Towers that expect a
    # certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
    prompt_formats = {}

    # If there are configured default vault identities, they are considered 'first'
    # so we prepend them to vault_ids (from cli) here
    vault_password_files = vault_password_files or []
    if C.DEFAULT_VAULT_PASSWORD_FILE:
        vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)

    if create_new_password:
        prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
                                    'Confirm new vault password (%(vault_id)s): ']
        # 2.3 format prompts for --ask-vault-pass
        prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
                                                   'Confirm New Vault password: ']
    else:
        prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
        # The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
        prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']

    vault_ids = CLI.build_vault_ids(vault_ids,
                                    vault_password_files,
                                    ask_vault_pass,
                                    create_new_password,
                                    auto_prompt=auto_prompt)

    for vault_id_slug in vault_ids:
        vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
        if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:

            # --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
            # confusing since it will use the old format without the vault id in the prompt
            built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY

            # choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
            # always gets the old format for Tower compatibility.
            # ie, we used --ask-vault-pass, so we need to use the old vault password prompt
            # format since Tower needs to match on that format.
            prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
                                                      vault_id=built_vault_id)

            # an empty or invalid password from the prompt will warn and continue to the next
            # without erroring globally
            try:
                prompted_vault_secret.load()
            except AnsibleError as exc:
                display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
                raise

            vault_secrets.append((built_vault_id, prompted_vault_secret))

            # update loader with new secrets incrementally, so we can load a vault password
            # that is encrypted with a vault secret provided earlier
            loader.set_vault_secrets(vault_secrets)
            continue

        # assuming anything else is a password file
        display.vvvvv('Reading vault password file: %s' % vault_id_value)
        # read vault_pass from a file
        file_vault_secret = get_file_vault_secret(filename=vault_id_value,
                                                  vault_id=vault_id_name,
                                                  loader=loader)

        # an invalid password file will error globally
        try:
            file_vault_secret.load()
        except AnsibleError as exc:
            display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
            raise

        if vault_id_name:
            vault_secrets.append((vault_id_name, file_vault_secret))
        else:
            vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))

        # update loader with as-yet-known vault secrets
        loader.set_vault_secrets(vault_secrets)

    return vault_secrets
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')

# diff
DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, value_type='integer')
DIFF_ALWAYS = get_config(p, 'diff', 'always', 'ANSIBLE_DIFF_ALWAYS', False, value_type='boolean')

# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'win_command', 'shell', 'win_shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'win_command', 'shell', 'win_shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict')  # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])

# module search
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
IGNORE_FILES = ["COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES"]

INTERNAL_RESULT_KEYS = ['add_host', 'add_group']
RESTRICTED_RESULT_KEYS = ['ansible_rsync_path', 'ansible_playbook_python']
def __getitem__(self, key):
    self._load_ansible_plugins()

    try:
        if not isinstance(key, string_types):
            raise ValueError('key must be a string')

        key = to_native(key)

        if '.' not in key:
            # might be a built-in or legacy, check the delegatee dict first, then try for a last-chance base redirect
            func = self._delegatee.get(key)

            if func:
                return func

            # didn't find it in the pre-built Jinja env, assume it's a former builtin and follow the normal routing path
            leaf_key = key
            key = 'ansible.builtin.' + key
        else:
            leaf_key = key.split('.')[-1]

        acr = AnsibleCollectionRef.try_parse_fqcr(key, self._dirname)

        if not acr:
            raise KeyError('invalid plugin name: {0}'.format(key))

        ts = _get_collection_metadata(acr.collection)

        # TODO: implement support for collection-backed redirect (currently only builtin)
        # TODO: implement cycle detection (unified across collection redir as well)

        routing_entry = ts.get('plugin_routing', {}).get(self._dirname, {}).get(leaf_key, {})

        deprecation_entry = routing_entry.get('deprecation')
        if deprecation_entry:
            warning_text = deprecation_entry.get('warning_text')
            removal_date = deprecation_entry.get('removal_date')
            removal_version = deprecation_entry.get('removal_version')

            if not warning_text:
                warning_text = '{0} "{1}" is deprecated'.format(self._dirname, key)

            display.deprecated(warning_text, version=removal_version, date=removal_date, collection_name=acr.collection)

        tombstone_entry = routing_entry.get('tombstone')

        if tombstone_entry:
            warning_text = tombstone_entry.get('warning_text')
            removal_date = tombstone_entry.get('removal_date')
            removal_version = tombstone_entry.get('removal_version')

            if not warning_text:
                warning_text = '{0} "{1}" has been removed'.format(self._dirname, key)

            exc_msg = display.get_deprecation_message(warning_text, version=removal_version, date=removal_date,
                                                      collection_name=acr.collection, removed=True)

            raise AnsiblePluginRemovedError(exc_msg)

        redirect_fqcr = routing_entry.get('redirect', None)
        if redirect_fqcr:
            acr = AnsibleCollectionRef.from_fqcr(ref=redirect_fqcr, ref_type=self._dirname)
            display.vvv('redirecting {0} {1} to {2}.{3}'.format(self._dirname, key, acr.collection, acr.resource))
            key = redirect_fqcr
        # TODO: handle recursive forwarding (not necessary for builtin, but definitely for further collection redirs)

        func = self._collection_jinja_func_cache.get(key)

        if func:
            return func

        try:
            pkg = import_module(acr.n_python_package_name)
        except ImportError:
            raise KeyError()

        parent_prefix = acr.collection

        if acr.subdirs:
            parent_prefix = '{0}.{1}'.format(parent_prefix, acr.subdirs)

        # TODO: implement collection-level redirect

        for dummy, module_name, ispkg in pkgutil.iter_modules(pkg.__path__, prefix=parent_prefix + '.'):
            if ispkg:
                continue

            try:
                plugin_impl = self._pluginloader.get(module_name)
            except Exception as e:
                raise TemplateSyntaxError(to_native(e), 0)

            method_map = getattr(plugin_impl, self._method_map_name)

            try:
                func_items = method_map().items()
            except Exception as e:
                display.warning(
                    "Skipping %s plugin %s as it seems to be invalid: %r" % (self._dirname, to_text(plugin_impl._original_path), e),
                )
                continue

            for func_name, func in func_items:
                fq_name = '.'.join((parent_prefix, func_name))
                # FIXME: detect/warn on intra-collection function name collisions
                if self._pluginloader.class_name == 'FilterModule':
                    if fq_name.startswith(('ansible.builtin.', 'ansible.legacy.')) and \
                            func_name in C.STRING_TYPE_FILTERS:
                        self._collection_jinja_func_cache[fq_name] = _wrap_native_text(func)
                    else:
                        self._collection_jinja_func_cache[fq_name] = _unroll_iterator(func)
                else:
                    self._collection_jinja_func_cache[fq_name] = func

        function_impl = self._collection_jinja_func_cache[key]
        return function_impl
    except AnsiblePluginRemovedError as apre:
        raise TemplateSyntaxError(to_native(apre), 0)
    except KeyError:
        raise
    except Exception as ex:
        display.warning('an unexpected error occurred during Jinja2 environment setup: {0}'.format(to_native(ex)))
        display.vvv('exception during Jinja2 environment setup: {0}'.format(format_exc()))
        raise TemplateSyntaxError(to_native(ex), 0)
def install_packages(module, state, packages, prompts):
    install_c = 0
    has_prompt = bool(prompts)
    default_stdin = "\n"

    if has_prompt:
        nb_prompts = len(prompts)
        nb_packages = len(packages)

        if nb_prompts > 0 and (nb_prompts != nb_packages):
            if nb_prompts > nb_packages:
                diff = nb_prompts - nb_packages
                msg = "%s packages to install but %s prompts to expect. %s prompts will be ignored" % (
                    to_text(nb_packages), to_text(nb_prompts), to_text(diff))
            else:
                diff = nb_packages - nb_prompts
                msg = "%s packages to install but only %s prompts to expect. %s packages won't be expected to have a prompt" % (
                    to_text(nb_packages), to_text(nb_prompts), to_text(diff))
            module.warn(msg)

        # Preparing prompts answer according to item type
        tmp_prompts = []
        for _item in prompts:
            # If the current item is a dict then we expect its key to be the prompt regex and its value to be the answer.
            # We also expect here that the dict only has ONE key; the first key will be taken.
            if isinstance(_item, dict):
                key = list(_item.keys())[0]
                answer = _item[key] + "\n"
                tmp_prompts.append((key, answer))
            elif not _item:
                tmp_prompts.append((None, default_stdin))
            else:
                tmp_prompts.append((_item, default_stdin))
        prompts = tmp_prompts

    for i, package in enumerate(packages):
        # if the package is installed and state == present
        # or state == latest and is up-to-date then skip
        installed, updated = query_package(module, package)
        if installed and (state == 'present' or (state == 'latest' and updated)):
            continue

        if state == 'present':
            command = 'install'

        if state == 'latest':
            command = 'upgrade'

        if has_prompt and i < len(prompts):
            prompt_regex = prompts[i][0]
            data = prompts[i][1]
        else:
            prompt_regex = None
            data = default_stdin

        cmd = "%s %s %s" % (_get_pear_path(module), command, package)
        rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True)
        if rc != 0:
            module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr)))

        install_c += 1

    if install_c > 0:
        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))

    module.exit_json(changed=False, msg="package(s) already installed")
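# A standalone sketch (added for illustration) of the prompt normalization
# above: each entry in `prompts` becomes a (regex, stdin_answer) tuple, with
# dict entries supplying an explicit answer and falsy entries falling back
# to a bare newline. The prompt strings are hypothetical.
default_stdin = "\n"
prompts = [{'(?i)Do you accept': 'yes'}, None, 'press ENTER']

tmp_prompts = []
for _item in prompts:
    if isinstance(_item, dict):
        key = list(_item.keys())[0]
        tmp_prompts.append((key, _item[key] + "\n"))
    elif not _item:
        tmp_prompts.append((None, default_stdin))
    else:
        tmp_prompts.append((_item, default_stdin))

print(tmp_prompts)
# [('(?i)Do you accept', 'yes\n'), (None, '\n'), ('press ENTER', '\n')]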
def _load_ansible_plugins(self):
    if self._ansible_plugins_loaded:
        return

    for plugin in self._pluginloader.all():
        try:
            method_map = getattr(plugin, self._method_map_name)
            self._delegatee.update(method_map())
        except Exception as e:
            display.warning("Skipping %s plugin %s as it seems to be invalid: %r" % (self._dirname, to_text(plugin._original_path), e))
            continue

    if self._pluginloader.class_name == 'FilterModule':
        for plugin_name, plugin in self._delegatee.items():
            if plugin_name in C.STRING_TYPE_FILTERS:
                self._delegatee[plugin_name] = _wrap_native_text(plugin)
            else:
                self._delegatee[plugin_name] = _unroll_iterator(plugin)

    self._ansible_plugins_loaded = True
def main():
    # get supported pkg managers
    PKG_MANAGERS = get_all_pkg_managers()
    PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]

    # start work
    global module
    module = AnsibleModule(
        argument_spec=dict(
            manager={'type': 'list', 'default': ['auto']},
            strategy={'choices': ['first', 'all'], 'default': 'first'},
        ),
        supports_check_mode=True,
    )
    packages = {}
    results = {'ansible_facts': {}}
    managers = [x.lower() for x in module.params['manager']]
    strategy = module.params['strategy']

    if 'auto' in managers:
        # keep order from user, we do dedupe below
        managers.extend(PKG_MANAGER_NAMES)
        managers.remove('auto')

    unsupported = set(managers).difference(PKG_MANAGER_NAMES)
    if unsupported:
        if 'auto' in module.params['manager']:
            msg = 'Could not auto detect a usable package manager, check warnings for details.'
        else:
            msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
        module.fail_json(msg=msg)

    found = 0
    seen = set()
    for pkgmgr in managers:

        if found and strategy == 'first':
            break

        # dedupe as per above
        if pkgmgr in seen:
            continue
        seen.add(pkgmgr)
        try:
            try:
                # manager throws exception on init (calls self.test) if not usable.
                manager = PKG_MANAGERS[pkgmgr]()
                if manager.is_available():
                    found += 1
                    packages.update(manager.get_packages())

            except Exception as e:
                if pkgmgr in module.params['manager']:
                    module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
                continue

        except Exception as e:
            if pkgmgr in module.params['manager']:
                module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))

    if found == 0:
        msg = ('Could not detect a supported package manager from the following list: %s, '
               'or the required Python library is not installed. Check warnings for details.' % managers)
        module.fail_json(msg=msg)

    # Set the facts, this will override the facts in ansible_facts that might exist from previous runs
    # when using operating system level or distribution package managers
    results['ansible_facts']['packages'] = packages

    module.exit_json(**results)
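# A small standalone sketch (added for illustration) of the manager-list
# handling above: 'auto' expands to every detected manager name while
# preserving the user's explicit ordering, and the seen-set then drops
# duplicates. The manager names below are illustrative only.
PKG_MANAGER_NAMES = ['apt', 'rpm', 'pacman']

managers = ['apt', 'auto']
if 'auto' in managers:
    managers.extend(PKG_MANAGER_NAMES)
    managers.remove('auto')

seen = set()
ordered = []
for pkgmgr in managers:
    if pkgmgr in seen:
        continue
    seen.add(pkgmgr)
    ordered.append(pkgmgr)

print(ordered)  # ['apt', 'rpm', 'pacman']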
def main():
    # the command module is the one ansible module that does not take key=value args
    # hence don't copy this one if you are looking to build others!
    module = AnsibleModule(
        argument_spec=dict(
            _raw_params=dict(),
            _uses_shell=dict(type='bool', default=False),
            argv=dict(type='list', elements='str'),
            chdir=dict(type='path'),
            executable=dict(),
            creates=dict(type='path'),
            removes=dict(type='path'),
            # The default for this really comes from the action plugin
            warn=dict(type='bool', default=False, removed_in_version='2.14', removed_from_collection='ansible.builtin'),
            stdin=dict(required=False),
            stdin_add_newline=dict(type='bool', default=True),
            strip_empty_ends=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )
    shell = module.params['_uses_shell']
    chdir = module.params['chdir']
    executable = module.params['executable']
    args = module.params['_raw_params']
    argv = module.params['argv']
    creates = module.params['creates']
    removes = module.params['removes']
    warn = module.params['warn']
    stdin = module.params['stdin']
    stdin_add_newline = module.params['stdin_add_newline']
    strip = module.params['strip_empty_ends']

    if not shell and executable:
        module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
        executable = None

    if (not args or args.strip() == '') and not argv:
        module.fail_json(rc=256, msg="no command given")

    if args and argv:
        module.fail_json(rc=256, msg="only command or argv can be given, not both")

    if not shell and args:
        args = shlex.split(args)

    args = args or argv
    # All args must be strings
    if is_iterable(args, include_strings=False):
        args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]

    if chdir:
        try:
            chdir = to_bytes(os.path.abspath(chdir), errors='surrogate_or_strict')
        except ValueError as e:
            module.fail_json(msg='Unable to use supplied chdir: %s' % to_text(e))

        try:
            os.chdir(chdir)
        except (IOError, OSError) as e:
            module.fail_json(msg='Unable to change directory before execution: %s' % to_text(e))

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of command executions.
        if glob.glob(creates):
            module.exit_json(cmd=args, stdout="skipped, since %s exists" % creates, changed=False, rc=0)

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of command executions.
        if not glob.glob(removes):
            module.exit_json(cmd=args, stdout="skipped, since %s does not exist" % removes, changed=False, rc=0)

    if warn:
        check_command(module, args)

    startd = datetime.datetime.now()

    if not module.check_mode:
        rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None,
                                          data=stdin, binary_data=(not stdin_add_newline))
    elif creates or removes:
        rc = 0
        out = err = b'Command would have run if not in check mode'
    else:
        module.exit_json(msg="skipped, running in check mode", skipped=True)

    endd = datetime.datetime.now()
    delta = endd - startd

    if strip:
        out = out.rstrip(b"\r\n")
        err = err.rstrip(b"\r\n")

    result = dict(
        cmd=args,
        stdout=out,
        stderr=err,
        rc=rc,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        changed=True,
    )

    if rc != 0:
        module.fail_json(msg='non-zero return code', **result)

    module.exit_json(**result)
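# A tiny standalone sketch (added for illustration) of the creates/removes
# idempotence guards above: both accept glob patterns, so a command guarded
# by creates=/tmp/done.* is skipped as soon as any matching file exists.
# The paths are hypothetical.
import glob

creates = '/tmp/done.*'
if glob.glob(creates):
    print("skipped, since %s exists" % creates)
else:
    print("command would run")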
def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)

    try:
        source_dir = self._task.args.get('dir')
        source_file = self._task.args.get('file')
        content = self._task.args['content']
    except KeyError as exc:
        return {'failed': True, 'msg': 'missing required argument: %s' % exc}

    if source_dir and source_file:
        return {'failed': True, 'msg': '`dir` and `file` are mutually exclusive arguments'}

    if source_dir:
        sources = self.get_files(to_list(source_dir))
    else:
        if source_file:
            sources = to_list(source_file)
        else:
            searchpath = task_vars.get('ansible_search_path', [])
            if not searchpath:
                searchpath.append(self._loader._basedir)

            if 'parser_templates' in os.listdir(searchpath[0]):
                subdir_searchpath = os.path.join(searchpath[0], 'parser_templates')
                # parser in {{ playbook_dir }}/parser_templates/{{ ansible_network_os }}
                if task_vars['ansible_network_os'] in os.listdir(subdir_searchpath):
                    newsearchpath = os.path.join(subdir_searchpath, task_vars['ansible_network_os'])
                    sources = self.get_parser(path=newsearchpath)
                # parser in {{ playbook_dir }}/parser_templates
                else:
                    sources = self.get_parser(path=subdir_searchpath)
            # parser in {{ playbook_dir }}
            else:
                sources = self.get_parser(path=searchpath[0])

    facts = {}

    self.template = template_loader.get('json_template', self._templar)

    for src in sources:
        src_path = os.path.expanduser(src)
        if not os.path.exists(src_path) or not os.path.isfile(src_path):
            raise AnsibleError("src [%s] is either missing or invalid" % src_path)

        tasks = self._loader.load_from_file(src)

        self.ds = {'content': content}
        self.ds.update(task_vars)

        for task in tasks:
            name = task.pop('name', None)
            display.vvvv('processing directive: %s' % name)

            register = task.pop('register', None)

            extend = task.pop('extend', None)
            if extend:
                extend = self.template(extend, self.ds)

            export = task.pop('export', False)

            export_as = task.pop('export_as', 'list')
            export_as = self.template(export_as, self.ds)
            if export_as not in self.VALID_EXPORT_AS:
                raise AnsibleError('invalid value for export_as, got %s' % export_as)

            if 'export_facts' in task:
                task['set_vars'] = task.pop('export_facts')
                export = True
            elif 'set_vars' not in task:
                if export and not register:
                    display.warning('entry will not be exported due to missing register option')

            when = task.pop('when', None)
            if when is not None:
                if not self._check_conditional(when, self.ds):
                    display.vvv('command_parser: skipping task [%s] due to conditional check' % name)
                    continue

            loop = task.pop('loop', None)
            loop_var = task.pop('loop_control', {}).get('loop_var') or 'item'
            if loop is not None:
                loop = self.template(loop, self.ds)
                if not loop:
                    display.vvv('command_parser: loop option was defined but no loop data found')

            res = list()

            if loop:
                # loop is a hash so break out key and value
                if isinstance(loop, Mapping):
                    for loop_key, loop_value in iteritems(loop):
                        self.ds[loop_var] = {'key': loop_key, 'value': loop_value}
                        resp = self._process_directive(task)
                        res.append(resp)

                # loop is either a list or a string
                else:
                    for loop_item in loop:
                        self.ds[loop_var] = loop_item
                        resp = self._process_directive(task)
                        res.append(resp)

                if 'set_vars' in task:
                    if register:
                        self.ds[register] = res
                        if export:
                            if extend:
                                facts.update(self.merge_facts(task_vars, extend, register, res))
                            else:
                                facts[register] = res
                    else:
                        self.ds.update(res)
                        if export:
                            facts.update(res)
                elif register:
                    self.ds[register] = res
                    if export:
                        if export_as in ('dict', 'hash', 'object'):
                            if extend:
                                facts.update(self.merge_facts(task_vars, extend, register, res, expand=True))
                            else:
                                if register not in facts:
                                    facts[register] = {}
                                for item in res:
                                    facts[register] = self.rec_update(facts[register], item)
                        else:
                            if extend:
                                facts.update(self.merge_facts(task_vars, extend, register, res))
                            else:
                                facts[register] = res

            else:
                res = self._process_directive(task)
                if 'set_vars' in task:
                    if register:
                        self.ds[register] = res
                        if export:
                            if extend:
                                facts.update(self.merge_facts(task_vars, extend, register, res))
                            else:
                                facts[register] = res
                    else:
                        self.ds.update(res)
                        if export:
                            facts.update(res)
                elif res and register:
                    self.ds[register] = res
                    if export:
                        if register:
                            if extend:
                                facts.update(self.merge_facts(task_vars, extend, register, res))
                            else:
                                facts[register] = res
                        else:
                            for r in to_list(res):
                                for k, v in iteritems(r):
                                    facts.update({to_text(k): v})

    task_vars.update(facts)

    result.update({
        'ansible_facts': facts,
        'included': sources
    })

    return result
def abort(self, admin=False):
    prompt = to_text(self._connection.get_prompt(), errors='surrogate_or_strict').strip()
    if prompt.endswith(')#'):
        self.send_command('abort')
        if admin and 'admin-' in prompt:
            self.send_command('exit')
def parse_inventory(self, host_list):

    if isinstance(host_list, string_types):
        if "," in host_list:
            host_list = host_list.split(",")
            host_list = [h for h in host_list if h and h.strip()]

    self.parser = None

    # Always create the 'all' and 'ungrouped' groups, even if host_list is
    # empty: in this case we will subsequently add the implicit 'localhost' to it.
    ungrouped = Group('ungrouped')
    all = Group('all')
    all.add_child_group(ungrouped)
    base_groups = frozenset([all, ungrouped])

    self.groups = dict(all=all, ungrouped=ungrouped)

    if host_list is None:
        pass
    elif isinstance(host_list, list):
        for h in host_list:
            try:
                (host, port) = parse_address(h, allow_ranges=False)
            except AnsibleError as e:
                display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
                host = h
                port = None

            new_host = Host(host, port)
            if h in C.LOCALHOST:
                # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
                if self.localhost is not None:
                    display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
                display.vvvv("Set default localhost to %s" % h)
                self.localhost = new_host
            all.add_host(new_host)
    elif self._loader.path_exists(host_list):
        # TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
        if self.is_directory(host_list):
            # Ensure basedir is inside the directory
            host_list = os.path.join(self.host_list, "")
            self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
        else:
            self.parser = get_file_parser(host_list, self.groups, self._loader)
            vars_loader.add_directory(self._basedir, with_subdir=True)

        if not self.parser:
            # should never happen, but JIC
            raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
    else:
        display.warning("Host file not found: %s" % to_text(host_list))

    self._vars_plugins = [x for x in vars_loader.all(self)]

    # ## POST PROCESS groups and hosts after specific parser was invoked
    group_names = set()

    # set group vars from group_vars/ files and vars plugins
    for g in self.groups:
        group = self.groups[g]
        group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
        self.get_group_vars(group)
        group_names.add(group.name)

    host_names = set()

    # get host vars from host_vars/ files and vars plugins
    for host in self.get_hosts(ignore_limits=True, ignore_restrictions=True):
        host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
        self.get_host_vars(host)
        host_names.add(host.name)

        mygroups = host.get_groups()

        # ensure hosts are always in 'all'
        if all not in mygroups:
            all.add_host(host)

        if ungrouped in mygroups:
            # clear ungrouped of any incorrectly stored by parser
            if set(mygroups).difference(base_groups):
                host.remove_group(ungrouped)
        else:
            # add ungrouped hosts to ungrouped
            length = len(mygroups)
            if length == 0 or (length == 1 and all in mygroups):
                ungrouped.add_host(host)

    # warn if overloading identifier as both group and host
    for conflict in group_names.intersection(host_names):
        display.warning("Found both group and host with same name: %s" % conflict)
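# A hedged sketch (added for illustration) of the host-list address parsing
# used above, assuming parse_address is still importable from
# ansible.parsing.utils.addresses; the hostnames are hypothetical. Entries
# that fail to parse keep their raw form with no port, mirroring the
# fallback in parse_inventory().
from ansible.parsing.utils.addresses import parse_address

for h in 'web1:2222,db1'.split(','):
    try:
        host, port = parse_address(h, allow_ranges=False)
    except Exception:
        host, port = h, None
    print(host, port)  # -> web1 2222, then db1 None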
def configure(self, admin=False):
    prompt = to_text(self._connection.get_prompt(), errors='surrogate_or_strict').strip()
    if not prompt.endswith(')#'):
        if admin and 'admin-' not in prompt:
            self.send_command('admin')
        self.send_command('configure terminal')
def parse(self, inventory, loader, path, cache=True):
    super(InventoryModule, self).parse(inventory, loader, path, cache)

    try:
        # The built-in loader uses ruamel-incompatible data types
        # data = self.loader.load_from_file(path, cache=False)
        with open(path, 'r') as f:
            data = yaml.load(f)
    except Exception as e:
        raise AnsibleParserError(e)

    if not data:
        raise AnsibleParserError('Parsed empty YAML file')
    elif not isinstance(data, MutableMapping):
        raise AnsibleParserError('YAML inventory has invalid structure, it should be a dictionary, got: %s' % type(data))

    try:
        tosca_group = self.inventory.add_group('tosca')
    except AnsibleError as e:
        raise AnsibleParserError("Unable to add group %s: %s" % ('tosca', to_text(e)))

    for service in data.get('services', []):
        name = service.get('name')
        if (not name) or (name == 'tosca'):
            group = tosca_group
        else:
            try:
                group = self.inventory.add_group(name)
                self.inventory.add_child(tosca_group, group)
            except AnsibleError as e:
                raise AnsibleParserError("Unable to add group %s: %s" % (name, to_text(e)))

        template = service.get('template', '')
        inputs = service.get('inputs', {})

        self.display.banner('Compiling TOSCA: %s' % template)
        try:
            clout = puccini.tosca.compile(template, inputs)
        except puccini.tosca.Problems as e:
            for problem in e.problems:
                self.display.warning(str(problem))
            raise e
        except Exception as e:
            self.display.warning(str(e))
            raise e

        node_types = service.get('node_types')
        capability_types = service.get('capability_types')

        for vertex in clout.get('vertexes', {}).values():
            try:
                if vertex.get('metadata', {}).get('puccini', {}).get('kind', '') == 'NodeTemplate':
                    node_template = vertex['properties']
                    if _is_allowed(node_template, node_types, capability_types):
                        self.inventory.add_host(node_template.get('name', ''), group=group)
            except Exception:
                # skip malformed vertexes rather than failing the whole parse
                pass