def get_docker_parent_infos(pluginref, parent_name):
    """Pull a docker parent image and return its local image info.

    :param pluginref: plugin instance used to execute ansible modules
    :param parent_name: name of the parent image to pull and examine
    :return: the single image info dict matching ``parent_name``
    """
    ## note: docker_image_info will never pull an image and only
    ## examines local images, so we need this extra explicit pull before
    pluginref.exec_module('community.docker.docker_image',
      modargs={'name': parent_name, 'source': 'pull', 'force_source': True}
    )

    info_res = pluginref.exec_module(
      'community.docker.docker_image_info', modargs={'name': parent_name}
    )

    ## prepare error message template; escape curly braces inside the
    ## raw result so the later .format() call cannot choke on them
    raw_result = str(info_res).replace('{', '{{').replace('}', '}}')
    errmsg = "Bad docker parent info query for parent"\
        " name '{}'.".format(parent_name) + " {}."\
      + " Raw result: " + raw_result

    images = info_res.get('images', None)
    ansible_assert(images, errmsg.format("No Images returned"))
    ansible_assert(len(images) == 1, errmsg.format(
      "Expected exactly one image returned, but got '{}'".format(len(images))
    ))

    return images[0]
def get_target_envvars(self, key_includes=None, key_excludes=None,
    single=False
):
    """Query environment variables on the remote target.

    :param key_includes: optional whitelist of env keys to keep
    :param key_excludes: optional blacklist of env keys to drop
       (mutually exclusive with ``key_includes``)
    :param single: when set, expect exactly one resulting envvar
       and return its value directly instead of a dict
    :return: dict of matching envvars, or a single value if ``single``
    """
    ansible_assert(
        not key_includes or not key_excludes,
        "key_includes and excludes params are mutually exclusive")

    def _wanted(key):
        # apply the optional include / exclude filters
        if key_excludes and key in key_excludes:
            return False
        if key_includes and key not in key_includes:
            return False
        return True

    shellres = self.exec_powershell_script('dir env:', data_return=True)
    envdict = {
        ev['Key']: ev['Value']
        for ev in shellres['result_json'] if _wanted(ev['Key'])
    }

    if not single:
        return envdict

    ansible_assert(len(envdict) == 1,
        "For querying envvars in single mode resulting dict must"\
        " contain exactly one key, but had '{}': {}".format(
           len(envdict), envdict
        )
    )
    return envdict[next(iter(envdict.keys()))]
def _update_upn(self): tmp = self.user if not tmp: self._upn = None return tmp = tmp.split('@') ansible_assert( len(tmp) <= 2, "ldap user cannot contain more than one '@': {}".format(self.user)) if len(tmp) < 2: domain = self.domain if not domain: raise AnsibleOptionsError( "could not determine UPN, either give user as"\ " complete principal name ('*****@*****.**') or set domain parameter" ) tmp.append(domain) self._upn = '@'.join(tmp)
def _postproc_json(self, jsonres): if not jsonres: return jsonres ## in general this can return more than one matching command if not isinstance(jsonres, list): jsonres = [jsonres] res = [] for jo in jsonres: ## note: for some reason powershell prefer to return stuff ## like command type as non-saying integer id's in json ## instead of strings like "Application", fix this here if 'CommandType' in jo: ct = jo['CommandType'] tmp = COMMAND_TYPES.get(ct, None) ansible_assert(tmp, "Unsupported CommandType: {}".format(ct)) jo['CommandType'] = tmp res.append(jo) return res
def run_specific(self, result):
    """Obtain, merge and normalize a config, updating result and ansvar.

    :param result: the ansible result dict to fill
    :return: result, with the normalized config under 'normalized'
    :raises AnsibleOptionsError: when neither 'config' nor
       'config_ansvar' params are set
    """
    explicit_cfg = self.get_taskparam('config')
    cfgvar = self.get_taskparam('config_ansvar')

    if not (cfgvar or explicit_cfg):
        raise AnsibleOptionsError(
           "if param 'config_ansvar' is unset, param 'config' must be set"
        )

    cfg = self._handle_merging(explicit_cfg)

    if not cfg:
        ## no merging and no explicit cfg param, obtain
        ## cfg from defined cfgvar
        if self.allow_empty:
            cfg = self.get_ansible_var(cfgvar, default={})
        else:
            cfg = self.get_ansible_var(cfgvar)

    ## do domain specific normalization
    ansible_assert(
        self.normalizer,
        "bad normalizer module: normalizer class hierarchy undefined")
    cfg = self.normalizer(cfg)

    ## return merged "normaly" a custom value of result dict
    result['normalized'] = cfg

    ## update ansible var directly
    if cfgvar:
        self.set_ansible_vars(**{cfgvar: cfg})

    return result
def handle_selfref(v, topmap):
    """Expand self references embedded in string value ``v``.

    A self reference is a ``SELFREF_START <dotted.key> SELFREF_END``
    marker; its dotted key is looked up in ``topmap`` via get_subdict.

    :param v: candidate value; non-strings are returned unchanged
    :param topmap: top-level mapping the references point into
    :return: ``v`` with references substituted; when there is exactly
       one reference, its raw replacement value is returned as-is
       (this is what allows complex replacements like maps/lists)
    """
    if not isinstance(v, string_types):
        # only strings can carry self references
        return v

    matches = []
    # raw string for the regex part we control; SELFREF_* markers are
    # module-level constants
    for m in re.finditer(SELFREF_START + r'\s*(\S*)\s*' + SELFREF_END, v):
        selfref_key = m.group(1)
        ansible_assert(selfref_key,
           "bad self reference inside string '{}': cannot"\
           " be empty".format(v)
        )
        selfref_key = selfref_key.split('.')
        tmp = get_subdict(topmap, selfref_key)

        if isinstance(tmp, collections.abc.Mapping) \
        or isinstance(tmp, list):
            ansible_assert(not matches,
               "bad self reference inside string '{}': multiple self"\
               " references inside a single value are only supported"\
               " atm for simple types, not complex collection types"\
               " like maps and lists".format(v)
            )

        matches.append({'key': m.group(0), 'replacement': tmp})

    if len(matches) == 1:
        return matches[0]['replacement']

    for m in matches:
        ## BUGFIX: original called ``v.sub(...)``, but ``str`` has no
        ## ``sub`` method (that is a compiled-regex method), so any
        ## value with multiple references raised AttributeError; a
        ## plain textual replace of the matched marker is what was
        ## intended here
        v = v.replace(m['key'], str(m['replacement']))

    return v
def set_subdict(d, keychain, val):
    """Set ``val`` at the nested position of ``d`` named by ``keychain``.

    :param d: (nested) mapping to modify in place
    :param keychain: non-empty sequence of keys; all but the last
       select the parent subdict, the last names the entry to set
    :return: the (modified) top-level dict ``d``
    """
    ansible_assert(keychain, "keychain cannot be empty when setting subdict")
    *parent_kc, leafkey = keychain
    get_subdict(d, parent_kc)[leafkey] = val
    return d
def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs):
    """Build up the container environment subconfig.

    Merges static env vars, optional proxy vars, extra_env task params
    and dynamically computed ("shelled") env vars into the 'static'
    subdict of ``my_subcfg``, enforcing key uniqueness throughout.

    :param cfg: complete top-level config mapping
    :param my_subcfg: the env subconfig to normalize (modified in place)
    :param cfgpath_abs: absolute config path of ``my_subcfg`` (unused here)
    :return: the updated ``my_subcfg``
    """
    def add_new_envkey(k, v, resmap, keylist):
        # central setter enforcing that every env key is defined only once
        if k in keylist:
            raise AnsibleOptionsError(
               "duplicate environment key '{}'".format(k)
            )
        resmap[k] = v
        keylist.append(k)

    def handle_shellers(shellmap, env_keys, resmap):
        # run each value as a remote shell command, its stdout
        # becomes the env var value
        for (k, v) in iteritems(shellmap):
            modret = self.pluginref.exec_module('shell',
               modargs={'cmd': v}
            )
            add_new_envkey(k, modret['stdout'], resmap, env_keys)

    constenv = get_subdict(my_subcfg, ['static'], default_empty=True)
    env_keys = list(constenv.keys())

    proxy = cfg.get('proxy', None)
    if proxy:
        ## if proxy is set, copy proxy vars to env
        proxy = proxy.get('vars', None)
        ansible_assert(proxy,
           "bad docker config, proxy set but proxy vars are empty"
        )
        for (k, v) in iteritems(proxy):
            add_new_envkey(k, v, constenv, env_keys)

    # merge in explicitly passed extra env vars from task params
    for (k, v) in iteritems(self.pluginref.get_taskparam('extra_env')):
        add_new_envkey(k, v, constenv, env_keys)

    dynenv = my_subcfg.get('dynamic', None)
    if not dynenv:
        ## no dynamic cfg set, nothing to do
        return my_subcfg

    tmp = {}
    for (k, v) in iteritems(dynenv.get('expand', {})):
        # use standard env shelling mechanism for expanding env vars
        tmp[k] = 'echo "{}"'.format(v)

    resmap = {}
    handle_shellers(tmp, env_keys, resmap)

    shellers = dynenv.get('shell', {})
    handle_shellers(shellers, env_keys, resmap)

    # fold dynamically computed values into the static env map
    constenv.update(resmap)
    return my_subcfg
def domain_user(self):
    """Return the user in down-level logon form ('domain\\user').

    When ``self.user`` already contains a backslash it is returned
    unchanged; otherwise the configured domain is prepended (and must
    be set in that case).
    """
    parts = self.user.split('\\')
    if len(parts) >= 2:
        # already qualified with a domain, keep as-is
        return '\\'.join(parts)
    ansible_assert(self.domain,
       "bad winad domain user, either give user as complete"\
       " domain user ('domain\\user') or set domain parameter"
    )
    return '\\'.join([self.domain] + parts)
def change_pw(self, newpw, connection=None, user=None, oldpw=None): connection = connection or self.default_connection ## if no user is specified, use default auth user if not user: user = self.upn oldpw = self.usrpw else: ansible_assert(oldpw, 'if you set user, you also must set oldpw') if oldpw == newpw: return False uo = self.get_user_object(connection=connection, user=user, empty_match_error=True) display.vv("WinAD :: change pw :: found matching user object: " + str(uo)) import ldap3 if not ldap3.extend.microsoft.modifyPassword.ad_modify_password( connection, uo['dn'], newpw, oldpw, controls=None): errmsg = \ "failed to change win-ad password for user '{}'".format(user) res = connection.result if re.search(r'(?i) Operation not allowed through GC port', res['message']): errmsg += \ '(possible reasons: connect to server using insecure'\ ' / non-ssl port (upgrading the connection later with'\ ' start_tls does not help))' elif res.get('description', '').lower() == 'unwillingtoperform': errmsg += \ '(possible reasons: new password does not comply with'\ ' password complexity policy)' elif res.get('description', '').lower() == 'constraintviolation': errmsg += \ '(possible reasons: trying to re-use a previous'\ ' password, password to young (it is not unusual'\ ' that you must wait for example one or two days'\ ' before changing password again))' errmsg += ', details: ' + str(res) raise AnsibleError(errmsg) return True
def get_subdict(d, keychain, **kwargs):
    """Return the single subdict of ``d`` addressed by ``keychain``.

    Thin wrapper around :func:`get_subdicts` for non-wildcard
    keychains which are guaranteed to produce exactly one result.

    :param d: (nested) mapping to descend into
    :param keychain: sequence of keys, must not contain the wildcard
       metakey (use get_subdicts for that)
    :param kwargs: passed through to get_subdicts
       (e.g. ``default_empty``, ``allow_nondict_leaves``)
    :return: the matched subvalue
    """
    ansible_assert(SUBDICT_METAKEY_ANY not in keychain,
        "use get_subdict only with a simple keychain with just one"
        " result, use get_subdicts instead for wildcards with"
        " multiple possible results"
    )
    d = list(get_subdicts(d, keychain, **kwargs))
    ansible_assert(len(d) == 1,
        "get_subdict produced more than one result, this should never happen"
    )
    # get_subdicts yields (value, keychain) tuples; return the value
    return d[0][0]
def get_subdicts(d, keychain, kciter=None, kcout=None, **kwargs):
    """Generate subdicts of ``d`` matching ``keychain``, wildcard-aware.

    Recursively walks ``d`` along ``keychain``; the wildcard metakey
    SUBDICT_METAKEY_ANY fans out over all keys of the current level.
    Yields ``(value, matched_keychain)`` tuples.

    :param d: (nested) mapping to descend into
    :param keychain: sequence of keys / wildcard metakeys
    :param kciter: internal recursion state, iterator over keychain;
       callers leave this unset
    :param kcout: internal recursion state, keys matched so far
    :param kwargs: ``default_empty`` creates missing subdicts on the
       fly, ``allow_nondict_leaves`` permits yielding non-mapping values
    :raises KeyError: when a key is missing and default_empty is unset
    """
    if not keychain:
        # empty keychain matches the top-level dict itself
        yield (d, kcout)
        return

    if not kciter:
        # first (external) call: set up shared iteration state,
        # then restart as internal recursion
        kcout = []
        yield from get_subdicts(d, keychain, iter(keychain), kcout, **kwargs)
        return

    nextkeys = next(kciter, None)

    if not nextkeys:
        # keychain exhausted ==> current level is a result
        yield (d, kcout)
        return

    if nextkeys == SUBDICT_METAKEY_ANY:
        # wildcard: branch over every key on this level
        nextkeys = d.keys()
    else:
        nextkeys = [nextkeys]

    for k in nextkeys:
        tmp = d.get(k, None)

        # NOTE(review): a falsy existing value (e.g. an empty dict
        # already stored under k) takes the default_empty / KeyError
        # path below as if the key were missing — confirm intended
        if tmp:
            if not isinstance(tmp, collections.abc.Mapping):
                # reached a non-dict leaf before keychain end
                ansible_assert(kwargs.get('allow_nondict_leaves', False),
                   "invalid subdicts keychain {}, child element of key"
                   " '{}' is not a dictionary: {}".format(keychain, k, tmp)
                )
                yield (tmp, kcout[:] + [k])
                continue
        elif kwargs.get('default_empty', False):
            # create missing levels on demand
            tmp = {}
            d[k] = tmp
        else:
            raise KeyError(
               "invalid keychain {}, could not find"
               " subkey '{}'".format(keychain, k)
            )

        yield from get_subdicts(
           tmp, keychain, kciter, kcout[:] + [k], **kwargs
        )
def check_paramtype(param, value, typespec, errmsg):
    """Check ``value`` of parameter ``param`` against a type spec.

    :param param: parameter name (for error messages)
    :param value: value to check
    :param typespec: either ``[]`` (no restriction), a callable
       custom validator, or a list of accepted types; a nested list
       entry means "list of these element types"
    :param errmsg: optional custom error message on type mismatch
    :raises AnsibleOptionsError: when value fails the type check
    """
    if typespec == []:
        # no type restriction ==> noop
        return

    if callable(typespec):
        # custom validator callable decides itself
        return typespec(value)

    type_match = False
    sub_types = None
    display.vvv(
       "[PLUGIN] :: handle args, do type check: {}".format(typespec)
    )
    for xt in typespec:
        display.vvv(
           "[PLUGIN] :: handle args, type test: {}".format(xt)
        )
        sub_types = None
        if isinstance(xt, list):
            ## BUGFIX: capture the element types BEFORE replacing xt;
            ## originally ``xt = list; sub_types = xt`` set sub_types
            ## to the builtin ``list`` itself, so the recursive element
            ## check below degenerated to calling ``list(vx)`` and
            ## never actually validated element types
            sub_types = xt
            xt = list
        if isinstance(value, xt):
            type_match = True
            break

    if not type_match:
        if not errmsg:
            errmsg = "Must be one of the following types: {}".format(typespec)
        raise AnsibleOptionsError(
           "Value '{}' for param '{}' failed its type"
           " check: {}".format(value, param, errmsg)
        )

    if isinstance(value, list):
        # matched as list ==> recursively check each element
        ansible_assert(sub_types, 'bad typespec')
        display.vvv(
           "[PLUGIN] :: handle args, do subtype check: {}".format(sub_types)
        )
        for vx in value:
            check_paramtype(param, vx, sub_types, errmsg)
def _handle_docker_update(self, result):
    """Update the docker installation when a newer version exists.

    Compares installed vs. latest available docker version, and when
    they differ, installs the update, restarts the docker service and
    verifies the new version; records the update in ``result``.

    :param result: the ansible result dict to fill / mark changed
    """
    display.vv(
      "[UPDATE] :: check if newer docker version is avaible"
    )

    curver = self.get_currently_installed_docker_version()
    latest_ver = self._get_docker_version(get_cmd='Find-Package')

    ##
    ## note: technically we only check here that both versions differ,
    ##   theoretically this could also mean that curver is newer than
    ##   latest_ver, but practically this can never happen, as the
    ##   call we use to set latest_ver is guaranteed to return the
    ##   newest version avaible, so if curver is not that version,
    ##   it must be older
    ##
    if curver == latest_ver:
        ## installed docker is already the newest version, noop
        return

    display.v(
      "[UPDATE] :: docker update found, will now begin updating ..."
    )

    result['changed'] = True
    tmp = self._get_dockinfo(result)
    tmp['updated'] = {
      'from': curver
    }

    self.exec_powershell_script(
      'Install-Package -Name Docker -ProviderName DockerMsftProvider -update -force'
    )

    ## note: it seems after an update of docker restarting
    ##   the service is sufficient, no reboot needed apparently
    self.exec_powershell_script('Restart-Service docker')

    # verify the update actually landed on the expected version
    tmp = self.get_currently_installed_docker_version()
    ansible_assert(tmp == latest_ver,
       "Updating docker installation from '{}' to '{}'"\
       " failed".format(curver, latest_ver)
    )
def check_types_filter(cfg):
    """Sanity-check the optional 'types' filter of a templating cfg.

    :param cfg: config mapping possibly containing a 'types' filter
       (either a plain include list, or a dict with 'list' and an
       optional 'exclude' flag)
    :return: the unchanged filter value (or None / falsy if unset)
    """
    setfilter = cfg.get('types', None)
    if not setfilter:
        return setfilter

    # dont change pre-existing filter, but do some sanity checks
    if isinstance(setfilter, list):
        includes = setfilter
    elif setfilter.get('exclude', False):
        # exclusion filters cannot accidentally include dirs
        includes = None
    else:
        includes = setfilter['list']

    if includes:
        ansible_assert('directory' not in includes,
           "User explicitly specified a types filter criteria for"\
           " recursive templating which includes dirs. This does"\
           " not make sense as templating only works on files."
        )

    return setfilter
def get_user_object(self, user=None, check_attrs=None,
    empty_match_error=False, **kwargs
):
    """Search ldap for the object belonging to a given user.

    :param user: user name(s) to match; defaults to ``self.upn``
    :param check_attrs: ldap attributes to match user against,
       defaults to ``['userPrincipalName']``
    :param empty_match_error: when set, raise instead of returning an
       empty result if no object matched
    :param kwargs: passed through to ``self.search``
    :return: the single matching search entry, or the empty result list
    :raises AnsibleError: when nothing matched and empty_match_error set
    """
    check_attrs = check_attrs or ['userPrincipalName']
    user = user or self.upn

    if not isinstance(user, list):
        user = [user]

    # build an OR filter over every (attr, user) combination,
    # AND-combined with a person objectClass restriction
    sfilt = []
    for ca in check_attrs:
        for u in user:
            sfilt.append(ca + '=' + u)

    sfilt = combine_ldap_filter(
      combine_ldap_filter(*sfilt, method='|', enclose=False),
      'objectClass=person')

    display.vvv('LDAP :: get_user_object :: final search filter: ' + sfilt)

    # only keep real search result entries (drop referrals etc.)
    res = list(
      filter(lambda x: x['type'] == 'searchResEntry',
        self.search(search_filter=sfilt, **kwargs)))

    if res:
        ansible_assert(len(res) == 1,
           "bad result for getting ldap user object, expect to match"\
           " just one user, but found '{}': {}".format(len(res), res)
        )
        return res[0]
    elif empty_match_error:
        raise AnsibleError(
           "could not find matching ldap object for given"\
           " user: {}".format(user)
        )

    return res
def run_specific(self, result):
    """Install or remove windows capabilities on the target.

    Normalizes the 'caps' task param, queries currently installed
    capabilities per cap and then installs (state == present) or
    removes (state == absent) them as needed.

    :param result: the ansible result dict to fill; gets
       'capabilities' and 'restart_needed' keys
    :return: the filled result dict
    """
    caps = self.get_taskparam('caps')
    state = self.get_taskparam('state')

    ## normalize caps arg
    if not isinstance(caps, list):
        ## assume single string
        caps = [caps]

    tmp = []
    for c in caps:
        if not isinstance(c, collections.abc.Mapping):
            ## assume single string which represents name (plus optional meta)
            c = c.split('~')
            nc = {'name': c[0]}
            if len(c) > 1:
                # NOTE(review): assumes a full 5-part capability name
                # 'name~~~locale~version' (indices 3 and 4); a name
                # with fewer '~' parts would raise IndexError — confirm
                nc.update(locale=c[3], version=c[4])
            c = nc
        tmp.append(c)
    caps = tmp

    cap_info = {}
    restart_needed = False
    for c in caps:
        # rebuild the full capability name from name + optional meta
        cap_name = [c['name']]
        name_suffix = []
        if 'locale' in c:
            name_suffix.append(c['locale'])
        if 'version' in c:
            name_suffix.append(c['version'])
        if name_suffix:
            # two empty segments pad out the unused middle name parts
            name_suffix = ['', ''] + name_suffix
        cap_name = '~'.join(cap_name + name_suffix)

        ##
        ## check if caps are already installed
        ##
        cmd = ['Get-WindowsCapability', '-Online', '-Name', cap_name]

        if state == 'absent':
            ##
            ## note: LimitAccess is an important parameter here, because
            ##   it possible that because of AD/Domain restrictions
            ##   installation of capabilities is forbidden, but we can
            ##   still use this module for removing capabilities, but the
            ##   thing is that normally just listing caps with "Get-Cap"
            ##   already errors out when installing is forbidden, as this
            ##   on default already tries to query online resources for
            ##   caps, but with the access limit param we actually only
            ##   query the local machine, which should always be fine
            ##
            cmd += ['-LimitAccess', '| where state -eq "Installed"']

        tmp = self.exec_powershell_script(' '.join(cmd),
           data_return=True, on_error=self._on_getcap_error)
        tmp = tmp['result_json']

        if not tmp:
            if state == 'absent':
                ## caps we want to remove are not installed, noop
                continue

            raise AnsibleError(
               "Failed to install capability for given name '{}':"\
               " could not match name to capability".format(cap_name)
            )

        ## found installed caps
        if not isinstance(tmp, list):
            ## possible that we find one ore multiple
            ## matching caps, unify both cases
            tmp = [tmp]

        for ec in tmp:
            cap_info[ec['Name']] = {'details': ec}

        if state == 'present':
            new_caps = []

            ## filter out already installed caps
            for fc in tmp:
                if CAP_STATES.get(fc['State'], 'UNKNOWN') != 'INSTALLED':
                    new_caps.append(fc)

            if not new_caps:
                ## caps we want to install already there, noop
                continue

            ## install new caps
            if len(tmp) > 1:
                # TODO: support latest mode where we auto select latest version (but still assert that name base is the same for all possible candidates
                raise AnsibleOptionsError(
                   "Given caps name '{}' is ambigious (matches more"\
                   " than one capability). For installing caps"\
                   " (state == present) name must match exactly one"\
                   " capability.".format(cap_name)
                )

            result['changed'] = True
            ansible_assert(False, "TODO: handle installing new caps")
            continue

        ## remove existing caps
        result['changed'] = True
        for ec in tmp:
            cmd = 'Remove-WindowsCapability -Online -Name "{}"'.format(
               ec['Name'])

            rmres = self.exec_powershell_script(cmd, data_return=True)
            rmres = rmres['result_json']

            cap_info[ec['Name']]['remove'] = rmres
            restart_needed = restart_needed or rmres['RestartNeeded']

    result['capabilities'] = cap_info
    result['restart_needed'] = restart_needed
    return result
def _handle_taskargs(self, argspec, args_in, args_out):
    """Validate and normalize task arguments against an argspec.

    Normalizes short-form argspecs, resolves aliases, applies
    defaulting, type checks, choice checks, recursive subspec
    handling and mutual-exclusion checks; validated values are
    written into ``args_out``.

    :param argspec: mapping of param name -> spec (dict, tuple short
       form, or bare type); may contain a meta key
    :param args_in: the raw task args as given by the user
    :param args_out: dict the normalized args are written into
    :raises AnsibleOptionsError: on any user-facing argspec violation
    :raises AnsibleInternalError: on malformed short-form argspecs
    """
    display.vvv(
       "[PLUGIN] :: handle args, argspec: {}".format(argspec)
    )

    # work on copies so neither caller-side argspec nor args are mutated
    argspec = copy.deepcopy(argspec)
    args_set = copy.deepcopy(args_in)
    args_found = {}

    args_meta = argspec.pop(MAGIC_ARGSPECKEY_META, {})

    for (k, v) in iteritems(argspec):
        display.vv(
           "[PLUGIN] :: handle args, do param '{}'".format(k)
        )

        ## first normalize argspec

        # convert convenience short forms to norm form
        if isinstance(v, collections.abc.Mapping):
            display.vvv(
               "[PLUGIN] :: handle args, argspec is dict,"\
               " nothing to normalize"
            )
            pass  # noop
        elif isinstance(v, tuple):
            tmp = {}

            display.vvv(
               "[PLUGIN] :: handle args, argspec is short form,"\
               " normalizing ..."
            )

            # positional short form: (type[, fallback[, subspec|choice]])
            for i in range(0, len(v)):
                vx = v[i]

                if i == 0:
                    tmp['type'] = vx
                elif i == 1:
                    tmp['defaulting'] = {
                      'fallback': vx
                    }
                elif i == 2:
                    if isinstance(vx, collections.abc.Mapping):
                        tmp['subspec'] = vx
                    else:
                        tmp['choice'] = vx
                else:
                    raise AnsibleInternalError(
                       "Unsupported short form argspec tuple: '{}'".format(v)
                    )

            v = tmp
        else:
            ## assume a single value for arg type
            v = {
              'type': v
            }

        # normalize norm form
        ansible_assert('type' in v,
           "Bad argspec for param '{}': Mandatory type field missing".format(k)
        )

        vdef = v.get('defaulting', None)
        # note: currently unused, kept for documentation of intent
        mandatory = not vdef

        ## TODO: min and max sizes for collection types

        # get param
        key_hits = []
        aliases = v.get('aliases', [])

        display.vvv(
           "[PLUGIN] :: handle args, get set val / handle"\
           " aliasing: {}".format(aliases)
        )

        # the param may be given under its real name or any alias,
        # but only under exactly one of them
        for x in [k] + aliases:
            ansible_assert(x not in args_found,
               "Bad argspec for param '{}': duplicate alias"
               " name '{}'".format(k, x)
            )

            if x in args_set:
                key_hits.append(x)
                pval = args_set.pop(x)
                args_found[k] = True

        if len(key_hits) > 1:
            raise AnsibleOptionsError(
               "Bad param '{}': Use either key or one of its aliases"
               " '{}', but not more than one at a time".format(k, aliases)
            )

        if len(key_hits) == 0:
            display.vv("[PLUGIN] :: handle args, do defaulting")

            # param unset, do defaulting
            pval = default_param_value(
               k, vdef, self._ansible_varspace, getattr(self, '_templar', None)
            )

        display.vv(
           "[PLUGIN] :: handle args, final pvalue: |{}|".format(pval)
        )

        display.vv(
           "[PLUGIN] :: handle args, check param"\
           " type: {}".format(v['type'])
        )

        ## at this point param is either set explicitly or by
        ## defaulting mechanism, proceed with value tests
        check_paramtype(k, pval, v['type'], v.get('type_err', None))

        ## optionally handle choice
        choice = v.get('choice', None)
        if choice:
            display.vvv(
               "[PLUGIN] :: handle args, handle choice: {}".format(choice)
            )

            ansible_assert(isinstance(choice, list),
               "bad argspec[{}]: choice must be list,"\
               " but was '{}': {}".format(k, type(choice), choice)
            )

            ansible_assert(
               not isinstance(pval, (list, collections.abc.Mapping)),
               "bad argspec[{}]: if choice is specified, param"\
               " cannot be collection type, it must be scalar".format(k)
            )

            if pval not in choice:
                raise AnsibleOptionsError(
                   "Bad param '{}': given value was '{}' but it"\
                   " must be one of these: {}".format(k, pval, choice)
                )

        args_out[k] = pval

        # recurse into nested specs for mapping-valued params
        subspec = v.get('subspec', None)
        if isinstance(pval, collections.abc.Mapping) and subspec:
            display.vvv(
               "[PLUGIN] :: handle args, do subspec: {}".format(subspec)
            )
            self._handle_taskargs(subspec, pval, pval)

    # anything left in args_set was not declared in the argspec
    if args_set:
        raise AnsibleOptionsError(
           "Unsupported parameters given: {}".format(list(args_set.keys()))
        )

    ## check mutual exclusions:
    for exlst in args_meta.get('mutual_exclusions', []):
        tmp = []

        for x in exlst:
            if x in args_found:
                tmp.append(x)

        if len(tmp) > 1:
            raise AnsibleOptionsError(
               "It is not allowed to set mutual exclusive"
               " params '{}' and '{}' together".format(*tmp)
            )
def _handle_specifics_presub_specific(self, cfg, my_subcfg, cfgpath_abs,
    auto_detect
):
    """Determine the java home directory for cert handling.

    Uses an explicitly configured 'jvm_homedir' when present,
    otherwise auto detects via the JAVA_HOME env var and/or the
    location of the java executable, then verifies the result.

    :param cfg: complete top-level config mapping (unused here)
    :param my_subcfg: java subconfig, 'jvm_homedir' is filled in place
    :param cfgpath_abs: absolute config path of ``my_subcfg`` (unused)
    :param auto_detect: whether java handling was auto-detect opt-in
       (a detection miss is then acceptable instead of an error)
    :return: the updated ``my_subcfg``
    """
    if my_subcfg.get('jvm_homedir', None):
        # explicitly configured, nothing to detect
        return my_subcfg

    ## jvm home not explicitly given, try auto detecting
    ansenv = self.pluginref.get_ansible_var('ansible_env', {})
    jhome_envvar = ansenv.get('JAVA_HOME', None)

    mres = self.pluginref.run_other_action_plugin(
       command_which.ActionModule, plugin_args={'cmd': 'java'},
       ignore_error=True)

    jhome_from_exe = None
    if not mres.get('failed', False):
        # java binary lives in <home>/bin/java ==> strip two components
        jhome_from_exe = os.path.dirname(os.path.dirname(mres['linksrc']))

    if not (jhome_envvar or jhome_from_exe):
        ## auto detecting could not find any java home,
        ## assume that java is not a thing for system currently handled
        if not auto_detect:
            raise AnsibleOptionsError(
               "Auto detect failed to find any java home but user"\
               " explicitly activated java cert handling, either set"\
               " 'activate' key to '{}' if this is acceptable or give"\
               " an explicit java home path with the 'jvm_homedir' key"\
               " if you are sure that a java installation exist and"\
               " should be cert handled.".format(CONFIG_KEYWORD_AUTODETECT)
            )

        return my_subcfg

    if jhome_envvar and jhome_from_exe and jhome_envvar != jhome_from_exe:
        # both sources found a home but they disagree ==> user must decide
        raise AnsibleOptionsError(
           "Auto detect found more than one possible java home,"\
           " '{}' from environment and '{}' from java executable"\
           " $PATH, choose one explicitly by setting the"\
           " 'jvm_homedir' key".format(jhome_envvar, jhome_from_exe)
        )

    # sources agree or only one is set; env var wins when both match
    my_subcfg['jvm_homedir'] = jhome_envvar or jhome_from_exe

    # sanity check: a real java home must contain bin/java as a file
    testpath = os.path.join(my_subcfg['jvm_homedir'], 'bin', 'java')

    mres = self.pluginref.exec_module('ansible.builtin.stat',
       modargs={'path': testpath})

    ansible_assert(mres['stat']['isreg'],
       "bad cfg: final value for jvm_homedir '{}' seems not to"\
       " have a valid java home structure".format(my_subcfg['jvm_homedir'])
    )

    return my_subcfg