def _save_ssh_host_keys(self, filename):
    '''
    not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks don't complain about it :)
    '''
    if not self._any_keys_added():
        return False

    path = os.path.expanduser("~/.ssh")
    makedirs_safe(path)

    # Use a context manager so the file is closed even when a write raises
    # (the original leaked the file handle on error).
    with open(filename, 'w') as f:
        # First pass: keys that were already known before this run.
        for hostname, keys in iteritems(self.ssh._host_keys):
            for keytype, key in iteritems(keys):
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if not added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))

        # Second pass: keys added during this run go at the bottom.
        for hostname, keys in iteritems(self.ssh._host_keys):
            for keytype, key in iteritems(keys):
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
def __init__(self, module, attrs=None, args=None, keys=None, from_argspec=False):
    # module: AnsibleModule whose argument_spec backs any 'read_from' attrs.
    # attrs: optional mapping of attribute name -> attribute spec dict
    #        (mutated in place below when specs are filled from the argspec).
    # args: iterable of attribute names to create empty specs for.
    # keys: names (expected subset of args) to flag as the key attribute.
    # from_argspec: when True, each args entry reads its spec from the module
    #               argument_spec under its own name.
    args = [] if args is None else args

    self._attributes = attrs or {}
    self._module = module

    for arg in args:
        self._attributes[arg] = dict()
        if from_argspec:
            self._attributes[arg]['read_from'] = arg
        if keys and arg in keys:
            self._attributes[arg]['key'] = True

    self.attr_names = frozenset(self._attributes.keys())

    # Only one attribute may be marked as the key; tracked across the loop.
    _has_key = False

    for name, attr in iteritems(self._attributes):
        if attr.get('read_from'):
            if attr['read_from'] not in self._module.argument_spec:
                module.fail_json(msg='argument %s does not exist' % attr['read_from'])
            spec = self._module.argument_spec.get(attr['read_from'])
            # Copy argspec entries into the attr spec without overwriting
            # anything set explicitly.
            for key, value in iteritems(spec):
                if key not in attr:
                    attr[key] = value
        if attr.get('key'):
            if _has_key:
                module.fail_json(msg='only one key value can be specified')
            _has_key = True
            # Key attributes are implicitly required.
            attr['required'] = True
def populate_interfaces(self, data):
    """Build a facts dict keyed by interface name from device 'interfaces' data."""
    facts = dict()
    for name, details in iteritems(data['interfaces']):
        entry = dict()

        # Copy every mapped remote field that is present on this interface.
        for remote_key, local_key in iteritems(self.INTERFACE_MAP):
            if remote_key in details:
                entry[local_key] = details[remote_key]

        if 'interfaceAddress' in details:
            entry['ipv4'] = dict()
            for addr in details['interfaceAddress']:
                primary = addr['primaryIp']
                entry['ipv4']['address'] = primary['address']
                entry['ipv4']['masklen'] = primary['maskLen']
                self.add_ip_address(primary['address'], 'ipv4')

        if 'interfaceAddressIp6' in details:
            entry['ipv6'] = dict()
            for addr in details['interfaceAddressIp6']['globalUnicastIp6s']:
                entry['ipv6']['address'] = addr['address']
                entry['ipv6']['subnet'] = addr['subnet']
                self.add_ip_address(addr['address'], 'ipv6')

        facts[name] = entry
    return facts
def map_obj_to_commands(want, have, module):
    """Translate desired (want) vs. current (have) port state into the list of
    ovs-vsctl command strings required to converge them."""
    commands = list()

    def render(template):
        # Fill %(name)s placeholders from the module parameters.
        return template % module.params

    if module.params['state'] == 'absent':
        # Only remove the port when it actually exists.
        if have:
            commands.append(render("%(ovs-vsctl)s -t %(timeout)s del-port"
                                   " %(bridge)s %(port)s"))
    elif have:
        # Port exists: adjust tag and external_ids as needed.
        if want['tag'] != have['tag']:
            commands.append(render("%(ovs-vsctl)s -t %(timeout)s"
                                   " set port %(port)s tag=%(tag)s"))

        if want['external_ids'] != have['external_ids']:
            for key, value in iteritems(want['external_ids']):
                differs = (not have['external_ids'] or
                           key not in have['external_ids'] or
                           want['external_ids'][key] != have['external_ids'][key])
                if not differs:
                    continue
                if value is None:
                    # A None value means the external id must be removed.
                    commands.append(render("%(ovs-vsctl)s -t %(timeout)s"
                                           " remove port %(port)s"
                                           " external_ids " + key))
                else:
                    commands.append(render("%(ovs-vsctl)s -t %(timeout)s"
                                           " set port %(port)s"
                                           " external_ids:") + key + "=" + value)
    else:
        # Port does not exist yet: add it with any optional settings.
        command = render("%(ovs-vsctl)s -t %(timeout)s add-port"
                         " %(bridge)s %(port)s")
        if want['tag']:
            command += render(" tag=%(tag)s")
        if want['set']:
            command += render(" -- set %(set)s")
        commands.append(command)

        if want['external_ids']:
            for key, value in iteritems(want['external_ids']):
                commands.append(render("%(ovs-vsctl)s -t %(timeout)s"
                                       " set port %(port)s external_ids:") +
                                key + "=" + value)
    return commands
def _any_keys_added(self):
    """Return True if any cached SSH host key was added during this run."""
    return any(
        getattr(key, '_added_by_ansible_this_time', False)
        for keys in self.ssh._host_keys.values()
        for key in keys.values()
    )
def map_obj_to_commands(want, have, module):
    # Build the list of ovs-vsctl commands required to move the bridge from
    # its current state (have) to the desired state (want).
    commands = list()

    if module.params['state'] == 'absent':
        # Only delete the bridge if it exists.
        if have:
            templatized_command = ("%(ovs-vsctl)s -t %(timeout)s del-br"
                                   " %(bridge)s")
            command = templatized_command % module.params
            commands.append(command)
    else:
        if have:
            # Bridge exists: converge fail-mode and external ids.
            if want['fail_mode'] != have['fail_mode']:
                templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
                                       " set-fail-mode %(bridge)s"
                                       " %(fail_mode)s")
                command = templatized_command % module.params
                commands.append(command)

            if want['external_ids'] != have['external_ids']:
                templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
                                       " br-set-external-id %(bridge)s")
                command = templatized_command % module.params
                if want['external_ids']:
                    for k, v in iteritems(want['external_ids']):
                        if (k not in have['external_ids'] or
                                want['external_ids'][k] != have['external_ids'][k]):
                            # NOTE(review): `command` accumulates each differing
                            # id and is appended after every addition, so later
                            # entries repeat earlier ids -- looks intentional in
                            # upstream but TODO confirm.
                            command += " " + k + " " + v
                            commands.append(command)
        else:
            # Bridge missing: create it, with optional parent/vlan and set.
            templatized_command = ("%(ovs-vsctl)s -t %(timeout)s add-br"
                                   " %(bridge)s")
            command = templatized_command % module.params
            if want['parent']:
                templatized_command = "%(parent)s %(vlan)s"
                command += " " + templatized_command % module.params
            if want['set']:
                templatized_command = " -- set %(set)s"
                command += templatized_command % module.params
            commands.append(command)

            if want['fail_mode']:
                templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
                                       " set-fail-mode %(bridge)s"
                                       " %(fail_mode)s")
                command = templatized_command % module.params
                commands.append(command)

            if want['external_ids']:
                for k, v in iteritems(want['external_ids']):
                    templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
                                           " br-set-external-id %(bridge)s")
                    command = templatized_command % module.params
                    command += " " + k + " " + v
                    commands.append(command)
    return commands
def debug_prepared_request(url, method, headers, data=None):
    """Render an HTTP request as an equivalent ``curl`` command for debugging."""
    parts = ["curl -k -X {0} {1}".format(method.upper(), url)]
    for name, value in iteritems(headers):
        parts.append(" -H '{0}: {1}'".format(name, value))
    result = "".join(parts)

    # Only JSON payloads are echoed into the command; other bodies are omitted.
    # NOTE: assumes `data` is bytes (it is decoded before parsing).
    if data and any(value == 'application/json' for value in headers.values()):
        payload = _json.loads(data.decode('utf-8'))
        result += " -d '" + _json.dumps(payload, sort_keys=True) + "'"
    return result
def resource_to_parameters(self, resource):
    """ Converts a resource definition to module parameters """
    parameters = {}
    skipped = ('apiVersion', 'kind', 'status')
    wanted_meta = ('name', 'namespace', 'resourceVersion')

    for field, field_value in iteritems(resource):
        if field in skipped:
            continue
        if field == 'metadata' and isinstance(field_value, dict):
            # Only a whitelisted subset of metadata maps onto module params.
            for meta_key, meta_value in iteritems(field_value):
                if meta_key in wanted_meta:
                    parameters[meta_key] = meta_value
    return parameters
def remoteServers(self):
    """Return changed list of remote servers

    The order of this list does not matter as BIG-IP will send to all the
    items in it.

    :return: list of remote-server dicts when something changed, else None
    """
    changed = False
    if self.want.remote_host is None:
        return None
    if self.have.remoteServers is None:
        remote = dict()
    else:
        remote = self.have.remoteServers
    # NOTE(review): assumes `remote` is a list of dicts keyed by 'host' when
    # non-empty; the empty-dict default simply yields no entries here.
    current_hosts = dict((d['host'], d) for (i, d) in enumerate(remote))
    if self.want.state == 'absent':
        # NOTE(review): raises KeyError if the host to remove is not
        # currently present -- TODO confirm callers guarantee existence.
        del current_hosts[self.want.remote_host]
        result = [v for (k, v) in iteritems(current_hosts)]
        return result
    if self.want.remote_host in current_hosts:
        # Update the existing entry in place, tracking whether anything changed.
        item = current_hosts[self.want.remote_host]
        if self.want.remote_port is not None:
            if int(item['remotePort']) != self.want.remote_port:
                item['remotePort'] = self.want.remote_port
                self._remote_port = self.want.remote_port
                changed = True
        if self.want.local_ip is not None:
            if item['localIp'] != self.want.local_ip:
                item['localIp'] = self.want.local_ip
                self._local_ip = self.want.local_ip
                changed = True
    else:
        # New host: synthesize a sequential name and attach optional fields.
        changed = True
        count = len(current_hosts.keys()) + 1
        host = self.want.remote_host
        current_hosts[self.want.remote_host] = dict(
            name="/Common/remotesyslog{0}".format(count),
            host=host
        )
        if self.want.remote_port is not None:
            current_hosts[host]['remotePort'] = self.want.remote_port
            self._remote_port = self.want.remote_port
        if self.want.local_ip is not None:
            current_hosts[host]['localIp'] = self.want.local_ip
            self._local_ip = self.want.local_ip
    if changed:
        result = [v for (k, v) in iteritems(current_hosts)]
        return result
    return None
def exec_module(self):
    """Read facts for every registered collection, tag each with its type
    name, and return the name-filtered attribute dicts."""
    results = []
    for collection, kind in iteritems(self.types):
        gathered = self.read_facts(collection)
        if not gathered:
            continue

        # Tag every fact with its collection's type name.
        for fact in gathered:
            fact.update({'type': kind})

        for fact in gathered:
            attrs = fact.to_return()
            kept = [(k, v) for k, v in iteritems(attrs)
                    if self.filter_matches_name(k)]
            if kept:
                results.append(dict(kept))
    return results
def __compare_list(self, src_values, request_values, param_name):
    """
    Compare src_values list with request_values list, and append any missing
    request_values to src_values.

    :param src_values: existing list (mutated in place)
    :param request_values: requested list to merge in
    :param param_name: parameter name, used only in the error message
    :raises KubernetesException: when the element type is unsupported
    """
    if not request_values:
        return
    if not src_values:
        src_values += request_values

    if type(src_values[0]).__name__ in PRIMITIVES:
        if set(src_values) >= set(request_values):
            # src_value list includes request_value list
            return
        # append the missing elements from request value
        src_values += list(set(request_values) - set(src_values))
    elif type(src_values[0]).__name__ == 'dict':
        missing = []
        for request_dict in request_values:
            match = False
            for src_dict in src_values:
                if '__cmp__' in dir(src_dict):
                    # python < 3
                    if src_dict >= request_dict:
                        match = True
                        break
                elif src_dict == request_dict:
                    # python >= 3: compare the dicts directly.
                    # Bug fix: the previous `iteritems(src_dict) ==
                    # iteritems(request_dict)` compared two iterator objects,
                    # which is always False, so matches were never detected.
                    match = True
                    break
            if not match:
                missing.append(request_dict)
        src_values += missing
    elif type(src_values[0]).__name__ == 'list':
        missing = []
        for request_list in request_values:
            match = False
            for src_list in src_values:
                if set(request_list) >= set(src_list):
                    match = True
                    break
            if not match:
                missing.append(request_list)
        src_values += missing
    else:
        raise KubernetesException(
            "Evaluating {0}: encountered unimplemented type {1} in "
            "__compare_list()".format(param_name, type(src_values[0]).__name__)
        )
def resource_to_parameters(self, resource):
    """ Converts a resource definition to module parameters """
    parameters = {}
    for field, field_value in iteritems(resource):
        if field in ('apiVersion', 'kind', 'status'):
            # These are not module parameters.
            continue
        if field == 'metadata' and isinstance(field_value, dict):
            # Lift the supported metadata entries to top-level params.
            for meta_key, meta_value in iteritems(field_value):
                if meta_key in ('name', 'namespace', 'labels', 'annotations'):
                    parameters[meta_key] = meta_value
        elif field in self.helper.argspec and field_value is not None:
            parameters[field] = field_value
        elif isinstance(field_value, dict):
            # Nested spec section: flatten into snake_case-prefixed params.
            self._add_parameter(field_value, [to_snake(field)], parameters)
    return parameters
def _split_role_params(self, ds):
    ''' Splits any random role params off from the role spec and store them in a
    dictionary of params for parsing later '''
    # Returns (role_def, role_params): role_def holds recognized field
    # attributes, role_params holds everything else.
    role_def = dict()
    role_params = dict()
    base_attribute_names = frozenset(self._valid_attrs.keys())
    for (key, value) in iteritems(ds):
        # use the list of FieldAttribute values to determine what is and is not
        # an extra parameter for this role (or sub-class of this role)
        # FIXME: hard-coded list of exception key names here corresponds to the
        #        connection fields in the Base class. There may need to be some
        #        other mechanism where we exclude certain kinds of field attributes,
        #        or make this list more automatic in some way so we don't have to
        #        remember to update it manually.
        if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
            if key in ('connection', 'port', 'remote_user'):
                display.deprecated("Using '%s' as a role param has been deprecated. " % key +
                                   "In the future, these values should be entered in the `vars:` " +
                                   "section for roles, but for now we'll store it as both a param and an attribute.", version="2.7")
                # Deprecated connection-style keys are stored in BOTH dicts.
                role_def[key] = value
            # this key does not match a field attribute, so it must be a role param
            role_params[key] = value
        else:
            # this is a field attribute, so copy it over directly
            role_def[key] = value
    return (role_def, role_params)
def _generate_ospf_commands(self):
    """Append the CLI commands needed to converge the OSPF configuration
    (process id, router id, per-interface areas) onto self._commands."""
    want_router_id = self._required_config['router_id']
    want_ospf_id = self._required_config['ospf']
    have_router_id = self._current_config.get('router_id')
    have_ospf_id = self._current_config.get('ospf')

    if have_ospf_id != want_ospf_id or want_router_id != have_router_id:
        self._commands.append('router ospf %s' % want_ospf_id)
        if want_router_id != have_router_id:
            if want_router_id:
                self._commands.append('router-id %s' % want_router_id)
            else:
                self._commands.append('no router-id')
        self._commands.append('exit')

    want_interfaces = self._required_config['interfaces']
    have_interfaces = self._current_config.get('interfaces', dict())

    # Add or update interfaces whose area differs from the running config.
    for if_name, area in iteritems(want_interfaces):
        if have_interfaces.get(if_name) != area:
            self._commands.append(self._get_interface_area_cmd(if_name, area))

    # Remove OSPF from interfaces that are no longer requested.
    for if_name in have_interfaces:
        if if_name not in want_interfaces:
            self._commands.append(self._get_interface_area_cmd(if_name, None))
def parse_xml(output, tmpl):
    """Parse XML command output into a dict driven by a YAML spec template.

    :param output: XML document as a string.
    :param tmpl: path to the YAML spec file describing `keys` (and optional `vars`).
    :returns: dict mapping each spec key to its templated or extracted value.
    :raises AnsibleError: if the template is missing, the input is not a
        string, or the templating engine cannot be imported.
    """
    if not os.path.exists(tmpl):
        raise AnsibleError('unable to locate parse_cli template: %s' % tmpl)

    if not isinstance(output, string_types):
        raise AnsibleError('parse_xml works on string input, but given input of : %s' % type(output))

    root = fromstring(output)

    try:
        template = Template()
    except ImportError as exc:
        raise AnsibleError(str(exc))

    # Close the spec file deterministically (it was previously left open).
    with open(tmpl) as f:
        spec = yaml.safe_load(f.read())

    obj = {}

    for name, attrs in iteritems(spec['keys']):
        value = attrs['value']

        try:
            variables = spec.get('vars', {})
            value = template(value, variables)
        except Exception:
            # Best-effort templating: fall back to the raw value. Narrowed
            # from a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt.
            pass

        if 'items' in attrs:
            obj[name] = _extract_param(template, root, attrs, value)
        else:
            obj[name] = value

    return obj
def create_schedule_config(self, cursor):
    """Build and execute an INSERT for this scheduler entry.

    The three mandatory columns come first; every non-None value in
    self.config_data is appended as an extra column. Returns True.
    """
    query_string = \
        """INSERT INTO scheduler ( active, interval_ms, filename"""

    cols = 0
    query_data = [self.active, self.interval_ms, self.filename]

    for col, val in iteritems(self.config_data):
        if val is None:
            continue
        cols += 1
        query_data.append(val)
        query_string += ",\n" + col

    # Three fixed placeholders plus one per optional column.
    query_string += ")\n" + "VALUES (%s, %s, %s" + ", %s" * cols + ")"

    cursor.execute(query_string, query_data)
    return True
def has_different_config(self):
    """
    Return the list of differences between the current parameters and the
    existing volume.

    :return: list of options that differ
    """
    differences = []
    params = self.parameters
    existing = self.existing_volume

    if params.driver and params.driver != existing['Driver']:
        differences.append('driver')

    if params.driver_options:
        existing_opts = existing.get('Options')
        if not existing_opts:
            differences.append('driver_options')
        else:
            # Report each requested option that is missing or mismatched.
            for key, value in iteritems(params.driver_options):
                if not existing_opts.get(key) or value != existing_opts[key]:
                    differences.append('driver_options.%s' % key)

    if params.labels:
        existing_labels = existing.get('Labels', {})
        # Compare the union of both label sets so removals are caught too.
        for label in set(params.labels) | set(existing_labels):
            if existing_labels.get(label) != params.labels.get(label):
                differences.append('labels.%s' % label)

    return differences
def options(module):
    '''Transform the module `options` argument into a list of WAPI structs.

    Each option dict is stripped of None-valued entries (WAPI errors on
    them) and must carry either a `name` or a `num` key; otherwise the
    module fails. Remaining value validation is left to WAPI.
    '''
    result = list()
    for item in module.params['options']:
        opt = dict((k, v) for k, v in iteritems(item) if v is not None)
        if 'name' not in opt and 'num' not in opt:
            module.fail_json(msg='one of `name` or `num` is required for option value')
        result.append(opt)
    return result
def __init__(self, params=None):
    # _values returns None for unset keys instead of raising KeyError.
    self._values = defaultdict(lambda: None)

    if params:
        for k, v in iteritems(params):
            if self.api_map is not None and k in self.api_map:
                # Incoming key is an API name: translate it via api_map.
                dict_to_use = self.api_map
                map_key = self.api_map[k]
            else:
                dict_to_use = self._values
                map_key = k
            # NOTE(review): dict_to_use is assigned but never read below --
            # looks vestigial; TODO confirm before removing.

            # Handle weird API parameters like `dns.proxy.__iter__` by
            # using a map provided by the module developer
            class_attr = getattr(type(self), map_key, None)
            if isinstance(class_attr, property):
                # There is a mapped value for the api_map key
                if class_attr.fset is None:
                    # If the mapped value does not have an associated setter
                    self._values[map_key] = v
                else:
                    # The mapped value has a setter
                    setattr(self, map_key, v)
            else:
                # If the mapped value is not a @property
                self._values[map_key] = v
def get_rule_config(self, cursor, created_rule_id=None):
    """Fetch rows from mysql_query_rules.

    With `created_rule_id`, return the single matching row. Otherwise filter
    by every non-None value in self.config_data and return all matches.
    """
    query_string = \
        """SELECT * FROM mysql_query_rules"""

    if created_rule_id:
        query_data = [created_rule_id, ]
        query_string += "\nWHERE rule_id = %s"
        cursor.execute(query_string, query_data)
        return cursor.fetchone()

    cols = 0
    query_data = []

    for col, val in iteritems(self.config_data):
        if val is None:
            continue
        cols += 1
        query_data.append(val)
        # First filter opens the WHERE clause; the rest are ANDed on.
        if cols == 1:
            query_string += "\n WHERE " + col + " = %s"
        else:
            query_string += "\n  AND " + col + " = %s"

    if cols > 0:
        cursor.execute(query_string, query_data)
    else:
        cursor.execute(query_string)
    return cursor.fetchall()
def _parse_group(self, group, data):
    """Register one inventory group along with its hosts, vars and children."""
    self.inventory.add_group(group)

    if not isinstance(data, dict):
        # Bare list (or scalar) is shorthand for the hosts entry.
        data = {'hosts': data}
    elif not any(k in data for k in ('hosts', 'vars', 'children')):
        # Simplified syntax: a plain mapping means one host carrying vars.
        data = {'hosts': [group], 'vars': data}

    if 'hosts' in data:
        if not isinstance(data['hosts'], list):
            raise AnsibleError("You defined a group '%s' with bad data for the host list:\n %s" % (group, data))
        for hostname in data['hosts']:
            self._hosts.add(hostname)
            self.inventory.add_host(hostname, group)

    if 'vars' in data:
        if not isinstance(data['vars'], dict):
            raise AnsibleError("You defined a group '%s' with bad data for variables:\n %s" % (group, data))
        for var_name, var_value in iteritems(data['vars']):
            self.inventory.set_variable(group, var_name, var_value)

    if group != '_meta' and isinstance(data, dict) and 'children' in data:
        for child_name in data['children']:
            self.inventory.add_group(child_name)
            self.inventory.add_child(group, child_name)
def create_rule_config(self, cursor):
    """INSERT a new mysql_query_rules row from the non-None config values.

    :returns: (True, id of the newly created rule)
    """
    query_string = \
        """INSERT INTO mysql_query_rules ("""

    cols = 0
    query_data = []

    for col, val in iteritems(self.config_data):
        if val is None:
            continue
        cols += 1
        query_data.append(val)
        query_string += "\n" + col + ","

    # Drop the trailing comma after the last column name.
    query_string = query_string[:-1]
    # One placeholder per column; trim the trailing " ," before closing.
    query_string += ")\n" + "VALUES (" + "%s ," * cols
    query_string = query_string[:-2]
    query_string += ")"

    cursor.execute(query_string, query_data)
    return True, cursor.lastrowid
def create_user_config(self, cursor):
    """INSERT a new mysql_users row.

    username/backend/frontend are always written; every non-None value in
    self.config_data is appended as an extra column. Returns True.
    """
    query_string = \
        """INSERT INTO mysql_users ( username, backend, frontend"""

    cols = 3
    query_data = [self.username, self.backend, self.frontend]

    for col, val in iteritems(self.config_data):
        if val is None:
            continue
        cols += 1
        query_data.append(val)
        query_string += ",\n" + col

    # One placeholder per column; trim the trailing " ," before closing.
    query_string += ")\n" + "VALUES (" + "%s ," * cols
    query_string = query_string[:-2]
    query_string += ")"

    cursor.execute(query_string, query_data)
    return True
def run(self, tmp=None, task_vars=None):
    """Set host facts from the task's args, optionally marking them cacheable."""
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    facts = dict()
    cacheable = boolean(self._task.args.pop('cacheable', False))

    if self._task.args:
        for (raw_name, value) in iteritems(self._task.args):
            # Fact names may themselves be templated.
            name = self._templar.template(raw_name)
            if not isidentifier(name):
                result['failed'] = True
                result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
                                 "letters, numbers and underscores." % name)
                return result

            # Normalize boolean-looking strings into real booleans.
            if isinstance(value, string_types) and value.lower() in ('true', 'false', 'yes', 'no'):
                value = boolean(value, strict=False)
            facts[name] = value

    result['changed'] = False
    result['ansible_facts'] = facts
    result['_ansible_facts_cacheable'] = cacheable
    return result
def update_user_config(self, cursor):
    """UPDATE the mysql_users row identified by username/backend/frontend
    with every non-None value in self.config_data. Returns True."""
    query_string = """UPDATE mysql_users"""

    cols = 0
    query_data = []

    for col, val in iteritems(self.config_data):
        if val is None:
            continue
        cols += 1
        query_data.append(val)
        # First column opens the SET clause; the rest continue it.
        if cols == 1:
            query_string += "\nSET " + col + "= %s,"
        else:
            query_string += "\n " + col + " = %s,"

    # Strip the trailing comma from the last SET entry.
    query_string = query_string[:-1]
    query_string += ("\nWHERE username = %s\n AND backend = %s" +
                     "\n AND frontend = %s")

    query_data += [self.username, self.backend, self.frontend]

    cursor.execute(query_string, query_data)
    return True
def merge_hash(a, b):
    """
    Recursively merges hash b into a so that keys from b take precedence
    over keys from a
    """
    _validate_mutable_mappings(a, b)

    # Trivial cases: nothing in a is worth keeping.
    if a == {} or a == b:
        return b.copy()

    result = a.copy()
    for key, b_value in iteritems(b):
        existing = result.get(key)
        if isinstance(existing, MutableMapping) and isinstance(b_value, MutableMapping):
            # Both sides are mappings: merge them recursively.
            result[key] = merge_hash(existing, b_value)
        else:
            # Otherwise b's value simply wins.
            result[key] = b_value
    return result
def create_server_config(self, cursor):
    """INSERT a new mysql_servers row.

    hostgroup_id/hostname/port are always written; every non-None value in
    self.config_data is appended as an extra column. Returns True.
    """
    query_string = \
        """INSERT INTO mysql_servers ( hostgroup_id, hostname, port"""

    cols = 3
    query_data = [self.hostgroup_id, self.hostname, self.port]

    for col, val in iteritems(self.config_data):
        if val is None:
            continue
        cols += 1
        query_data.append(val)
        query_string += ",\n" + col

    # One placeholder per column; trim the trailing " ," before closing.
    query_string += ")\n" + "VALUES (" + "%s ," * cols
    query_string = query_string[:-2]
    query_string += ")"

    cursor.execute(query_string, query_data)
    return True
def update_server_config(self, cursor):
    """UPDATE the mysql_servers row identified by hostgroup_id/hostname/port
    with every non-None value in self.config_data. Returns True."""
    query_string = """UPDATE mysql_servers"""

    cols = 0
    query_data = []

    for col, val in iteritems(self.config_data):
        if val is None:
            continue
        cols += 1
        query_data.append(val)
        # First column opens the SET clause; the rest continue it.
        if cols == 1:
            query_string += "\nSET " + col + "= %s,"
        else:
            query_string += "\n " + col + " = %s,"

    # Strip the trailing comma from the last SET entry.
    query_string = query_string[:-1]
    query_string += ("\nWHERE hostgroup_id = %s\n AND hostname = %s" +
                     "\n AND port = %s")

    query_data += [self.hostgroup_id, self.hostname, self.port]

    cursor.execute(query_string, query_data)
    return True
def dict_merge(base, other):
    """ Return a new dict object that combines base and other

    This will create a new dict object that is a combination of the key/value pairs
    from base and other. When both keys exist, the value will be selected from
    other. If the value is a list object, the two lists will be combined and
    duplicate entries removed.

    :param base: dict object to serve as base
    :param other: dict object to combine with base

    :returns: new combined dict object
    """
    if not isinstance(base, dict):
        raise AssertionError("`base` must be of type <dict>")
    if not isinstance(other, dict):
        raise AssertionError("`other` must be of type <dict>")

    combined = dict()

    for key, value in iteritems(base):
        if isinstance(value, dict):
            if key in other:
                item = other.get(key)
                if item is not None:
                    # Both sides have the key and base's value is a dict:
                    # merge recursively.
                    combined[key] = dict_merge(value, other[key])
                else:
                    # Explicit None in other overrides base's dict.
                    combined[key] = item
            else:
                combined[key] = value
        elif isinstance(value, list):
            if key in other:
                item = other.get(key)
                if item is not None:
                    try:
                        # Union the two lists, dropping duplicates; set()
                        # does not preserve order.
                        combined[key] = list(set(chain(value, item)))
                    except TypeError:
                        # Unhashable elements: append only the missing items.
                        # NOTE(review): this extend mutates the caller's
                        # `base[key]` list in place -- TODO confirm intended.
                        value.extend([i for i in item if i not in value])
                        combined[key] = value
                else:
                    combined[key] = item
            else:
                combined[key] = value
        else:
            if key in other:
                other_value = other.get(key)
                if other_value is not None:
                    if sort_list(base[key]) != sort_list(other_value):
                        # Scalar values differ: other wins.
                        combined[key] = other_value
                    else:
                        combined[key] = value
                else:
                    # Explicit None in other overrides base's scalar.
                    combined[key] = other_value
            else:
                combined[key] = value

    # Keys present only in other are copied over unchanged.
    for key in set(other.keys()).difference(base.keys()):
        combined[key] = other.get(key)

    return combined
def validate(self, all_vars=None):
    ''' validation that is done at parse time, not load time '''
    # all_vars: available variables; normalized to a dict but only used by
    # any per-field _validate_<name> hooks that consult instance state.
    all_vars = {} if all_vars is None else all_vars

    if not self._validated:
        # walk all fields in the object
        for (name, attribute) in iteritems(self._valid_attrs):
            if name in self._alias_attrs:
                # Resolve an alias to its canonical attribute name.
                name = self._alias_attrs[name]
            # run validator only if present
            method = getattr(self, '_validate_%s' % name, None)
            if method:
                method(attribute, name, getattr(self, name))
            else:
                # and make sure the attribute is of the type it should be
                value = self._attributes[name]
                if value is not None:
                    if attribute.isa == 'string' and isinstance(value, (list, dict)):
                        raise AnsibleParserError(
                            "The field '%s' is supposed to be a string type,"
                            " however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds()
                        )
        # Validation runs at most once per object.
        self._validated = True
def __init__(self, *args, **kwargs):
    """Copy every entry of the optional ``params`` mapping onto this
    instance as an attribute."""
    params = kwargs.pop('params', {})
    for name, value in iteritems(params):
        setattr(self, name, value)
def dmli(base, other):
    """ Return a new dict object that combines base and other

    This will create a new dict object that is a combination of the key/value pairs
    from base and other. When both keys exist, the value will be selected from
    other. If the value is a list object, the two lists will be combined
    based on list index.
    (This is different than the dict_merge in network_utils common)

    :param base: dict object to serve as base
    :param other: dict object to combine with base

    :returns: new combined dict object
    """
    if not isinstance(base, dict):
        raise AssertionError("`base` must be of type <dict>")
    if not isinstance(other, dict):
        raise AssertionError("`other` must be of type <dict>")

    combined = dict()

    for key, value in iteritems(base):
        if isinstance(value, dict):
            if key in other:
                item = other.get(key)
                if item is not None:
                    if isinstance(other[key], Mapping):
                        # dict-on-dict: recurse.
                        combined[key] = dmli(value, other[key])
                    else:
                        # Non-mapping in other replaces base's dict outright.
                        combined[key] = other[key]
                else:
                    combined[key] = item
            else:
                combined[key] = value
        elif isinstance(value, list):
            if key in other:
                item = other.get(key)
                if item is not None:
                    # Merge the two lists element-by-element on index:
                    # each overlapping position is itself merged via dmli
                    # (wrapped in a throwaway one-key dict).
                    left = {idx: val for idx, val in enumerate(value)}
                    right = {idx: val for idx, val in enumerate(item)}
                    for k, val in right.items():
                        if k in left:
                            if val is not None:
                                left[k] = dmli({"tmp": left[k]}, {"tmp": val})['tmp']
                        else:
                            # Positions only in other are appended.
                            left[k] = val
                    combined[key] = list(left.values())
                else:
                    combined[key] = item
            else:
                combined[key] = value
        else:
            if key in other:
                other_value = other.get(key)
                if other_value is not None:
                    if sort_list(base[key]) != sort_list(other_value):
                        # Scalar values differ: other wins.
                        combined[key] = other_value
                    else:
                        combined[key] = value
                else:
                    # Explicit None in other overrides base's scalar.
                    combined[key] = other_value
            else:
                combined[key] = value

    # Keys present only in other are copied over unchanged.
    for key in set(other.keys()).difference(base.keys()):
        combined[key] = other.get(key)

    return combined
def diff_banners(want, have):
    """Return the subset of ``want`` whose values differ from ``have``."""
    return dict(
        (key, value)
        for key, value in iteritems(want)
        if value != have.get(key)
    )
def get_api_client(self, **auth_params):
    """Build and return a kubernetes DynamicClient configured from the given
    auth parameters, module params, environment variables, kubeconfig, or
    in-cluster config (in that order of precedence)."""
    auth_params = auth_params or getattr(self, 'params', {})
    auth = {}

    # If authorization variables aren't defined, look for them in environment variables
    for true_name, arg_name in AUTH_ARG_MAP.items():
        if auth_params.get(arg_name) is None:
            env_value = os.getenv('K8S_AUTH_{0}'.format(arg_name.upper()), None) or \
                os.getenv('K8S_AUTH_{0}'.format(true_name.upper()), None)
            if env_value is not None:
                if AUTH_ARG_SPEC[arg_name].get('type') == 'bool':
                    # Env vars are strings; map the usual falsy spellings.
                    env_value = env_value.lower() not in ['0', 'false', 'no']
                auth[true_name] = env_value
        else:
            auth[true_name] = auth_params[arg_name]

    def auth_set(*names):
        # True when every named auth field is present and truthy.
        return all([auth.get(name) for name in names])

    if auth_set('username', 'password', 'host') or auth_set('api_key', 'host'):
        # We have enough in the parameters to authenticate, no need to load incluster or kubeconfig
        pass
    elif auth_set('kubeconfig') or auth_set('context'):
        try:
            kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'),
                                               persist_config=auth.get('persist_config'))
        except Exception as err:
            self.fail(msg='Failed to load kubeconfig due to %s' % to_native(err))
    else:
        # First try to do incluster config, then kubeconfig
        try:
            kubernetes.config.load_incluster_config()
        except kubernetes.config.ConfigException:
            try:
                kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'),
                                                   persist_config=auth.get('persist_config'))
            except Exception as err:
                self.fail(msg='Failed to load kubeconfig due to %s' % to_native(err))

    # Override any values in the default configuration with Ansible parameters
    # As of kubernetes-client v12.0.0, get_default_copy() is required here
    try:
        configuration = kubernetes.client.Configuration().get_default_copy()
    except AttributeError:
        configuration = kubernetes.client.Configuration()

    for key, value in iteritems(auth):
        if key in AUTH_ARG_MAP.keys() and value is not None:
            if key == 'api_key':
                # The client expects the api_key as a header-style mapping.
                setattr(configuration, key, {'authorization': "Bearer {0}".format(value)})
            else:
                setattr(configuration, key, value)

    kubernetes.client.Configuration.set_default(configuration)
    try:
        return DynamicClient(kubernetes.client.ApiClient(configuration))
    except Exception as err:
        self.fail(msg='Failed to get client due to %s' % to_native(err))
def populate_ipv6_interfaces(self, interfaces):
    """Build a facts dict mapping each interface name to its parsed IPv6 info.

    :param interfaces: mapping of interface name -> raw interface data
    :returns: dict of interface name -> {'ipv6': parsed address data}
    """
    facts = dict()
    for key, value in iteritems(interfaces):
        intf = dict()
        intf['ipv6'] = self.parse_ipv6_address(value)
        facts[key] = intf
    # Bug fix: the computed facts dict was built but never returned
    # (the sibling populate_interfaces returns its facts).
    return facts
def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to,
                         _hosts=None, _hosts_all=None):
    '''
    Returns a dictionary of so-called "magic" variables in Ansible,
    which are special variables we set internally for use.
    '''
    variables = {}
    variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
    variables['ansible_playbook_python'] = sys.executable
    variables['ansible_config_file'] = C.CONFIG_FILE

    if play:
        # This is a list of all role names of all dependencies for all roles for this play
        dependency_role_names = list(set([d.get_name() for r in play.roles for d in r.get_all_dependencies()]))
        # This is a list of all role names of all roles for this play
        play_role_names = [r.get_name() for r in play.roles]

        # ansible_role_names includes all role names, dependent or directly referenced by the play
        variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names))
        # ansible_play_role_names includes the names of all roles directly referenced by this play
        # roles that are implicitly referenced via dependencies are not listed.
        variables['ansible_play_role_names'] = play_role_names
        # ansible_dependent_role_names includes the names of all roles that are referenced via dependencies
        # dependencies that are also explicitly named as roles are included in this list
        variables['ansible_dependent_role_names'] = dependency_role_names

        # DEPRECATED: role_names should be deprecated in favor of ansible_role_names or ansible_play_role_names
        variables['role_names'] = variables['ansible_play_role_names']

        variables['ansible_play_name'] = play.get_name()

    if task:
        if task._role:
            variables['role_name'] = task._role.get_name(include_role_fqcn=False)
            variables['role_path'] = task._role._role_path
            variables['role_uuid'] = text_type(task._role._uuid)
            variables['ansible_collection_name'] = task._role._role_collection
            variables['ansible_role_name'] = task._role.get_name()

    if self._inventory is not None:
        variables['groups'] = self._inventory.get_groups_dict()
        if play:
            templar = Templar(loader=self._loader)
            if templar.is_template(play.hosts):
                # Templated host patterns cannot be resolved yet; fall back
                # to 'all' for the host-list magic variables.
                pattern = 'all'
            else:
                pattern = play.hosts or 'all'
            # add the list of hosts in the play, as adjusted for limit/filters
            if not _hosts_all:
                _hosts_all = [h.name for h in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
            if not _hosts:
                _hosts = [h.name for h in self._inventory.get_hosts()]

            variables['ansible_play_hosts_all'] = _hosts_all[:]
            variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
            variables['ansible_play_batch'] = [x for x in _hosts if x not in play._removed_hosts]

            # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
            # however this would take work in the templating engine, so for now we'll add both
            variables['play_hosts'] = variables['ansible_play_batch']

    # the 'omit' value allows params to be left out if the variable they are based on is undefined
    variables['omit'] = self._omit_token
    # Set options vars
    for option, option_value in iteritems(self._options_vars):
        variables[option] = option_value

    if self._hostvars is not None and include_hostvars:
        variables['hostvars'] = self._hostvars

    return variables
def run(self, terms, variables=None, api_token=None, project=None, team=None):
    """Resolve Manifold resource credentials for the requested labels.

    :param terms: a list of resources lookups to run.
    :param variables: ansible variables active at the time of the lookup
    :param api_token: API token
    :param project: optional project label
    :param team: optional team label
    :return: a dictionary of resources credentials
    """
    # Fall back to the environment when no explicit token was given.
    if not api_token:
        api_token = os.getenv('MANIFOLD_API_TOKEN')
    if not api_token:
        raise AnsibleError('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var')
    try:
        labels = terms
        client = ManifoldApiClient(api_token)

        # Translate the optional team/project labels into ids for the API.
        if team:
            team_data = client.get_teams(team)
            if len(team_data) == 0:
                raise AnsibleError("Team '{0}' does not exist".format(team))
            team_id = team_data[0]['id']
        else:
            team_id = None
        if project:
            project_data = client.get_projects(project)
            if len(project_data) == 0:
                raise AnsibleError("Project '{0}' does not exist".format(project))
            project_id = project_data[0]['id']
        else:
            project_id = None

        if len(labels) == 1:  # Use server-side filtering if one resource is requested
            resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0])
        else:  # Get all resources and optionally filter labels
            resources_data = client.get_resources(team_id=team_id, project_id=project_id)
            if labels:
                resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data))

        # Every explicitly requested label must have been found.
        if labels and len(resources_data) < len(labels):
            fetched_labels = [r['body']['label'] for r in resources_data]
            not_found_labels = [label for label in labels if label not in fetched_labels]
            raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels)))

        # Merge all credentials into one flat dict; cred_map remembers which
        # resource label supplied each key so collisions can be reported.
        credentials = {}
        cred_map = {}
        for resource in resources_data:
            resource_credentials = client.get_credentials(resource['id'])
            if len(resource_credentials) and resource_credentials[0]['body']['values']:
                for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']):
                    label = resource['body']['label']
                    # Later resources overwrite earlier ones; warn when it happens.
                    if cred_key in credentials:
                        display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data "
                                        "with label '{new_label}'".format(cred_key=cred_key,
                                                                          old_label=cred_map[cred_key],
                                                                          new_label=label))
                    credentials[cred_key] = cred_val
                    cred_map[cred_key] = label
        ret = [credentials]
        return ret
    except ApiError as e:
        raise AnsibleError('API Error: {0}'.format(str(e)))
    except AnsibleError as e:
        # Re-raise our own errors untouched.
        raise e
    except Exception:
        # Wrap anything unexpected with a full traceback for the user.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback))
def _render_areas(self, attr, want, have, opr=True):
    """
    This function forms the set/delete commands based on the 'opr' type
    for ospf area attributes.
    :param attr: attribute name.
    :param w: the desired config.
    :param h: the target config.
    :param opr: True/False (True = set commands, False = delete commands).
    :return: generated commands list.
    """
    commands = []
    h_lst = {}
    w_lst = want.get(attr) or []
    # Leaf attributes rendered directly under the 'area <id>' node.
    l_set = ("area_id", "shortcut", "authentication")
    if have:
        h_lst = have.get(attr) or []
    if not opr and not h_lst:
        # Deleting and target has no areas at all: remove the whole subtree.
        commands.append(self._form_attr_cmd(attr="area", opr=opr))
    elif w_lst:
        for w_area in w_lst:
            # Base command prefix for this area, e.g. "... area <id> ".
            cmd = (
                self._compute_command(
                    key="area",
                    attr=_bool_to_str(w_area["area_id"]),
                    opr=opr,
                )
                + " "
            )
            h_area = self.search_obj_in_have(h_lst, w_area, "area_id")
            if not opr and not h_area:
                # Area not present on target: delete it wholesale.
                commands.append(
                    self._form_attr_cmd(
                        key="area", attr=w_area["area_id"], opr=opr
                    )
                )
            else:
                for (key, val) in iteritems(w_area):
                    if (
                        opr
                        and key in l_set
                        and not _is_w_same(w_area, h_area, key)
                    ):
                        # Set path: only emit leaves that differ from target.
                        if key == "area_id":
                            commands.append(
                                self._form_attr_cmd(
                                    attr="area",
                                    val=_bool_to_str(val),
                                    opr=opr,
                                )
                            )
                        else:
                            commands.append(
                                cmd
                                + key
                                + " "
                                + _bool_to_str(val).replace("_", "-")
                            )
                    elif not opr and key in l_set:
                        # Delete path for leaves missing from the target.
                        if key == "area_id" and not _in_target(h_area, key):
                            commands.append(cmd)
                            continue
                        elif key != "area_id" and not _in_target(h_area, key):
                            commands.append(cmd + val + " " + key)
                    elif key == "area_type":
                        commands.extend(
                            self._render_area_type(
                                w_area, h_area, key, cmd, opr
                            )
                        )
                    elif key == "network":
                        commands.extend(
                            self._render_list_param(
                                key, w_area, h_area, cmd, opr
                            )
                        )
                    elif key == "range":
                        commands.extend(
                            self._render_list_dict_param(
                                key, w_area, h_area, cmd, opr
                            )
                        )
                    elif key == "virtual_link":
                        commands.extend(
                            self._render_vlink(
                                key, w_area, h_area, cmd, opr
                            )
                        )
    return commands
def gather_vm_facts(content, vm):
    """ Gather facts from vim.VirtualMachine object.

    Returns a dict of hardware, guest-tools, network, custom-value and
    snapshot facts for the given VM.
    """
    facts = {
        'module_hw': True,
        'hw_name': vm.config.name,
        'hw_power_status': vm.summary.runtime.powerState,
        'hw_guest_full_name': vm.summary.guest.guestFullName,
        'hw_guest_id': vm.summary.guest.guestId,
        'hw_product_uuid': vm.config.uuid,
        'hw_processor_count': vm.config.hardware.numCPU,
        'hw_memtotal_mb': vm.config.hardware.memoryMB,
        'hw_interfaces': [],
        'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
        'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
        'ipv4': None,
        'ipv6': None,
        'annotation': vm.config.annotation,
        'customvalues': {},
        'snapshots': [],
        'current_snapshot': None,
    }

    cfm = content.customFieldsManager
    # Resolve custom values: map each value's numeric key to its field name
    # when the field definition is available; fall back to the raw key.
    for value_obj in vm.summary.customValue:
        kn = value_obj.key
        if cfm is not None and cfm.field:
            for f in cfm.field:
                if f.key == value_obj.key:
                    kn = f.name
                    # Exit the loop immediately, we found it
                    break

        facts['customvalues'][kn] = value_obj.value

    # Build MAC address -> list of IP addresses from the guest network info.
    net_dict = {}
    vmnet = _get_vm_prop(vm, ('guest', 'net'))
    if vmnet:
        for device in vmnet:
            net_dict[device.macAddress] = list(device.ipAddress)

    # Pick one representative ipv4/ipv6 address; later entries overwrite
    # earlier ones (dict iteration order decides which wins).
    # NOTE(review): the '::' test only classifies compressed IPv6 forms as
    # IPv6 — a fully expanded IPv6 address would land in 'ipv4'; confirm
    # the guest API always reports compressed notation.
    for k, v in iteritems(net_dict):
        for ipaddress in v:
            if ipaddress:
                if '::' in ipaddress:
                    facts['ipv6'] = ipaddress
                else:
                    facts['ipv4'] = ipaddress

    # Enumerate NICs: any hardware device with a macAddress attribute counts;
    # eth numbering is our own sequential index, not the device slot.
    ethernet_idx = 0
    for idx, entry in enumerate(vm.config.hardware.device):
        if not hasattr(entry, 'macAddress'):
            continue

        if entry.macAddress:
            mac_addr = entry.macAddress
            mac_addr_dash = mac_addr.replace(':', '-')
        else:
            mac_addr = mac_addr_dash = None

        factname = 'hw_eth' + str(ethernet_idx)
        facts[factname] = {
            'addresstype': entry.addressType,
            'label': entry.deviceInfo.label,
            'macaddress': mac_addr,
            'ipaddresses': net_dict.get(entry.macAddress, None),
            'macaddress_dash': mac_addr_dash,
            'summary': entry.deviceInfo.summary,
        }
        facts['hw_interfaces'].append('eth' + str(ethernet_idx))
        ethernet_idx += 1

    # Merge snapshot facts when the helper returns them.
    snapshot_facts = list_snapshots(vm)
    if 'snapshots' in snapshot_facts:
        facts['snapshots'] = snapshot_facts['snapshots']
        facts['current_snapshot'] = snapshot_facts['current_snapshot']
    return facts
def main():
    """Inventory script entry point.

    Builds an Ansible dynamic inventory from Infoblox host records, grouped
    by DNS view, and prints it as JSON on stdout. Exits non-zero when the
    config file is missing or the WAPI connection cannot be set up.
    """
    args = parse_args()

    # Use the first config file that exists on disk.
    for config_file in CONFIG_FILES:
        if os.path.exists(config_file):
            break
    else:
        sys.stderr.write('unable to locate config file at /etc/ansible/infoblox.yaml\n')
        sys.exit(-1)

    try:
        loader = DataLoader()
        config = loader.load_from_file(config_file)
        provider = config.get('provider') or {}
        wapi = WapiInventory(provider)
    except Exception as exc:
        sys.stderr.write(to_text(exc))
        sys.exit(-1)

    if args.host:
        host_filter = {'name': args.host}
    else:
        host_filter = {}

    # FIX: a config file without a 'filters' section made config.get('filters')
    # return None, and the .get('view') call below raised AttributeError.
    # Default to an empty dict so filters stay optional.
    config_filters = config.get('filters') or {}

    if config_filters.get('view') is not None:
        host_filter['view'] = config_filters['view']

    if config_filters.get('extattrs'):
        extattrs = normalize_extattrs(config_filters['extattrs'])
    else:
        extattrs = {}

    hostvars = {}
    inventory = {'_meta': {'hostvars': hostvars}}

    return_fields = ['name', 'view', 'extattrs', 'ipv4addrs']
    hosts = wapi.get_object('record:host',
                            host_filter,
                            extattrs=extattrs,
                            return_fields=return_fields)
    if hosts:
        for item in hosts:
            view = item['view']
            name = item['name']

            # One inventory group per DNS view.
            if view not in inventory:
                inventory[view] = {'hosts': []}
            inventory[view]['hosts'].append(name)

            hostvars[name] = {'view': view}

            # Extensible attributes: ansible_* keys become host variables
            # directly, everything else is nested under 'extattrs'.
            if item.get('extattrs'):
                for key, value in iteritems(flatten_extattrs(item['extattrs'])):
                    if key.startswith('ansible_'):
                        hostvars[name][key] = value
                    else:
                        if 'extattrs' not in hostvars[name]:
                            hostvars[name]['extattrs'] = {}
                        hostvars[name]['extattrs'][key] = value

    sys.stdout.write(json.dumps(inventory, indent=4))
    sys.exit(0)
def main():
    """Module entry point: create, drop, dump or restore a PostgreSQL database."""
    argument_spec = pgutils.postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', required=True, aliases=['name']),
        owner=dict(type='str', default=''),
        template=dict(type='str', default=''),
        encoding=dict(type='str', default=''),
        lc_collate=dict(type='str', default=''),
        lc_ctype=dict(type='str', default=''),
        state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
        target=dict(type='path', default=''),
        target_opts=dict(type='str', default=''),
        maintenance_db=dict(type='str', default="postgres"),
        session_role=dict(type='str'),
        conn_limit=dict(type='str', default=''),
        tablespace=dict(type='path', default=''),
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    db = module.params["db"]
    owner = module.params["owner"]
    template = module.params["template"]
    encoding = module.params["encoding"]
    lc_collate = module.params["lc_collate"]
    lc_ctype = module.params["lc_ctype"]
    target = module.params["target"]
    target_opts = module.params["target_opts"]
    state = module.params["state"]
    changed = False
    maintenance_db = module.params['maintenance_db']
    session_role = module.params["session_role"]
    conn_limit = module.params['conn_limit']
    tablespace = module.params['tablespace']

    # dump/restore shell out to pg_dump/pg_restore, no psycopg2 connection.
    raw_connection = state in ("dump", "restore")

    if not raw_connection:
        pgutils.ensure_required_libs(module)

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    # FIX: the login_user/login_password targets had been mangled to the
    # placeholder "******"; both module params then mapped to the same
    # invalid psycopg2.connect() keyword. Restored the real psycopg2
    # parameter names 'user' and 'password'.
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }
    kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
              if k in params_map and v != '' and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]

    # Default dump/restore target to <cwd>/<db>.sql.
    if target == "":
        target = "{0}/{1}.sql".format(os.getcwd(), db)
    target = os.path.expanduser(target)

    if not raw_connection:
        try:
            db_connection = psycopg2.connect(database=maintenance_db, **kw)

            # Enable autocommit so we can create databases
            if psycopg2.__version__ >= '2.4.2':
                db_connection.autocommit = True
            else:
                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

        except pgutils.LibraryError as e:
            module.fail_json(msg="unable to connect to database: {0}".format(to_native(e)),
                             exception=traceback.format_exc())

        except TypeError as e:
            # Old servers/drivers reject the sslrootcert keyword entirely.
            if 'sslrootcert' in e.args[0]:
                module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
                                 exception=traceback.format_exc())
            module.fail_json(msg="unable to connect to database: %s" % to_native(e),
                             exception=traceback.format_exc())

        except Exception as e:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e),
                             exception=traceback.format_exc())

        if session_role:
            try:
                cursor.execute('SET ROLE %s' % pg_quote_identifier(session_role, 'role'))
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e),
                                 exception=traceback.format_exc())

    try:
        # In check mode only report whether a change would happen.
        if module.check_mode:
            if state == "absent":
                changed = db_exists(cursor, db)
            elif state == "present":
                changed = not db_matches(cursor, db, owner, template, encoding,
                                         lc_collate, lc_ctype, conn_limit, tablespace)
            module.exit_json(changed=changed, db=db)

        if state == "absent":
            try:
                changed = db_delete(cursor, db)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == "present":
            try:
                changed = db_create(cursor, db, owner, template, encoding,
                                    lc_collate, lc_ctype, conn_limit, tablespace)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state in ("dump", "restore"):
            method = state == "dump" and db_dump or db_restore
            try:
                rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
                if rc != 0:
                    module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
                else:
                    module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except SystemExit:
        # Avoid catching this on Python 2.4
        raise
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e),
                         exception=traceback.format_exc())

    module.exit_json(changed=changed, db=db)
def main():
    """Entry point: gather the requested nxos fact subsets and exit."""
    spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )
    spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    # Split the requested subsets into selected and rejected sets.
    selected = set()
    rejected = set()
    for entry in module.params['gather_subset']:
        if entry == 'all':
            selected.update(VALID_SUBSETS)
            continue
        negate = entry.startswith('!')
        if negate:
            entry = entry[1:]
            if entry == 'all':
                rejected.update(VALID_SUBSETS)
                continue
        if entry not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')
        (rejected if negate else selected).add(entry)

    # No positive selection means "everything"; exclusions then apply,
    # and 'default' facts are always collected.
    if not selected:
        selected.update(VALID_SUBSETS)
    selected.difference_update(rejected)
    selected.add('default')

    facts = dict(gather_subset=list(selected))

    collectors = [FACT_SUBSETS[name](module) for name in selected]
    for collector in collectors:
        collector.populate()
        facts.update(collector.facts)
        warnings.extend(collector.warnings)

    ansible_facts = dict()
    for key, value in iteritems(facts):
        # this is to maintain capability with nxos_facts 2.1
        if key.startswith('_'):
            ansible_facts[key[1:]] = value
        else:
            ansible_facts['ansible_net_%s' % key] = value

    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
def load_params(module):
    """Promote values from the 'provider' dict into the top-level module
    params when the key is a known eos argument and not already set."""
    provider = module.params.get('provider') or dict()
    for key, value in iteritems(provider):
        if key not in eos_argument_spec:
            continue
        # Fill in missing top-level values only; never clobber explicit ones.
        if value is not None and module.params.get(key) is None:
            module.params[key] = value
def to_tuple(self, items):
    """Flatten a sequence of mappings into one list of (key, value)
    tuples, coercing both members to str."""
    pairs = []
    for mapping in items:
        for key, value in iteritems(mapping):
            pairs.append((str(key), str(value)))
    return pairs
def _render_area_type(self, want, have, attr, cmd, opr=True):
    """
    This function forms the set/delete commands based on the 'opr' type
    for area_types attributes.
    :param attr: attribute name.
    :param w: the desired config.
    :param h: the target config.
    :param cmd: command to prepend.
    :param opr: True/False (True = set commands, False = delete commands).
    :return: generated commands list.
    """
    commands = []
    h_type = {}
    w_type = want.get(attr) or []
    if have:
        h_type = have.get(attr) or {}
    if not opr and not h_type:
        # Deleting and nothing on target: drop the whole area-type node.
        commands.append(cmd + attr.replace("_", "-"))
    elif w_type:
        # 'normal' is a simple boolean flag on the area type.
        key = "normal"
        if (
            opr
            and key in w_type.keys()
            and not _is_w_same(w_type, h_type, key)
        ):
            if not w_type[key] and h_type and h_type[key]:
                # Flag turned off but present on target: emit a delete.
                commands.append(
                    cmd.replace("set", "delete")
                    + attr.replace("_", "-")
                    + " "
                    + key
                )
            elif w_type[key]:
                commands.append(cmd + attr.replace("_", "-") + " " + key)
        elif (
            not opr
            and key in w_type.keys()
            and not (h_type and key in h_type.keys())
        ):
            commands.append(
                cmd + want["area"] + " " + attr.replace("_", "-")
            )
        # nssa/stub sub-nodes and the per-node attributes they accept.
        a_type = {
            "nssa": ("set", "default_cost", "no_summary", "translate"),
            "stub": ("set", "default_cost", "no_summary"),
        }
        for key in a_type:
            w_area = want[attr].get(key) or {}
            h_area = {}
            if w_area:
                if h_type and key in h_type.keys():
                    h_area = h_type.get(key) or {}
                for (item, val) in iteritems(w_type[key]):
                    if (
                        opr
                        and item in a_type[key]
                        and not _is_w_same(w_type[key], h_area, item)
                    ):
                        if item == "set" and val:
                            # Bare enable of the nssa/stub node.
                            commands.append(
                                cmd + attr.replace("_", "-") + " " + key
                            )
                        elif not val and h_area and h_area[item]:
                            # Attribute disabled but set on target: delete it.
                            commands.append(
                                cmd.replace("set", "delete")
                                + attr.replace("_", "-")
                                + " "
                                + key
                            )
                        elif item != "set":
                            commands.append(
                                cmd
                                + attr.replace("_", "-")
                                + " "
                                + key
                                + " "
                                + item.replace("_", "-")
                                + " "
                                + str(val)
                            )
                    elif (
                        not opr
                        and item in a_type[key]
                        and not (h_type and key in h_type)
                    ):
                        # Delete path for attributes absent from the target.
                        if item == "set":
                            commands.append(
                                cmd + attr.replace("_", "-") + " " + key
                            )
                        else:
                            commands.append(
                                cmd
                                + want["area"]
                                + " "
                                + attr.replace("_", "-")
                                + " "
                                + key
                                + " "
                                + item.replace("_", "-")
                            )
    return commands
def main():
    """Module entry point: issue the HTTP(S) request described by the module
    params, optionally write the response body to 'dest', and exit with the
    (flattened) response headers, status and content as module results."""
    argument_spec = url_argument_spec()
    argument_spec.update(
        dest=dict(type='path'),
        url_username=dict(type='str', aliases=['user']),
        url_password=dict(type='str', aliases=['password'], no_log=True),
        body=dict(type='raw'),
        body_format=dict(type='str', default='raw', choices=['form-urlencoded', 'json', 'raw']),
        src=dict(type='path'),
        method=dict(type='str', default='GET'),
        return_content=dict(type='bool', default=False),
        follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
        creates=dict(type='path'),
        removes=dict(type='path'),
        status_code=dict(type='list', default=[200]),
        timeout=dict(type='int', default=30),
        headers=dict(type='dict', default={}),
        unix_socket=dict(type='path'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        mutually_exclusive=[['body', 'src']],
    )

    url = module.params['url']
    body = module.params['body']
    body_format = module.params['body_format'].lower()
    method = module.params['method'].upper()
    dest = module.params['dest']
    return_content = module.params['return_content']
    creates = module.params['creates']
    removes = module.params['removes']
    status_code = [int(x) for x in list(module.params['status_code'])]
    socket_timeout = module.params['timeout']
    dict_headers = module.params['headers']

    # Reject anything that is not a plain upper-case HTTP verb.
    if not re.match('^[A-Z]+$', method):
        module.fail_json(msg="Parameter 'method' needs to be a single word in uppercase, like GET or POST.")

    if body_format == 'json':
        # Encode the body unless its a string, then assume it is pre-formatted JSON
        if not isinstance(body, string_types):
            body = json.dumps(body)
        if 'content-type' not in [header.lower() for header in dict_headers]:
            dict_headers['Content-Type'] = 'application/json'
    elif body_format == 'form-urlencoded':
        if not isinstance(body, string_types):
            try:
                body = form_urlencoded(body)
            except ValueError as e:
                module.fail_json(msg='failed to parse body as form_urlencoded: %s' % to_native(e), elapsed=0)
        if 'content-type' not in [header.lower() for header in dict_headers]:
            dict_headers['Content-Type'] = 'application/x-www-form-urlencoded'

    if creates is not None:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of uri executions.
        if os.path.exists(creates):
            module.exit_json(stdout="skipped, since '%s' exists" % creates, changed=False)

    if removes is not None:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of uri executions.
        if not os.path.exists(removes):
            module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)

    # Make the request
    start = datetime.datetime.utcnow()
    resp, content, dest = uri(module, url, dest, body, body_format, method,
                              dict_headers, socket_timeout)
    resp['elapsed'] = (datetime.datetime.utcnow() - start).seconds
    resp['status'] = int(resp['status'])
    resp['changed'] = False

    # Write the file out if requested
    if dest is not None:
        # 304 (not modified) means there is nothing new to write.
        if resp['status'] in status_code and resp['status'] != 304:
            write_file(module, url, dest, content, resp)
            # allow file attribute changes
            resp['changed'] = True
            module.params['path'] = dest
            file_args = module.load_file_common_arguments(module.params)
            file_args['path'] = dest
            resp['changed'] = module.set_fs_attributes_if_different(file_args, resp['changed'])
        resp['path'] = dest

    # Transmogrify the headers, replacing '-' with '_', since variables don't
    # work with dashes.
    # In python3, the headers are title cased. Lowercase them to be
    # compatible with the python2 behaviour.
    uresp = {}
    for key, value in iteritems(resp):
        ukey = key.replace("-", "_").lower()
        uresp[ukey] = value

    if 'location' in uresp:
        uresp['location'] = absolute_location(url, uresp['location'])

    # Default content_encoding to try
    content_encoding = 'utf-8'
    if 'content_type' in uresp:
        # Handle multiple Content-Type headers
        charsets = []
        content_types = []
        for value in uresp['content_type'].split(','):
            ct, params = cgi.parse_header(value)
            if ct not in content_types:
                content_types.append(ct)
            if 'charset' in params:
                if params['charset'] not in charsets:
                    charsets.append(params['charset'])
        if content_types:
            content_type = content_types[0]
            if len(content_types) > 1:
                module.warn('Received multiple conflicting Content-Type values (%s), using %s' % (', '.join(content_types), content_type))
        if charsets:
            content_encoding = charsets[0]
            if len(charsets) > 1:
                module.warn('Received multiple conflicting charset values (%s), using %s' % (', '.join(charsets), content_encoding))

        u_content = to_text(content, encoding=content_encoding)
        # Parse the body as JSON when the content type looks like JSON.
        if any(candidate in content_type for candidate in JSON_CANDIDATES):
            try:
                js = json.loads(u_content)
                uresp['json'] = js
            except Exception:
                if PY2:
                    sys.exc_clear()  # Avoid false positive traceback in fail_json() on Python 2
    else:
        u_content = to_text(content, encoding=content_encoding)

    if resp['status'] not in status_code:
        uresp['msg'] = 'Status code was %s and not %s: %s' % (resp['status'], status_code, uresp.get('msg', ''))
        module.fail_json(content=u_content, **uresp)
    elif return_content:
        module.exit_json(content=u_content, **uresp)
    else:
        module.exit_json(**uresp)
def run(self, tmp=None, task_vars=None):
    """Action plugin entry point.

    Loads one or more directive files (from 'dir' or 'file'), runs each
    directive against 'contents' (with optional when/loop/register/export
    handling), and returns any accumulated facts in 'ansible_facts'.
    """
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)

    try:
        source_dir = self._task.args.get('dir')
        source_file = self._task.args.get('file')
        contents = self._task.args['contents']
    except KeyError as exc:
        return {'failed': True, 'msg': 'missing required argument: %s' % exc}

    # Exactly one of 'dir' / 'file' must be provided.
    if not source_dir and not source_file:
        return {'failed': True, 'msg': 'one of `dir` or `file` must be specified'}
    elif source_dir and source_file:
        return {'failed': True, 'msg': '`dir` and `file` are mutually exclusive arguments'}

    if source_dir:
        sources = self.get_files(to_list(source_dir))
    else:
        sources = to_list(source_file)

    self.facts = {}

    for src in sources:
        # FIX: the original test `not exists(src) and not isfile(src)` reduced
        # to `not exists(src)` (isfile implies exists), leaving the isfile
        # check dead. Use `or` so a path that exists but is not a regular
        # file is also rejected, matching the "missing or invalid" message.
        if not os.path.exists(src) or not os.path.isfile(src):
            raise AnsibleError("src is either missing or invalid")

        tasks = self._loader.load_from_file(src)

        self.ds = {'contents': contents}
        self.ds.update(task_vars)

        for task in tasks:
            name = task.pop('name', None)
            register = task.pop('register', None)
            export = task.pop('export', False)

            display.vvvv('processing directive: %s' % name)

            when = task.pop('when', None)
            if when is not None:
                if not self._check_conditional(when, task_vars):
                    warning('skipping task due to conditional check failure')
                    continue

            loop = task.pop('loop', None)
            if loop:
                loop = self.template(loop, self.ds)

                res = list()
                # loop is a hash so break out key and value
                # NOTE(review): collections.Mapping is removed in Python 3.10+;
                # this should eventually move to collections.abc.Mapping.
                if isinstance(loop, collections.Mapping):
                    for loop_key, loop_value in iteritems(loop):
                        self.ds['item'] = {'key': loop_key, 'value': loop_value}
                        resp = self._process_directive(task)
                        res.append(resp)
                # loop is either a list or a string
                else:
                    for loop_item in loop:
                        self.ds['item'] = loop_item
                        resp = self._process_directive(task)
                        res.append(resp)
            else:
                res = self._process_directive(task)

            if register:
                self.ds[register] = res

            if export:
                kwargs = {register: res}
                self.do_export_facts(**kwargs)

            if 'facts' in self.ds:
                self.facts.update(self.ds['facts'])

    result['ansible_facts'] = self.facts
    return result
def assigned_role(self):
    """Map the stored assigned_role value back through the inverse of
    role_map; return the raw value unchanged when no mapping exists."""
    role = self._values['assigned_role']
    if role is None:
        return None
    # Invert role_map so we can look up by value.
    inverse = dict((v, k) for k, v in iteritems(self.role_map))
    return inverse.get(role, role)
def main():
    """main entry point for module execution
    """
    argument_spec = dict(
        gather_subset=dict(default=["!config", "!mpls"], type='list')
    )
    argument_spec.update(ironware_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Partition the requested subsets into chosen and excluded sets.
    chosen = set()
    excluded = set()
    for token in module.params['gather_subset']:
        if token == 'all':
            chosen.update(VALID_SUBSETS)
            continue
        negated = token.startswith('!')
        if negated:
            token = token[1:]
            if token == 'all':
                excluded.update(VALID_SUBSETS)
                continue
        if token not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')
        (excluded if negated else chosen).add(token)

    # No positive selection means "everything"; exclusions apply afterwards,
    # and 'default' facts are always gathered.
    if not chosen:
        chosen.update(VALID_SUBSETS)
    chosen.difference_update(excluded)
    chosen.add('default')

    facts = dict(gather_subset=list(chosen))

    collectors = [FACT_SUBSETS[name](module) for name in chosen]
    for collector in collectors:
        collector.populate()
        facts.update(collector.facts)

    # Publish everything under the ansible_net_* namespace.
    ansible_facts = dict()
    for key, value in iteritems(facts):
        ansible_facts['ansible_net_%s' % key] = value

    check_args(module)

    module.exit_json(ansible_facts=ansible_facts)
def update(self, tables, params, owner, check_mode=True):
    """Update the publication.
    Args:
        tables (list): List with names of the tables that need to be presented in the publication.
        params (dict): Dict contains optional publication parameters and their values.
        owner (str): Name of the publication owner.

    Kwargs:
        check_mode (bool): If True, don't actually change anything,
            just make SQL, add it to ``self.executed_queries`` and return True.

    Returns:
        changed (bool): True if publication has been updated, otherwise False.
    """
    changed = False

    # Add or drop tables from published tables suit:
    if tables and not self.attrs['alltables']:

        # 1. If needs to add table to the publication:
        for tbl in tables:
            if tbl not in self.attrs['tables']:
                # If needs to add table to the publication:
                changed = self.__pub_add_table(tbl, check_mode=check_mode)

        # 2. if there is a table in targeted tables
        # that's not presented in the passed tables:
        for tbl in self.attrs['tables']:
            if tbl not in tables:
                changed = self.__pub_drop_table(tbl, check_mode=check_mode)

    elif tables and self.attrs['alltables']:
        # Switch from FOR ALL TABLES to an explicit table list.
        changed = self.__pub_set_tables(tables, check_mode=check_mode)

    # Update pub parameters:
    if params:
        for key, val in iteritems(params):
            if self.attrs['parameters'].get(key):
                # In PostgreSQL 10/11 only 'publish' optional parameter is presented.
                if key == 'publish':
                    # 'publish' value can be only a string with comma-separated items
                    # of allowed DML operations like 'insert,update' or
                    # 'insert,update,delete', etc.
                    # Make dictionary to compare with current attrs later:
                    val_dict = self.attrs['parameters']['publish'].copy()
                    val_list = val.split(',')
                    for v in val_dict:
                        if v in val_list:
                            val_dict[v] = True
                        else:
                            val_dict[v] = False

                    # Compare val_dict and the dict with current 'publish' parameters,
                    # if they're different, set new values:
                    if val_dict != self.attrs['parameters']['publish']:
                        changed = self.__pub_set_param(key, val, check_mode=check_mode)

                # Default behavior for other cases:
                elif self.attrs['parameters'][key] != val:
                    changed = self.__pub_set_param(key, val, check_mode=check_mode)

            else:
                # If the parameter was not set before:
                changed = self.__pub_set_param(key, val, check_mode=check_mode)

    # Update pub owner:
    if owner:
        if owner != self.attrs['owner']:
            changed = self.__pub_set_owner(owner, check_mode=check_mode)

    return changed
def get_required_config(self):
    """Rebuild self._required_config from the module params, keeping only
    keys listed in PROTOCOL_MAPPING whose value is not None."""
    params = self._module.params
    self._required_config = dict(
        (key, val) for key, val in iteritems(params)
        if key in self.PROTOCOL_MAPPING and val is not None
    )
def _execute(self, variables=None): ''' The primary workhorse of the executor system, this runs the task on the specified host (which may be the delegated_to host) and handles the retry/until and block rescue/always execution ''' if variables is None: variables = self._job_vars templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) context_validation_error = None try: # apply the given task's information to the connection info, # which may override some fields already set by the play or # the options specified on the command line self._play_context = self._play_context.set_task_and_variable_override( task=self._task, variables=variables, templar=templar) # fields set from the play/task may be based on variables, so we have to # do the same kind of post validation step on it here before we use it. self._play_context.post_validate(templar=templar) # now that the play context is finalized, if the remote_addr is not set # default to using the host's address field as the remote address if not self._play_context.remote_addr: self._play_context.remote_addr = self._host.address # We also add "magic" variables back into the variables dict to make sure # a certain subset of variables exist. self._play_context.update_vars(variables) except AnsibleError as e: # save the error, which we'll raise later if we don't end up # skipping this task during the conditional evaluation step context_validation_error = e # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. 
We do this before the post validation due to # the fact that the conditional may specify that the task be skipped due to a # variable not being present which would otherwise cause validation to fail try: if not self._task.evaluate_conditional(templar, variables): display.debug("when evaluation is False, skipping this task") return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log) except AnsibleError: # loop error takes precedence if self._loop_eval_error is not None: raise self._loop_eval_error # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags if self._task.action not in [ 'include', 'include_tasks', 'include_role' ]: raise # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task if self._loop_eval_error is not None: raise self._loop_eval_error # if we ran into an error while setting up the PlayContext, raise it now if context_validation_error is not None: raise context_validation_error # if this task is a TaskInclude, we just return now with a success code so the # main thread can expand the task list for the given host if self._task.action in ('include', 'include_tasks'): include_variables = self._task.args.copy() include_file = include_variables.pop('_raw_params', None) if not include_file: return dict(failed=True, msg="No include file was specified to the include") include_file = templar.template(include_file) return dict(include=include_file, include_variables=include_variables) # if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host elif self._task.action == 'include_role': include_variables = self._task.args.copy() return dict(include_role=self._task, include_variables=include_variables) # Now we do final validation on the task, which sets all fields to their final 
values. self._task.post_validate(templar=templar) if '_variable_params' in self._task.args: variable_params = self._task.args.pop('_variable_params') if isinstance(variable_params, dict): display.deprecated( "Using variables for task params is unsafe, especially if the variables come from an external source like facts", version="2.6") variable_params.update(self._task.args) self._task.args = variable_params # get the connection and the handler for this execution if (not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr): self._connection = self._get_connection(variables=variables, templar=templar) hostvars = variables.get('hostvars', None) # only template the vars if the connection actually implements set_host_overrides # NB: this is expensive, and should be removed once connection-specific vars are being handled by play_context sho_impl = getattr(type(self._connection), 'set_host_overrides', None) if hostvars and sho_impl and sho_impl != ConnectionBase.set_host_overrides: try: target_hostvars = hostvars.get(self._host.name) except: # FIXME: this should catch the j2undefined error here # specifically instead of all exceptions target_hostvars = dict() self._connection.set_host_overrides(host=self._host, hostvars=target_hostvars) else: # if connection is reused, its _play_context is no longer valid and needs # to be replaced with the one templated above, in case other data changed self._connection._play_context = self._play_context self._handler = self._get_action_handler(connection=self._connection, templar=templar) # And filter out any fields which were set to default(omit), and got the omit token value omit_token = variables.get('omit') if omit_token is not None: self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token) # Read some values from the task, so that we can modify them if need be if self._task.until: retries = 
self._task.retries if retries is None: retries = 3 elif retries <= 0: retries = 1 else: retries += 1 else: retries = 1 delay = self._task.delay if delay < 0: delay = 1 # make a copy of the job vars here, in case we need to update them # with the registered variable value later on when testing conditions vars_copy = variables.copy() display.debug("starting attempt loop") result = None for attempt in range(1, retries + 1): display.debug("running the handler") try: result = self._handler.run(task_vars=variables) except AnsibleActionSkip as e: return dict(skipped=True, msg=to_text(e)) except AnsibleActionFail as e: return dict(failed=True, msg=to_text(e)) except AnsibleConnectionFailure as e: return dict(unreachable=True, msg=to_text(e)) display.debug("handler run complete") # preserve no log result["_ansible_no_log"] = self._play_context.no_log # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: vars_copy[self._task.register] = wrap_var(result.copy()) if self._task. 
async > 0: if self._task.poll > 0 and not result.get( 'skipped') and not result.get('failed'): result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy) # FIXME callback 'v2_runner_on_async_poll' here # ensure no log is preserved result["_ansible_no_log"] = self._play_context.no_log # helper methods for use below in evaluating changed/failed_when def _evaluate_changed_when_result(result): if self._task.changed_when is not None and self._task.changed_when: cond = Conditional(loader=self._loader) cond.when = self._task.changed_when result['changed'] = cond.evaluate_conditional( templar, vars_copy) def _evaluate_failed_when_result(result): if self._task.failed_when: cond = Conditional(loader=self._loader) cond.when = self._task.failed_when failed_when_result = cond.evaluate_conditional( templar, vars_copy) result['failed_when_result'] = result[ 'failed'] = failed_when_result else: failed_when_result = False return failed_when_result if 'ansible_facts' in result: if not C.NAMESPACE_FACTS: vars_copy.update(result['ansible_facts']) vars_copy.update({'ansible_facts': result['ansible_facts']}) # set the failed property if it was missing. if 'failed' not in result: # rc is here for backwards compatibility and modules that use it instead of 'failed' if 'rc' in result and result['rc'] not in [0, "0"]: result['failed'] = True else: result['failed'] = False # set the changed property if it was missing. 
if 'changed' not in result: result['changed'] = False # if we didn't skip this task, use the helpers to evaluate the changed/ # failed_when properties if 'skipped' not in result: _evaluate_changed_when_result(result) _evaluate_failed_when_result(result) if retries > 1: cond = Conditional(loader=self._loader) cond.when = self._task.until result['attempts'] = attempt if cond.evaluate_conditional(templar, vars_copy): break else: # no conditional check, or it failed, so sleep for the specified time if attempt < retries: result['_ansible_retry'] = True result['retries'] = retries display.debug('Retrying task, attempt %d of %d' % (attempt, retries)) self._rslt_q.put(TaskResult( self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False) time.sleep(delay)
def main():
    """Entry point of the postgresql_idx Ansible module.

    Builds the argument spec, validates mutually exclusive options,
    connects to PostgreSQL and creates or drops the requested index
    (optionally CONCURRENTLY), honoring check mode.

    Exits via module.exit_json()/fail_json(); never returns normally.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        idxname=dict(type='str', required=True, aliases=['name']),
        db=dict(type='str', aliases=['login_db']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        concurrent=dict(type='bool', default=True),
        table=dict(type='str'),
        idxtype=dict(type='str', aliases=['type']),
        columns=dict(type='list', aliases=['column']),
        cond=dict(type='str'),
        session_role=dict(type='str'),
        tablespace=dict(type='str'),
        storage_params=dict(type='list'),
        cascade=dict(type='bool', default=False),
        schema=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not HAS_PSYCOPG2:
        module.fail_json(msg=missing_required_lib('psycopg2'),
                         exception=PSYCOPG2_IMP_ERR)

    idxname = module.params["idxname"]
    state = module.params["state"]
    concurrent = module.params["concurrent"]
    table = module.params["table"]
    idxtype = module.params["idxtype"]
    columns = module.params["columns"]
    cond = module.params["cond"]
    sslrootcert = module.params["ca_cert"]
    session_role = module.params["session_role"]
    tablespace = module.params["tablespace"]
    storage_params = module.params["storage_params"]
    cascade = module.params["cascade"]
    schema = module.params["schema"]

    # CONCURRENTLY cannot be combined with CASCADE dropping of dependents.
    # FIX: corrected typo "Cuncurrent" in the user-facing message.
    if concurrent and cascade:
        module.fail_json(
            msg="Concurrent mode and cascade parameters are mutually exclusive"
        )

    if state == 'present':
        if not table:
            module.fail_json(msg="Table must be specified")
        if not columns:
            module.fail_json(msg="At least one column must be specified")
    else:
        # Creation-only options make no sense when removing an index.
        if table or columns or cond or idxtype or tablespace:
            module.fail_json(
                msg="Index %s is going to be removed, so it does not "
                "make sense to pass a table name, columns, conditions, "
                "index type, or tablespace" % idxname)

    if cascade and state != 'absent':
        module.fail_json(msg="cascade parameter used only with state=absent")

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw dictionary.
    # FIX: restored the psycopg2 connect() keyword names ("user",
    # "password") that had been corrupted to "******" — psycopg2 requires
    # exactly these keyword argument names.
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "db": "database",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }
    kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
              if k in params_map and v != "" and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]

    # sslrootcert support appeared in psycopg2 2.4.3.
    # FIX: corrected typo "to user the" in the user-facing message.
    if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
        module.fail_json(
            msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')

    try:
        db_connection = psycopg2.connect(**kw)
        if concurrent:
            # CREATE/DROP INDEX CONCURRENTLY cannot run inside a
            # transaction block, so autocommit is mandatory here.
            if psycopg2.__version__ >= '2.4.2':
                db_connection.set_session(autocommit=True)
            else:
                db_connection.set_isolation_level(
                    psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

        cursor = db_connection.cursor(
            cursor_factory=psycopg2.extras.DictCursor)
    except TypeError as e:
        # Old servers/drivers reject the sslrootcert keyword with TypeError.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(
                msg='Postgresql server must be at least version 8.4 to support sslrootcert')
        module.fail_json(msg="Unable to connect to database: %s" % to_native(e))
    except Exception as e:
        module.fail_json(msg="Unable to connect to database: %s" % to_native(e))

    if session_role:
        try:
            cursor.execute('SET ROLE %s' % session_role)
        except Exception as e:
            module.fail_json(msg="Could not switch role: %s" % to_native(e))

    # Set defaults:
    changed = False

    # Do job:
    index = Index(module, cursor, schema, idxname)
    kw = index.get_info()
    kw['query'] = ''

    # check_mode: report what would change without touching the database.
    if module.check_mode:
        if state == 'present' and index.exists:
            kw['changed'] = False
            module.exit_json(**kw)

        elif state == 'present' and not index.exists:
            kw['changed'] = True
            module.exit_json(**kw)

        elif state == 'absent' and not index.exists:
            kw['changed'] = False
            module.exit_json(**kw)

        elif state == 'absent' and index.exists:
            kw['changed'] = True
            module.exit_json(**kw)

    if state == "present":
        if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
            module.fail_json(
                msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))

        columns = ','.join(columns)

        if storage_params:
            storage_params = ','.join(storage_params)

        changed = index.create(table, idxtype, columns, cond,
                               tablespace, storage_params, concurrent)

        if changed:
            kw = index.get_info()
            kw['state'] = 'present'
            kw['query'] = index.executed_query

    else:
        changed = index.drop(schema, cascade, concurrent)

        if changed:
            kw['state'] = 'absent'
            kw['query'] = index.executed_query

    # A concurrently-built index can end up INVALID; undo and warn.
    if not kw['valid']:
        db_connection.rollback()
        module.warn("Index %s is invalid! ROLLBACK" % idxname)

    if not concurrent:
        db_connection.commit()

    kw['changed'] = changed
    module.exit_json(**kw)
def gen_config(self): """Select the appropriate function based on the state provided :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ if self.want: wantd = {} for entry in self.want.get("processes", []): wantd.update({(entry["process_id"], entry.get("vrf")): entry}) else: wantd = {} if self.have: haved = {} for entry in self.have.get("processes", []): haved.update({(entry["process_id"], entry.get("vrf")): entry}) else: haved = {} # turn all lists of dicts into dicts prior to merge for thing in wantd, haved: for _pid, proc in iteritems(thing): for area in proc.get("areas", []): ranges = {} for entry in area.get("ranges", []): ranges.update({entry["address"]: entry}) if bool(ranges): area["ranges"] = ranges filter_list = {} for entry in area.get("filter_list", []): filter_list.update({entry["direction"]: entry}) if bool(filter_list): area["filter_list"] = filter_list temp = {} for entry in proc.get("areas", []): temp.update({entry["area_id"]: entry}) proc["areas"] = temp if proc.get("distribute_list"): if "acls" in proc.get("distribute_list"): temp = {} for entry in proc["distribute_list"].get("acls", []): temp.update({entry["name"]: entry}) proc["distribute_list"]["acls"] = temp # if state is merged, merge want onto have if self.state == "merged": wantd = dict_merge(haved, wantd) # if state is deleted, limit the have to anything in want # set want to nothing if self.state == "deleted": temp = {} for k, v in iteritems(haved): if k in wantd or not wantd: temp.update({k: v}) haved = temp wantd = {} # delete processes first so we do run into "more than one" errors if self.state in ["overridden", "deleted"]: for k, have in iteritems(haved): if k not in wantd: self.addcmd(have, "pid", True) for k, want in iteritems(wantd): self._compare(want=want, have=haved.pop(k, {}))
def get_device_facts(self):
    """Collect block-device facts from /sys/block and friends.

    Returns a dict with two keys: 'devices' (per-disk dicts keyed by
    disk name, each with vendor/model/serial, partitions, sizes,
    scheduler, holders, links, wwn, and PCI host info) and
    'device_links' (from self.get_all_device_links()).
    Best-effort throughout: missing files/binaries simply leave the
    corresponding fact empty.
    """
    device_facts = {}
    device_facts['devices'] = {}
    # lspci -D prints the full PCI domain; used later to resolve the
    # controller ("host") a disk hangs off.
    lspci = self.module.get_bin_path('lspci')
    if lspci:
        rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
    else:
        pcidata = None

    try:
        block_devs = os.listdir("/sys/block")
    except OSError:
        # no sysfs block info at all -> return the (empty) skeleton
        return device_facts

    # Map kernel device name -> WWN, derived from /dev/disk/by-id/wwn-*
    # symlinks (the "wwn-" prefix is stripped from the link name).
    devs_wwn = {}
    try:
        devs_by_id = os.listdir("/dev/disk/by-id")
    except OSError:
        pass
    else:
        for link_name in devs_by_id:
            if link_name.startswith("wwn-"):
                try:
                    wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name))
                except OSError:
                    continue
                devs_wwn[os.path.basename(wwn_link)] = link_name[4:]

    links = self.get_all_device_links()
    device_facts['device_links'] = links

    for block in block_devs:
        virtual = 1
        sysfs_no_links = 0
        try:
            # /sys/block entries are normally symlinks into the device tree
            path = os.readlink(os.path.join("/sys/block/", block))
        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.EINVAL:
                # not a symlink (older/flat sysfs layout): use the name as-is
                path = block
                sysfs_no_links = 1
            else:
                continue
        sysdir = os.path.join("/sys/block", path)

        if sysfs_no_links == 1:
            # without a symlink target, detect a real (non-virtual) device
            # by the presence of a "device" entry in its sysfs dir
            for folder in os.listdir(sysdir):
                if "device" in folder:
                    virtual = 0
                    break

        d = {}
        d['virtual'] = virtual
        d['links'] = {}
        for (link_type, link_values) in iteritems(links):
            d['links'][link_type] = link_values.get(block, [])
        diskname = os.path.basename(sysdir)
        for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
            d[key] = get_file_content(sysdir + "/device/" + key)

        # Prefer the SCSI INQUIRY serial number when sg_inq is available.
        sg_inq = self.module.get_bin_path('sg_inq')
        if sg_inq:
            device = "/dev/%s" % (block)
            rc, drivedata, err = self.module.run_command([sg_inq, device])
            if rc == 0:
                serial = re.search(r"Unit serial number:\s+(\w+)", drivedata)
                if serial:
                    d['serial'] = serial.group(1)

        for key, test in [('removable', '/removable'), ('support_discard', '/queue/discard_granularity'), ]:
            d[key] = get_file_content(sysdir + test)

        if diskname in devs_wwn:
            d['wwn'] = devs_wwn[diskname]

        # Partitions appear as subdirectories named like the disk plus an
        # optional 'p' and a number (e.g. sda1, nvme0n1p1).
        d['partitions'] = {}
        for folder in os.listdir(sysdir):
            m = re.search("(" + diskname + r"[p]?\d+)", folder)
            if m:
                part = {}
                partname = m.group(1)
                part_sysdir = sysdir + "/" + partname

                part['links'] = {}
                for (link_type, link_values) in iteritems(links):
                    part['links'][link_type] = link_values.get(partname, [])

                part['start'] = get_file_content(part_sysdir + "/start", 0)
                part['sectors'] = get_file_content(part_sysdir + "/size", 0)

                part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
                if not part['sectorsize']:
                    # fall back to the hardware sector size, default 512
                    part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512)
                # sysfs sector counts are in 512-byte units regardless of
                # the logical block size
                part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
                part['uuid'] = get_partition_uuid(partname)
                self.get_holders(part, part_sysdir)

                d['partitions'][partname] = part

        d['rotational'] = get_file_content(sysdir + "/queue/rotational")
        d['scheduler_mode'] = ""
        scheduler = get_file_content(sysdir + "/queue/scheduler")
        if scheduler is not None:
            # the active scheduler is the bracketed entry, e.g. "[mq-deadline]"
            m = re.match(r".*?(\[(.*)\])", scheduler)
            if m:
                d['scheduler_mode'] = m.group(2)

        d['sectors'] = get_file_content(sysdir + "/size")
        if not d['sectors']:
            d['sectors'] = 0
        d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
        if not d['sectorsize']:
            d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512)
        d['size'] = bytes_to_human(float(d['sectors']) * 512.0)

        d['host'] = ""

        # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
        m = re.match(r".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
        if m and pcidata:
            pciid = m.group(1)
            did = re.escape(pciid)
            # look the PCI id up in the earlier lspci -D output
            m = re.search("^" + did + r"\s(.*)$", pcidata, re.MULTILINE)
            if m:
                d['host'] = m.group(1)

        self.get_holders(d, sysdir)

        device_facts['devices'][diskname] = d

    return device_facts
def _compare_af(self, want, have): """Custom handling of afs option :params want: the want BGP dictionary :params have: the have BGP dictionary """ wafs = want.get("address_family", {}) hafs = have.get("address_family", {}) vrf_want_have = [] for name, entry in iteritems(wafs): begin = len(self.commands) af_have = hafs.pop(name, {}) if "vrf" in entry: vrf_want_have.append((entry, af_have)) else: self.compare(parsers=self.parsers, want=entry, have=af_have) self._compare_lists(want=entry, have=af_have) if len(self.commands) != begin: self.commands.insert( begin, self._tmplt.render( { "afi": entry.get("afi"), "safi": entry.get("safi"), }, "address_family", False, ), ) # compare af under vrf separately to ensure correct generation of commands for waf, haf in vrf_want_have: begin = len(self.commands) self.compare(parsers=self.parsers, want=waf, have=haf) self._compare_lists(want=waf, have=haf) if len(self.commands) != begin: self.commands.insert( begin, self._tmplt.render( { "afi": waf.get("afi"), "safi": waf.get("safi") }, "address_family", False, ), ) self.commands.insert( begin, self._tmplt.render({"vrf": waf.get("vrf")}, "vrf", False), ) # for deleted and overridden state if self.state != "replaced": for name, entry in iteritems(hafs): if "vrf" in entry: self.addcmd({"vrf": entry.get("vrf")}, "vrf", False) self.addcmd( { "afi": entry.get("afi"), "safi": entry.get("safi") }, "address_family", True, )
def parse_cli(output, tmpl):
    """Parse semi-structured CLI output into data using a YAML spec file.

    :param output: the raw CLI text to parse.
    :param tmpl: path to a YAML spec with a 'keys' mapping (and optional
        'vars'); each key may define 'start_block'/'end_block' regexes,
        an 'items' regex, a templated 'value', and an optional 'when'
        condition.
    :returns: for block-based specs, a list of parsed objects; otherwise
        a dict mapping spec key names to parsed values.
    :raises AnsibleError: when the templating engine is unavailable.

    Fixes vs. the previous revision:
      * the final start/end block collected from the output is now
        flushed into ``blocks`` (it was silently dropped before);
      * the spec file is opened via a context manager (no fd leak);
      * the bare ``except:`` is narrowed to ``except Exception:`` so
        KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        template = Template()
    except ImportError as exc:
        raise AnsibleError(str(exc))

    # FIX: context manager guarantees the spec file is closed.
    with open(tmpl) as f:
        spec = yaml.safe_load(f.read())

    obj = {}

    for name, attrs in iteritems(spec['keys']):
        value = attrs['value']

        if template.can_template(value):
            variables = spec.get('vars', {})
            value = template(value, variables)

        if 'start_block' in attrs and 'end_block' in attrs:
            start_block = re.compile(attrs['start_block'])
            end_block = re.compile(attrs['end_block'])

            blocks = list()
            lines = None
            block_started = False

            # split the output into blocks delimited by start/end regexes
            for line in output.split('\n'):
                match_start = start_block.match(line)
                match_end = end_block.match(line)

                if match_start:
                    # a new block begins: flush any block in progress
                    if lines:
                        blocks.append('\n'.join(lines))
                    lines = list()
                    lines.append(line)
                    block_started = True

                elif match_end:
                    if lines:
                        lines.append(line)
                    block_started = False

                elif block_started:
                    if lines:
                        lines.append(line)

            # FIX: flush the trailing block; previously the last block of
            # the output was never appended and therefore never parsed.
            if lines:
                blocks.append('\n'.join(lines))

            regex_items = [re.compile(r) for r in attrs['items']]
            objects = list()

            for block in blocks:
                if isinstance(value, Mapping) and 'key' not in value:
                    # run every item regex once per block, recording
                    # named groups (or None for non-matches)
                    items = list()
                    for regex in regex_items:
                        match = regex.search(block)
                        if match:
                            item_values = match.groupdict()
                            item_values['match'] = list(match.groups())
                            items.append(item_values)
                        else:
                            items.append(None)

                    obj = {}
                    for k, v in iteritems(value):
                        try:
                            obj[k] = template(v, {'item': items}, fail_on_undefined=False)
                        # FIX: narrowed from a bare except; templating
                        # failures still yield None, but interrupts and
                        # system exits propagate.
                        except Exception:
                            obj[k] = None
                    objects.append(obj)

                elif isinstance(value, Mapping):
                    items = list()
                    for regex in regex_items:
                        match = regex.search(block)
                        if match:
                            item_values = match.groupdict()
                            item_values['match'] = list(match.groups())
                            items.append(item_values)
                        else:
                            items.append(None)

                    # keyed form: {templated key: {templated values}}
                    key = template(value['key'], {'item': items})
                    values = dict([(k, template(v, {'item': items}))
                                   for k, v in iteritems(value['values'])])
                    objects.append({key: values})

            # block-based specs return the list of parsed blocks directly
            return objects

        elif 'items' in attrs:
            regexp = re.compile(attrs['items'])
            when = attrs.get('when')
            conditional = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when

            if isinstance(value, Mapping) and 'key' not in value:
                values = list()

                for item in re_matchall(regexp, output):
                    entry = {}
                    for item_key, item_value in iteritems(value):
                        entry[item_key] = template(item_value, {'item': item})

                    if when:
                        if template(conditional, {'item': entry}):
                            values.append(entry)
                    else:
                        values.append(entry)

                obj[name] = values

            elif isinstance(value, Mapping):
                values = dict()

                for item in re_matchall(regexp, output):
                    entry = {}
                    for item_key, item_value in iteritems(value['values']):
                        entry[item_key] = template(item_value, {'item': item})

                    key = template(value['key'], {'item': item})

                    if when:
                        if template(conditional, {'item': {'key': key, 'value': entry}}):
                            values[key] = entry
                    else:
                        values[key] = entry

                obj[name] = values

            else:
                item = re_search(regexp, output)
                obj[name] = template(value, {'item': item})

        else:
            obj[name] = value

    return obj
def set_task_and_variable_override(self, task, variables, templar):
    '''
    Sets attributes from the task if they are set, which will override
    those from the play.

    :arg task: the task object with the parameters that were set on it
    :arg variables: variables from inventory
    :arg templar: templar instance if templating variables is needed
    :returns: a new play-context object (a copy of self with task- and
        variable-level overrides applied); self is not mutated.
    '''

    # work on a copy so the play-level context stays untouched
    new_info = self.copy()

    # loop through a subset of attributes on the task object and set
    # connection fields based on their values
    for attr in TASK_ATTRIBUTE_OVERRIDES:
        if hasattr(task, attr):
            attr_val = getattr(task, attr)
            if attr_val is not None:
                setattr(new_info, attr, attr_val)

    # next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
    # connection info object with 'magic' variables from the variable list.
    # If the value 'ansible_delegated_vars' is in the variables, it means
    # we have a delegated-to host, so we check there first before looking
    # at the variables in general
    if task.delegate_to is not None:
        # In the case of a loop, the delegated_to host may have been
        # templated based on the loop variable, so we try and locate
        # the host name in the delegated variable dictionary here
        delegated_host_name = templar.template(task.delegate_to)
        delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())

        # figure out the transport (connection plugin) for the delegated
        # host; falls back to the global default transport
        delegated_transport = C.DEFAULT_TRANSPORT
        for transport_var in C.MAGIC_VARIABLE_MAPPING.get('connection'):
            if transport_var in delegated_vars:
                delegated_transport = delegated_vars[transport_var]
                break

        # make sure this delegated_to host has something set for its remote
        # address, otherwise we default to connecting to it by name. This
        # may happen when users put an IP entry into their inventory, or if
        # they rely on DNS for a non-inventory hostname
        for address_var in ('ansible_%s_host' % delegated_transport, ) + C.MAGIC_VARIABLE_MAPPING.get('remote_addr'):
            if address_var in delegated_vars:
                break
        else:
            # for-else: no address variable was found
            display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
            delegated_vars['ansible_host'] = delegated_host_name

        # reset the port back to the default if none was specified, to prevent
        # the delegated host from inheriting the original host's setting
        for port_var in ('ansible_%s_port' % delegated_transport, ) + C.MAGIC_VARIABLE_MAPPING.get('port'):
            if port_var in delegated_vars:
                break
        else:
            # 5986 is the conventional winrm/https port used here for
            # the winrm transport; everything else gets the global default
            if delegated_transport == 'winrm':
                delegated_vars['ansible_port'] = 5986
            else:
                delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT

        # and likewise for the remote user
        for user_var in ('ansible_%s_user' % delegated_transport, ) + C.MAGIC_VARIABLE_MAPPING.get('remote_user'):
            if user_var in delegated_vars and delegated_vars[user_var]:
                break
        else:
            delegated_vars['ansible_user'] = task.remote_user or self.remote_user
    else:
        delegated_vars = dict()

        # setup shell
        for exe_var in C.MAGIC_VARIABLE_MAPPING.get('executable'):
            if exe_var in variables:
                setattr(new_info, 'executable', variables.get(exe_var))

    # apply magic variables; each attribute is set at most once, from the
    # first matching variable name (delegated vars win over plain vars
    # when delegating)
    attrs_considered = []
    for (attr, variable_names) in iteritems(C.MAGIC_VARIABLE_MAPPING):
        for variable_name in variable_names:
            if attr in attrs_considered:
                continue
            # if delegation task ONLY use delegated host vars, avoid delegated FOR host vars
            if task.delegate_to is not None:
                if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
                    setattr(new_info, attr, delegated_vars[variable_name])
                    attrs_considered.append(attr)
            elif variable_name in variables:
                setattr(new_info, attr, variables[variable_name])
                attrs_considered.append(attr)
            # no else, as no other vars should be considered

    # become legacy updates -- from inventory file (inventory overrides
    # commandline)
    for become_pass_name in C.MAGIC_VARIABLE_MAPPING.get('become_pass'):
        if become_pass_name in variables:
            break
    else:
        # This is a for-else: no become_pass variable was set, so fall
        # back to the method-specific legacy password variables
        if new_info.become_method == 'sudo':
            for sudo_pass_name in C.MAGIC_VARIABLE_MAPPING.get('sudo_pass'):
                if sudo_pass_name in variables:
                    setattr(new_info, 'become_pass', variables[sudo_pass_name])
                    break
        elif new_info.become_method == 'su':
            for su_pass_name in C.MAGIC_VARIABLE_MAPPING.get('su_pass'):
                if su_pass_name in variables:
                    setattr(new_info, 'become_pass', variables[su_pass_name])
                    break

    # make sure we get port defaults if needed
    if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
        new_info.port = int(C.DEFAULT_REMOTE_PORT)

    # special overrides for the connection setting
    if len(delegated_vars) > 0:
        # in the event that we were using local before make sure to reset the
        # connection type to the default transport for the delegated-to host,
        # if not otherwise specified
        for connection_type in C.MAGIC_VARIABLE_MAPPING.get('connection'):
            if connection_type in delegated_vars:
                break
        else:
            remote_addr_local = new_info.remote_addr in C.LOCALHOST
            inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
            if remote_addr_local and inv_hostname_local:
                setattr(new_info, 'connection', 'local')
            elif getattr(new_info, 'connection', None) == 'local' and (not remote_addr_local or not inv_hostname_local):
                setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)

    # if the final connection type is local, reset the remote_user value to that of the currently logged in user
    # this ensures any become settings are obeyed correctly
    # we store original in 'connection_user' for use of network/other modules that fallback to it as login user
    # connection_user to be deprecated once connection=local is removed for
    # network modules
    if new_info.connection == 'local':
        if not new_info.connection_user:
            new_info.connection_user = new_info.remote_user
        new_info.remote_user = pwd.getpwuid(os.getuid()).pw_name

    # set no_log to default if it was not previously set
    if new_info.no_log is None:
        new_info.no_log = C.DEFAULT_NO_LOG

    if task.check_mode is not None:
        new_info.check_mode = task.check_mode

    if task.diff is not None:
        new_info.diff = task.diff

    return new_info
def _render_nested_dict_param(self, attr, want, have, opr=True): """ This function forms the set/delete commands based on the 'opr' type for attributes with in desired nested dicts. :param attr: attribute name. :param w: the desired config. :param h: the target config. :param cmd: commands to be prepend. :param opr: True/False. :return: generated commands list. """ commands = [] attr_dict = { "default_information": "originate", "max_metric": "router_lsa", } leaf_dict = { "default_information": ( "always", "metric", "metric_type", "route_map", ), "max_metric": ("administrative", "on_startup", "on_shutdown"), } h = {} w = want.get(attr) or {} if have: h = have.get(attr) or {} if not opr and not h: commands.append(self._form_attr_cmd(attr=attr, opr=opr)) elif w: key = attr_dict[attr] w_attrib = want[attr].get(key) or {} cmd = self._compute_command(opr=opr) h_attrib = {} if w_attrib: leaf = leaf_dict[attr] if h and key in h.keys(): h_attrib = h.get(key) or {} for (item, val) in iteritems(w[key]): if ( opr and item in leaf and not _is_w_same(w[key], h_attrib, item) ): if item in ("administrative", "always") and val: commands.append( cmd + attr.replace("_", "-") + " " + key.replace("_", "-") + " " + item.replace("_", "-") ) elif item not in ("administrative", "always"): commands.append( cmd + attr.replace("_", "-") + " " + key.replace("_", "-") + " " + item.replace("_", "-") + " " + str(val) ) elif ( not opr and item in leaf and not _in_target(h_attrib, item) ): commands.append(cmd + attr + " " + item) return commands
def _setup_instance_vars(self, **kwargs): for key, val in iteritems(kwargs): setattr(self, key, val)