def resize_lun(self):
    """Resize the requested LUN via a ZAPI ``lun-resize`` call.

    :return: True if the LUN was actually re-sized, False otherwise.
    :rtype: bool
    """
    lun_path = '/vol/%s/%s' % (self.flexvol_name, self.name)
    resize_request = netapp_utils.zapi.NaElement.create_node_with_children(
        'lun-resize',
        **{'path': lun_path,
           'size': str(self.size),
           'force': str(self.force_resize)})
    try:
        self.server.invoke_successfully(resize_request, enable_tunneling=True)
    except netapp_utils.zapi.NaApiError as api_err:
        if to_native(api_err.code) == "9042":
            # Error 9042 denotes the new LUN size being the same as the
            # old LUN size. This happens when there's barely any difference
            # in the two sizes. For example, from 8388608 bytes to
            # 8194304 bytes. This should go away if/when the default size
            # requested/reported to/from the controller is changed to a
            # larger unit (MB/GB/TB).
            return False
        self.module.fail_json(msg="Error resizing lun %s: %s" % (lun_path, to_native(api_err)),
                              exception=traceback.format_exc())
    return True
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
    """Change the role of an async mirror group (AMG) and wait for completion.

    :param module: AnsibleModule instance used to report failures.
    :param ssid: storage system identifier.
    :param api_url: base URL of the web services proxy (with trailing slash).
    :param api_usr: API username.
    :param api_pwd: API password.
    :param body: JSON-serializable request body describing the role change.
    :param amg_id: identifier of the async mirror group.
    :return: final status dict of the AMG after the role reversal completes.
    """
    import time  # local import: only needed for the polling delay below

    endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        request(url, data=post_data, method='POST', url_username=api_usr,
                url_password=api_pwd, headers=HEADERS)
    except Exception as e:
        module.fail_json(
            msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
            exception=traceback.format_exc())

    status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
    status_url = api_url + status_endpoint
    try:
        rc, status = request(status_url, method='GET', url_username=api_usr,
                             url_password=api_pwd, headers=HEADERS)
    except Exception as e:
        module.fail_json(
            msg="Failed to check status of AMG after role reversal. "
                "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
            exception=traceback.format_exc())

    # Here we wait for the role reversal to complete.  Sleep between polls so
    # we don't hammer the API in a tight busy-wait loop.
    if 'roleChangeProgress' in status:
        while status['roleChangeProgress'] != "none":
            time.sleep(5)
            try:
                rc, status = request(status_url, method='GET',
                                     url_username=api_usr, url_password=api_pwd, headers=HEADERS)
            except Exception as e:
                module.fail_json(
                    msg="Failed to check status of AMG after role reversal. "
                        "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
                    exception=traceback.format_exc())
    return status
def fail_json_aws(self, exception, msg=None):
    """Call fail_json with a processed exception.

    Converts exceptions thrown by AWS SDK modules (botocore, boto3 and
    boto) into nice error messages, attaching the traceback and any AWS
    response metadata as snake_cased result keys.
    """
    last_traceback = traceback.format_exc()

    # to_native is trusted to handle exceptions that str() could
    # convert to text.
    try:
        except_msg = to_native(exception.message)
    except AttributeError:
        except_msg = to_native(exception)

    message = except_msg if msg is None else '{0}: {1}'.format(msg, except_msg)

    # Some SDK exceptions carry the raw HTTP response; surface it if present.
    response = getattr(exception, 'response', None)

    if response is None:
        self._module.fail_json(msg=message, exception=last_traceback)
    else:
        self._module.fail_json(msg=message, exception=last_traceback,
                               **camel_dict_to_snake_dict(response))
def remove_host_port_group(self, host_system, portgroup_name, vswitch_name):
    """Remove a port group from the given host system.

    Args:
        host_system: Host system managed object.
        portgroup_name: Name of the port group to remove.
        vswitch_name: Name of the vSwitch the port group belongs to.

    Returns:
        bool: True if the port group was removed, False otherwise.
    """
    changed = False
    desired_pgs = self.get_port_group_by_name(host_system=host_system,
                                              portgroup_name=portgroup_name,
                                              vswitch_name=vswitch_name)
    if desired_pgs:
        try:
            # Bug fix: remove the port group that was actually requested and
            # looked up (the 'portgroup_name' argument) rather than
            # self.portgroup_name, which may differ from the argument.
            host_system.configManager.networkSystem.RemovePortGroup(pgName=portgroup_name)
            changed = True
        except vim.fault.NotFound as not_found:
            self.module.fail_json(msg="Failed to remove Portgroup as it was"
                                      " not found: %s" % to_native(not_found.msg))
        except vim.fault.ResourceInUse as resource_in_use:
            self.module.fail_json(msg="Failed to remove Portgroup as it is"
                                      " in use: %s" % to_native(resource_in_use.msg))
        except vim.fault.HostConfigFault as host_config_fault:
            self.module.fail_json(msg="Failed to remove Portgroup due to configuration"
                                      " failures: %s" % to_native(host_config_fault.msg))
        except Exception as generic_exception:
            self.module.fail_json(msg="Failed to remove Portgroup due to generic"
                                      " exception : %s" % to_native(generic_exception))
    return changed
def query_service_type_for_vmks(self, service_type):
    """Return the VMKernel device names bound to a given service type.

    Args:
        service_type: Name of the service type (e.g. 'vmotion').

    Returns:
        List of VMKernel device names which belong to that service type.
    """
    query = None
    try:
        query = self.esxi_host_obj.configManager.virtualNicManager.QueryNetConfig(service_type)
    except vim.fault.HostConfigFault as config_fault:
        self.module.fail_json(msg="Failed to get all VMKs for service type %s due to"
                                  " host config fault : %s" % (service_type, to_native(config_fault.msg)))
    except vmodl.fault.InvalidArgument as invalid_argument:
        self.module.fail_json(msg="Failed to get all VMKs for service type %s due to"
                                  " invalid arguments : %s" % (service_type, to_native(invalid_argument.msg)))
    except Exception as e:
        # Bug fix: message previously rendered as "due to%s" (missing space).
        self.module.fail_json(msg="Failed to get all VMKs for service type %s due to"
                                  " %s" % (service_type, to_native(e)))

    if not query.selectedVnic:
        return []

    # list() instead of a pass-through comprehension; same content.
    selected_vnics = list(query.selectedVnic)
    return [vnic.device for vnic in query.candidateVnic if vnic.key in selected_vnics]
def run(self, terms, variables, **kwargs):
    """Look up published AWS IP ranges, optionally filtered by region/service."""
    try:
        raw = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
        prefixes = json.load(raw)['prefixes']
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
        # on Python 3+, json.decoder.JSONDecodeError is raised for bad
        # JSON. On 2.x it's a ValueError
        raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
    except HTTPError as e:
        raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
    except SSLValidationError as e:
        raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
    except URLError as e:
        raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
    except ConnectionError as e:
        raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))

    # Lazily narrow the prefix list by the optional filters.
    if 'region' in kwargs:
        wanted_region = kwargs['region']
        prefixes = (entry for entry in prefixes if entry['region'] == wanted_region)
    if 'service' in kwargs:
        wanted_service = str.upper(kwargs['service'])
        prefixes = (entry for entry in prefixes if entry['service'] == wanted_service)

    return [entry['ip_prefix'] for entry in prefixes]
def save(self):
    """Persist the in-memory repo configuration to self.params['dest'].

    Writes the repofile when it has sections; otherwise removes the
    now-empty destination file.  Fails the module on any I/O error.
    """
    if len(self.repofile.sections()):
        # Write data into the file
        try:
            fd = open(self.params['dest'], 'w')
        except IOError as e:
            self.module.fail_json(
                msg="Cannot open repo file %s." % self.params['dest'],
                details=to_native(e))
        # Bug fix: previously only fd.close() was guarded, so a failure in
        # repofile.write() propagated uncaught and leaked the handle.  Guard
        # the write as well and always close the file.
        try:
            try:
                self.repofile.write(fd)
            finally:
                fd.close()
        except IOError as e:
            self.module.fail_json(
                msg="Cannot write repo file %s." % self.params['dest'],
                details=to_native(e))
    else:
        # Remove the file if there are not repos
        try:
            os.remove(self.params['dest'])
        except OSError as e:
            self.module.fail_json(
                msg=(
                    "Cannot remove empty repo file %s." % self.params['dest']),
                details=to_native(e))
def update_ntp_servers(self, host, ntp_servers, operation='add'):
    """Add or remove NTP servers on *host* and push the new configuration.

    Records the post-change server list (or the error) in self.results and
    returns True when the host configuration was updated.
    """
    changed = False
    dts_manager = host.configManager.dateTimeSystem
    if dts_manager:
        current_servers = dts_manager.dateTimeInfo.ntpConfig.server
        if operation == 'add':
            current_servers = current_servers + ntp_servers
        elif operation == 'delete':
            for ntp_server in ntp_servers:
                if ntp_server in current_servers:
                    current_servers.remove(ntp_server)

        # Build the config spec carrying the updated server list.
        ntp_spec = vim.host.NtpConfig()
        ntp_spec.server = current_servers
        datetime_spec = vim.host.DateTimeConfig()
        datetime_spec.ntpConfig = ntp_spec
        try:
            dts_manager.UpdateDateTimeConfig(datetime_spec)
            self.results[host.name]['after_change_ntp_servers'] = dts_manager.dateTimeInfo.ntpConfig.server
            changed = True
        except vim.fault.HostConfigFault as config_fault:
            self.results[host.name]['error'] = to_native(config_fault.msg)
        except Exception as generic_exc:
            self.results[host.name]['error'] = to_native(generic_exc)

    return changed
def __getitem__(self, varname):
    """Resolve *varname* from templar vars, locals, extras or globals,
    templating the value unless it is HostVars / 'vars' / unsafe.

    Raises KeyError when the variable is undefined anywhere.
    """
    if varname not in self._templar._available_variables:
        if varname in self._locals:
            return self._locals[varname]
        for extra_scope in self._extras:
            if varname in extra_scope:
                return extra_scope[varname]
        if varname in self._globals:
            return self._globals[varname]
        raise KeyError("undefined variable: %s" % varname)

    variable = self._templar._available_variables[varname]

    # HostVars is special, return it as-is, as is the special variable
    # 'vars', which contains the vars structure
    from ansible.vars.hostvars import HostVars
    if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars) or hasattr(variable, '__UNSAFE__'):
        return variable
    else:
        value = None
        try:
            value = self._templar.template(variable)
        except AnsibleUndefinedVariable:
            raise
        except Exception as e:
            # Bug fix: getattr(e, 'message') without a default raises
            # AttributeError on Python 3 exceptions (no .message attribute),
            # masking the original templating error with a new one.
            msg = getattr(e, 'message', None) or to_native(e)
            raise AnsibleError("An unhandled exception occurred while templating '%s'. "
                               "Error was a %s, original message: %s" % (to_native(variable), type(e), msg))
        return value
def create_datacenter(self):
    """Create the datacenter when it does not already exist.

    Exits the module with the appropriate 'changed' flag, or fails with a
    message describing the vSphere fault encountered.
    """
    root_folder = self.content.rootFolder
    changed = False
    try:
        if not self.datacenter_obj and not self.module.check_mode:
            changed = True
            root_folder.CreateDatacenter(name=self.datacenter_name)
        self.module.exit_json(changed=changed)
    except vim.fault.DuplicateName:
        # The datacenter already exists (e.g. created concurrently); report
        # the current 'changed' status without failing.
        self.module.exit_json(changed=changed)
    except vim.fault.InvalidName as invalid_name:
        self.module.fail_json(msg="Specified datacenter name '%s' is an"
                                  " invalid name : %s" % (self.datacenter_name,
                                                          to_native(invalid_name.msg)))
    except vmodl.fault.NotSupported as not_supported:
        # This should never happen
        self.module.fail_json(msg="Trying to create a datacenter '%s' on"
                                  " an incorrect folder object : %s" % (self.datacenter_name,
                                                                        to_native(not_supported.msg)))
    except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
        self.module.fail_json(msg="Failed to create a datacenter"
                                  " '%s' due to : %s" % (self.datacenter_name,
                                                         to_native(runtime_fault.msg)))
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to create a datacenter"
                                  " '%s' due to generic error: %s" % (self.datacenter_name,
                                                                      to_native(generic_exc)))
def state_destroy_vswitch(self):
    """Remove the vSwitch from the host network configuration.

    Exits the module with the removal result; a missing vSwitch is treated
    as success (nothing to do), other faults fail the module.
    """
    results = dict(changed=False, result="")

    try:
        self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name)
        results['changed'] = True
        results['result'] = "vSwitch '%s' removed successfully." % self.vss.name
    except vim.fault.NotFound as not_found:
        results['result'] = "vSwitch '%s' not available. %s" % (self.switch,
                                                                to_native(not_found.msg))
    except vim.fault.ResourceInUse as in_use:
        self.module.fail_json(msg="Failed to remove vSwitch '%s' as vSwitch"
                                  " is used by several virtual"
                                  " network adapters: %s" % (self.switch,
                                                             to_native(in_use.msg)))
    except vim.fault.HostConfigFault as config_fault:
        self.module.fail_json(msg="Failed to remove vSwitch '%s' due to host"
                                  " configuration fault : %s" % (self.switch,
                                                                 to_native(config_fault.msg)))
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to remove vSwitch '%s' due to generic"
                                  " exception : %s" % (self.switch,
                                                       to_native(generic_exc)))

    self.module.exit_json(**results)
def process_state(self):
    """Dispatch to the handler for (desired state, current host state).

    Currently state_update_host is not implemented.
    """
    handlers = {
        'absent': {
            'present': self.state_remove_host,
            'absent': self.state_exit_unchanged,
        },
        'present': {
            'present': self.state_exit_unchanged,
            'absent': self.state_add_host,
        },
        'add_or_reconnect': {
            'present': self.state_reconnect_host,
            'absent': self.state_add_host,
        },
        'reconnect': {
            'present': self.state_reconnect_host,
        },
    }

    try:
        current_state = self.check_host_state()
        handlers[self.state][current_state]()
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        self.module.fail_json(msg=to_native(method_fault.msg))
    except Exception as e:
        self.module.fail_json(msg=to_native(e))
def _read_config_data(self, path):
    ''' Validate the inventory config file at *path* and set plugin options.

    Returns the parsed config mapping; raises AnsibleParserError on an
    unreadable, empty, malformed, or foreign config file.
    '''
    config = {}
    try:
        # avoid loader cache so meta: refresh_inventory can pick up config changes
        # if we read more than once, fs cache should be good enough
        config = self.loader.load_from_file(path, cache=False)
    except Exception as e:
        raise AnsibleParserError(to_native(e))

    if not config:
        # no data
        raise AnsibleParserError("%s is empty" % (to_native(path)))
    elif not isinstance(config, Mapping):
        # Bug fix: validate the structure *before* calling .get() on it; a
        # non-mapping result (e.g. a YAML list) previously raised an
        # uncaught AttributeError on config.get('plugin').
        raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
    elif config.get('plugin') != self.NAME:
        # this is not my config file
        raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))

    self.set_options(direct=config)
    if self._options.get('cache'):
        self._set_cache_options(self._options)

    return config
def load_configuration(self):
    """Upload a firmware configuration file to the host and restore it.

    Enters maintenance mode if necessary before restoring; exits the
    module with changed=True on success.
    """
    if not os.path.isfile(self.src):
        self.module.fail_json(msg="Source file {} does not exist".format(self.src))

    upload_url = self.host.configManager.firmwareSystem.QueryFirmwareConfigUploadURL()
    upload_url = upload_url.replace('*', self.host.name)

    # find manually the url if there is a redirect because urllib2 -per RFC-
    # doesn't do automatic redirects for PUT requests
    try:
        open_url(url=upload_url, method='HEAD', validate_certs=self.validate_certs)
    except HTTPError as http_err:
        upload_url = http_err.geturl()

    try:
        with open(self.src, 'rb') as src_file:
            payload = src_file.read()
        open_url(url=upload_url, data=payload, method='PUT',
                 validate_certs=self.validate_certs,
                 url_username=self.username, url_password=self.password,
                 force_basic_auth=True)
    except Exception as upload_exc:
        self.module.fail_json(msg=to_native(upload_exc))

    if not self.host.runtime.inMaintenanceMode:
        self.enter_maintenance()

    try:
        self.host.configManager.firmwareSystem.RestoreFirmwareConfiguration(force=True)
        self.module.exit_json(changed=True)
    except Exception as restore_exc:
        self.exit_maintenance()
        self.module.fail_json(msg=to_native(restore_exc))
def put_file(self, in_path, out_path):
    """ Transfer a file from local to docker container """
    super(Connection, self).put_file(in_path, out_path)
    display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

    out_path = self._prefix_login_path(out_path)
    b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
    if not os.path.exists(b_in_path):
        raise AnsibleFileNotFound(
            "file or module does not exist: %s" % to_native(in_path))

    out_path = shlex_quote(out_path)
    # Older docker doesn't have native support for copying files into
    # running containers, so we use docker exec to implement this
    # Although docker version 1.8 and later provide support, the
    # owner and group of the files are always set to root
    dd_command = "dd of=%s bs=%s" % (out_path, BUFSIZE)
    exec_args = self._build_exec_cmd([self._play_context.executable, "-c", dd_command])
    exec_args = [to_bytes(arg, errors='surrogate_or_strict') for arg in exec_args]

    with open(b_in_path, 'rb') as in_file:
        try:
            proc = subprocess.Popen(exec_args,
                                    stdin=in_file,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        except OSError:
            # dd is missing inside the container; nothing to stream into.
            raise AnsibleError("docker connection requires dd command in the container to put files")
        stdout, stderr = proc.communicate()

        if proc.returncode != 0:
            raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
                               (to_native(in_path), to_native(out_path),
                                to_native(stdout), to_native(stderr)))
def get_config(self, xml_str):
    """Run a NETCONF <get> with the given XML filter and return the reply.

    Large replies are paginated by the device (Huawei "set-id" mechanism):
    when the first reply carries a set-id, additional <get-next> RPCs are
    issued and merged until the device has no more data.

    :param xml_str: XML filter string for the <get> request.
    :return: complete (merged) reply XML as a string.
    """
    con_obj = None
    try:
        con_obj = self.mc.get(filter=xml_str)
    except RPCError as err:
        self._module.fail_json(msg='Error: %s' % to_native(err).replace("\r\n", ""))

    set_id = get_nc_set_id(con_obj.xml)
    if not set_id:
        # Single-message reply; nothing more to fetch.
        return con_obj.xml

    # continue to get next
    xml_str = con_obj.xml
    while set_id:
        set_attr = dict()
        set_attr["set-id"] = str(set_id)
        # Huawei-specific <get-next> RPC carrying the pagination token.
        xsd_fetch = xml_.new_ele_ns('get-next', "http://www.huawei.com/netconf/capability/base/1.0", set_attr)
        # get next data
        try:
            con_obj_next = self.mc.dispatch(xsd_fetch)
        except RPCError as err:
            self._module.fail_json(msg='Error: %s' % to_native(err).replace("\r\n", ""))

        if "<data/>" in con_obj_next.xml:
            # Empty page: the device has returned everything.
            break

        # merge two xml data
        xml_str = merge_nc_xml(xml_str, con_obj_next.xml)
        set_id = get_nc_set_id(con_obj_next.xml)

    return xml_str
def create_or_update_bucket_cors(connection, module):
    """Apply the requested CORS rules to an S3 bucket when they differ.

    Compares the desired rules against the bucket's current configuration
    and only calls put_bucket_cors when a change is needed.  Exits the
    module with the resulting state.
    """
    name = module.params.get("name")
    rules = module.params.get("rules", [])
    changed = False

    try:
        current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules']
    except ClientError as e:
        # Bug fix: only a missing CORS configuration means "no current
        # rules".  Any other client error (e.g. access denied, no such
        # bucket) was previously swallowed here and must fail instead.
        if e.response['Error']['Code'] != 'NoSuchCORSConfiguration':
            module.fail_json(
                msg="Unable to fetch CORS for bucket {0}: {1}".format(name, to_native(e)),
                exception=traceback.format_exc(),
                **camel_dict_to_snake_dict(e.response)
            )
        current_camel_rules = []

    new_camel_rules = snake_dict_to_camel_dict(rules, capitalize_first=True)
    # compare_policies() takes two dicts and makes them hashable for comparison
    if compare_policies(new_camel_rules, current_camel_rules):
        changed = True

    if changed:
        try:
            cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules})
        except ClientError as e:
            module.fail_json(
                msg="Unable to update CORS for bucket {0}: {1}".format(name, to_native(e)),
                exception=traceback.format_exc(),
                **camel_dict_to_snake_dict(e.response)
            )
        except BotoCoreError as e:
            module.fail_json(
                msg=to_native(e),
                exception=traceback.format_exc()
            )

    module.exit_json(changed=changed, name=name, rules=rules)
def ensure(self):
    """Drive the VMKernel interface to the requested state.

    Dispatches on (desired state, current state) to the matching handler.
    Returns: NA
    """
    dispatch = {
        'absent': {
            'update': self.host_vmk_delete,
            'present': self.host_vmk_delete,
            'absent': self.host_vmk_unchange,
        },
        'present': {
            'update': self.host_vmk_update,
            'present': self.host_vmk_unchange,
            'absent': self.host_vmk_create,
        },
    }

    try:
        current_state = self.check_state()
        dispatch[self.module.params['state']][current_state]()
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        self.module.fail_json(msg=to_native(method_fault.msg))
    except Exception as e:
        self.module.fail_json(msg=to_native(e))
def snapshot_vm(self, vm):
    """Create a snapshot of *vm*, honouring quiesce/memory_dump options.

    Exits early (changed=False) when a snapshot with the requested name
    already exists; otherwise returns the CreateSnapshot task.
    """
    memory_dump = False
    quiesce = False

    # Check if there is a latest snapshot already present as specified by user
    if vm.snapshot is not None:
        existing_snap = self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList,
                                                               self.module.params["snapshot_name"])
        if existing_snap:
            # Snapshot already exists, do not anything.
            self.module.exit_json(changed=False,
                                  msg="Snapshot named [%(snapshot_name)s] already exists and is current." % self.module.params)

    # Check if Virtual Machine provides capabilities for Quiesce and Memory Snapshots
    if vm.capability.quiescedSnapshotsSupported:
        quiesce = self.module.params['quiesce']
    if vm.capability.memorySnapshotsSupported:
        memory_dump = self.module.params['memory_dump']

    task = None
    try:
        task = vm.CreateSnapshot(self.module.params["snapshot_name"],
                                 self.module.params["description"],
                                 memory_dump,
                                 quiesce)
    except vim.fault.RestrictedVersion as exc:
        self.module.fail_json(msg="Failed to take snapshot due to VMware Licence"
                                  " restriction : %s" % to_native(exc.msg))
    except Exception as exc:
        self.module.fail_json(msg="Failed to create snapshot of virtual machine"
                                  " %s due to %s" % (self.module.params['name'], to_native(exc)))
    return task
def main():
    """Ansible entry point: create or delete a ProfitBricks volume."""
    module = AnsibleModule(
        argument_spec=dict(
            datacenter=dict(),
            server=dict(),
            name=dict(),
            size=dict(type='int', default=10),
            bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
            image=dict(),
            image_password=dict(default=None, no_log=True),
            ssh_keys=dict(type='list', default=[]),
            disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
            licence_type=dict(default='UNKNOWN'),
            count=dict(type='int', default=1),
            auto_increment=dict(type='bool', default=True),
            instance_ids=dict(type='list', default=[]),
            subscription_user=dict(),
            subscription_password=dict(no_log=True),
            wait=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=600),
            # Bug fix: constrain state to the values the module implements;
            # previously any other value fell through both branches and the
            # module finished without ever calling exit_json/fail_json.
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    if not module.params.get('subscription_user'):
        module.fail_json(msg='subscription_user parameter is required')
    if not module.params.get('subscription_password'):
        module.fail_json(msg='subscription_password parameter is required')

    subscription_user = module.params.get('subscription_user')
    subscription_password = module.params.get('subscription_password')

    profitbricks = ProfitBricksService(
        username=subscription_user,
        password=subscription_password)

    state = module.params.get('state')

    if state == 'absent':
        if not module.params.get('datacenter'):
            module.fail_json(msg='datacenter parameter is required for running or stopping machines.')

        try:
            (changed) = delete_volume(module, profitbricks)
            module.exit_json(changed=changed)
        except Exception as e:
            module.fail_json(msg='failed to set volume state: %s' % to_native(e),
                             exception=traceback.format_exc())

    elif state == 'present':
        if not module.params.get('datacenter'):
            module.fail_json(msg='datacenter parameter is required for new instance')
        if not module.params.get('name'):
            module.fail_json(msg='name parameter is required for new instance')

        try:
            (volume_dict_array) = create_volume(module, profitbricks)
            module.exit_json(**volume_dict_array)
        except Exception as e:
            module.fail_json(msg='failed to set volume state: %s' % to_native(e),
                             exception=traceback.format_exc())
def absent(module, dest, regexp, line, backup):
    """Remove every line of *dest* matching *regexp* (or equal to *line*).

    Optionally backs up the file before writing, emits a content + file
    attribute diff, and exits the module with the number of lines removed.
    """
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    if not os.path.exists(b_dest):
        module.exit_json(changed=False, msg="file not present")

    msg = ''
    diff = {'before': '',
            'after': '',
            'before_header': '%s (content)' % dest,
            'after_header': '%s (content)' % dest}

    # Bug fix: use a context manager so the handle is closed even when
    # readlines() raises (the original open/readlines/close leaked on error).
    with open(b_dest, 'rb') as f:
        b_lines = f.readlines()

    if module._diff:
        diff['before'] = to_native(b('').join(b_lines))

    if regexp is not None:
        bre_c = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
    found = []

    b_line = to_bytes(line, errors='surrogate_or_strict')

    def matcher(b_cur_line):
        # Return True for lines that should be KEPT; matched lines are
        # collected into 'found' for reporting.
        if regexp is not None:
            match_found = bre_c.search(b_cur_line)
        else:
            match_found = b_line == b_cur_line.rstrip(b('\r\n'))
        if match_found:
            found.append(b_cur_line)
        return not match_found

    b_lines = [b_l for b_l in b_lines if matcher(b_l)]
    changed = len(found) > 0

    if module._diff:
        diff['after'] = to_native(b('').join(b_lines))

    backupdest = ""
    if changed and not module.check_mode:
        if backup:
            backupdest = module.backup_local(dest)
        write_changes(module, b_lines, dest)

    if changed:
        msg = "%s line(s) removed" % len(found)

    attr_diff = {}
    msg, changed = check_file_attrs(module, changed, msg, attr_diff)

    attr_diff['before_header'] = '%s (file attributes)' % dest
    attr_diff['after_header'] = '%s (file attributes)' % dest

    difflist = [diff, attr_diff]

    module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest, diff=difflist)
def test_utf8_output(self, mocker, rc_am):
    # Feed UTF-8 encoded bytes through the fake process streams.
    utf8_stdout = u'Žarn§'.encode('utf-8')
    utf8_stderr = u'لرئيسية'.encode('utf-8')
    rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(utf8_stdout)
    rc_am._os._cmd_out[mocker.sentinel.stderr] = BytesIO(utf8_stderr)

    (rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly')
    assert rc == 0
    # module_utils function. On py3 it returns text and py2 it returns
    # bytes because it's returning native strings
    assert stdout == to_native(u'Žarn§')
    assert stderr == to_native(u'لرئيسية')
def host_vmk_create(self):
    """Create a VMKernel interface and enable the requested service types.

    Exits the module with the aggregated 'changed' flag and the created
    device (or VSAN result) in 'result'.
    """
    results = dict(changed=False, result='')
    vnic_config = vim.host.VirtualNic.Specification()

    ip_spec = vim.host.IpConfig()
    if self.network_type == 'dhcp':
        ip_spec.dhcp = True
    else:
        ip_spec.dhcp = False
        ip_spec.ipAddress = self.ip_address
        ip_spec.subnetMask = self.subnet_mask

    vnic_config.ip = ip_spec
    vnic_config.mtu = self.mtu
    vmk_device = None
    try:
        vmk_device = self.esxi_host_obj.configManager.networkSystem.AddVirtualNic(self.port_group_name, vnic_config)
        results['changed'] = True
        results['result'] = vmk_device
        self.vnic = self.get_vmkernel(port_group_name=self.port_group_name)
    except vim.fault.AlreadyExists as already_exists:
        self.module.fail_json(msg="Failed to add vmk as portgroup already has a "
                                  "virtual network adapter %s" % to_native(already_exists.msg))
    except vim.fault.HostConfigFault as host_config_fault:
        self.module.fail_json(msg="Failed to add vmk due to host config "
                                  "issues : %s" % to_native(host_config_fault.msg))
    except vim.fault.InvalidState as invalid_state:
        self.module.fail_json(msg="Failed to add vmk as ipv6 address is specified in an "
                                  "ipv4 only system : %s" % to_native(invalid_state.msg))
    except vmodl.fault.InvalidArgument as invalid_arg:
        self.module.fail_json(msg="Failed to add vmk as IP address or Subnet Mask in the IP "
                                  "configuration are invalid or PortGroup "
                                  "does not exist : %s" % to_native(invalid_arg.msg))
    except Exception as e:
        self.module.fail_json(msg="Failed to add vmk due to general "
                                  "exception : %s" % to_native(e))

    # Bug fix: accumulate 'changed' with OR.  Previously each service-type
    # call below overwrote results['changed'], so a later no-op update
    # could hide the fact that the vmk was just created.

    # VSAN
    if self.enable_vsan:
        vsan_changed, results['result'] = self.set_vsan_service_type()
        results['changed'] = vsan_changed or results['changed']

    # Other service type
    host_vnic_manager = self.esxi_host_obj.configManager.virtualNicManager
    if self.enable_vmotion:
        results['changed'] = self.set_service_type(host_vnic_manager, self.vnic, 'vmotion') or results['changed']

    if self.enable_mgmt:
        results['changed'] = self.set_service_type(host_vnic_manager, self.vnic, 'management') or results['changed']

    if self.enable_ft:
        results['changed'] = self.set_service_type(host_vnic_manager, self.vnic, 'faultToleranceLogging') or results['changed']

    self.module.exit_json(**results)
def construct_ip_configuration_set(self, raw):
    """Normalize raw IP configuration dicts into a comparable set.

    Each entry is serialized to a canonical string so that two
    configurations can be compared with set operations.
    """
    def serialize(item):
        # The public IP name may live either on a nested object or on a
        # flat key; prefer the nested object when present.
        if item.get('public_ip_address'):
            public_name = to_native(item.get('public_ip_address').get('name'))
        else:
            public_name = to_native(item.get('public_ip_address_name'))
        return str(dict(
            private_ip_allocation_method=to_native(item.get('private_ip_allocation_method')),
            public_ip_address_name=public_name,
            primary=item.get('primary'),
            name=to_native(item.get('name'))
        ))

    return set(serialize(item) for item in raw)
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
    """Create a boto3 connection, translating common failures into fail_json.

    Thin wrapper around _boto3_conn that maps credential/region problems to
    user-friendly module failures instead of raw tracebacks.
    """
    try:
        return _boto3_conn(conn_type=conn_type, resource=resource,
                           region=region, endpoint=endpoint, **params)
    except ValueError as e:
        module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
            botocore.exceptions.NoCredentialsError) as e:
        module.fail_json(msg=to_native(e))
    except botocore.exceptions.NoRegionError:
        # Lint fix: the exception was bound to an unused name; the message
        # is self-contained and does not include the exception text.
        module.fail_json(msg="The %s module requires a region and none was found in configuration, "
                             "environment variables or module parameters" % module._name)
def complete_cd(self, text, line, begidx, endidx):
    """Tab-completion for 'cd': hosts and groups visible in the current scope."""
    mline = line.partition(' ')[2]
    offs = len(mline) - len(text)

    # At the top level ('all'/'*'/'\') offer everything; otherwise only
    # the hosts of the current group.
    if self.options.cwd in ('all', '*', '\\'):
        candidates = self.hosts + self.groups
    else:
        candidates = [host.name for host in self.inventory.list_hosts(self.options.cwd)]

    prefix = to_native(mline)
    return [to_native(c)[offs:] for c in candidates if to_native(c).startswith(prefix)]
def destroy_role(connection, module):
    """Delete an IAM role after detaching its instance profiles and policies.

    Exits the module with changed=True on deletion, changed=False when the
    role does not exist.
    """
    params = dict()
    params['RoleName'] = module.params.get('name')

    if get_role(connection, module, params['RoleName']):
        # We need to remove any instance profiles from the role before we delete it
        try:
            instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'])['InstanceProfiles']
        except ClientError as e:
            module.fail_json(msg="Unable to list instance profiles for role {0}: {1}".format(params['RoleName'], to_native(e)),
                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
        except BotoCoreError as e:
            module.fail_json(msg="Unable to list instance profiles for role {0}: {1}".format(params['RoleName'], to_native(e)),
                             exception=traceback.format_exc())

        # Now remove the role from the instance profile(s)
        for profile in instance_profiles:
            try:
                if not module.check_mode:
                    connection.remove_role_from_instance_profile(InstanceProfileName=profile['InstanceProfileName'],
                                                                 RoleName=params['RoleName'])
            except ClientError as e:
                module.fail_json(msg="Unable to remove role {0} from instance profile {1}: {2}".format(
                    params['RoleName'], profile['InstanceProfileName'], to_native(e)),
                    exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
            except BotoCoreError as e:
                module.fail_json(msg="Unable to remove role {0} from instance profile {1}: {2}".format(
                    params['RoleName'], profile['InstanceProfileName'], to_native(e)),
                    exception=traceback.format_exc())

        # Now remove any attached policies otherwise deletion fails.
        # Bug fix: wrap only the detach call so the error message never
        # references 'policy' before it is bound (previously a failure
        # raised while fetching the policy list would trigger a NameError
        # inside the except handler).
        for policy in get_attached_policy_list(connection, module, params['RoleName']):
            try:
                if not module.check_mode:
                    connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy['PolicyArn'])
            except ClientError as e:
                module.fail_json(msg="Unable to detach policy {0} from role {1}: {2}".format(policy['PolicyArn'], params['RoleName'], to_native(e)),
                                 exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
            except BotoCoreError as e:
                module.fail_json(msg="Unable to detach policy {0} from role {1}: {2}".format(policy['PolicyArn'], params['RoleName'], to_native(e)),
                                 exception=traceback.format_exc())

        try:
            if not module.check_mode:
                connection.delete_role(**params)
        except ClientError as e:
            module.fail_json(msg="Unable to delete role: {0}".format(to_native(e)),
                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
        except BotoCoreError as e:
            module.fail_json(msg="Unable to delete role: {0}".format(to_native(e)),
                             exception=traceback.format_exc())
    else:
        module.exit_json(changed=False)

    module.exit_json(changed=True)
def put_file(self, in_path, out_path):
    """Copy a local file to the Windows remote over WinRM.

    Streams the file as base64 chunks into a PowerShell pipeline that
    decodes and writes them, computing a SHA-1 on the remote side which is
    then compared against the local hash to verify integrity.

    Raises AnsibleFileNotFound when the local file is missing and
    AnsibleError on transfer or hash-mismatch failures.
    """
    super(Connection, self).put_file(in_path, out_path)
    out_path = self._shell._unquote(out_path)
    display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
    if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
        raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))

    # PowerShell receiver: each pipeline input item is one base64 chunk.
    # It is decoded, appended to the target file, and folded into an
    # incremental SHA-1; the final hash is emitted as a JSON object.
    # (Double braces escape str.format; {0} receives the target path.)
    script_template = u'''
        begin {{
            $path = '{0}'

            $DebugPreference = "Continue"
            $ErrorActionPreference = "Stop"
            Set-StrictMode -Version 2

            $fd = [System.IO.File]::Create($path)

            $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()

            $bytes = @() #initialize for empty file case
        }}
        process {{
           $bytes = [System.Convert]::FromBase64String($input)
           $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
           $fd.Write($bytes, 0, $bytes.Length)
        }}
        end {{
            $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null

            $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()

            $fd.Close()

            Write-Output "{{""sha1"":""$hash""}}"
        }}
    '''

    script = script_template.format(self._shell._escape(out_path))
    cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)

    # The file content is supplied as the remote command's stdin stream.
    result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
    # TODO: improve error handling
    if result.status_code != 0:
        raise AnsibleError(to_native(result.std_err))

    put_output = json.loads(result.std_out)
    remote_sha1 = put_output.get("sha1")
    if not remote_sha1:
        raise AnsibleError("Remote sha1 was not returned")

    local_sha1 = secure_hash(in_path)

    if not remote_sha1 == local_sha1:
        raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
def main():
    """Ansible entry point for managing an OpenSSL private key file."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            size=dict(default=4096, type='int'),
            type=dict(default='RSA', choices=['RSA', 'DSA'], type='str'),
            force=dict(default=False, type='bool'),
            path=dict(required=True, type='path'),
            passphrase=dict(type='str', no_log=True),
            cipher=dict(type='str'),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_together=[['cipher', 'passphrase']],
    )

    if not pyopenssl_found:
        module.fail_json(msg='the python pyOpenSSL module is required')

    # The destination directory must already exist.
    base_dir = os.path.dirname(module.params['path'])
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir
        )

    private_key = PrivateKey(module)
    wants_key = private_key.state == 'present'

    # In check mode, only predict whether a change would occur.
    if module.check_mode:
        result = private_key.dump()
        if wants_key:
            result['changed'] = module.params['force'] or not private_key.check(module)
        else:
            result['changed'] = os.path.exists(module.params['path'])
        module.exit_json(**result)

    try:
        if wants_key:
            private_key.generate(module)
        else:
            private_key.remove()
    except PrivateKeyError as exc:
        module.fail_json(msg=to_native(exc))

    result = private_key.dump()
    module.exit_json(**result)
def get_role(connection, module, name):
    """Fetch an IAM role by name.

    :param connection: boto3 IAM client.
    :param module: AnsibleModule used for error reporting.
    :param name: role name to look up.
    :return: the role dict, or None when the role does not exist.
    """
    try:
        response = connection.get_role(RoleName=name)
    except ClientError as e:
        # A missing role is an expected condition, not a failure.
        if e.response['Error']['Code'] == 'NoSuchEntity':
            return None
        module.fail_json(msg="Unable to get role {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    except BotoCoreError as e:
        module.fail_json(msg="Unable to get role {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc())
    else:
        return response['Role']
def copy_file_from_remote(module, local, local_file_directory, file_system='bootflash:'):
    """Copy a file from a remote SCP server onto the NX-OS device.

    Drives an interactive ssh session to the device with pexpect, creates
    any missing directories under the target file system one level at a
    time, then runs ``copy scp://...`` on the device CLI and answers its
    interactive prompts.

    :param module: AnsibleModule instance (provides connection params).
    :param local: target file name on the device.
    :param local_file_directory: optional directory path (relative to the
        file system root) to create and copy into.
    :param file_system: device file system to copy onto.
    """
    hostname = module.params['host']
    username = module.params['username']
    password = module.params['password']
    port = module.params['connect_ssh_port']

    try:
        child = pexpect.spawn('ssh ' + username + '@' + hostname + ' -p' + str(port))
        # response could be unknown host addition or Password
        index = child.expect(['yes', '(?i)Password'])
        if index == 0:
            child.sendline('yes')
            child.expect('(?i)Password')
        child.sendline(password)
        child.expect('#')
        ldir = '/'
        if local_file_directory:
            # device CLI has no recursive mkdir: create one component at a time
            dir_array = local_file_directory.split('/')
            for each in dir_array:
                if each:
                    child.sendline('mkdir ' + ldir + each)
                    child.expect('#')
                    ldir += each + '/'

        cmdroot = 'copy scp://'
        ruser = module.params['remote_scp_server_user'] + '@'
        rserver = module.params['remote_scp_server']
        rfile = module.params['remote_file'] + ' '
        vrf = ' vrf management'
        command = (cmdroot + ruser + rserver + rfile + file_system + ldir + local + vrf)

        child.sendline(command)
        # response could be remote host connection time out,
        # there is already an existing file with the same name,
        # unknown host addition or password
        index = child.expect(['timed out', 'existing', 'yes', '(?i)password'], timeout=180)
        if index == 0:
            module.fail_json(msg='Timeout occured due to remote scp server not responding')
        elif index == 1:
            # overwrite the pre-existing file on the device
            child.sendline('y')
            # response could be unknown host addition or Password
            sub_index = child.expect(['yes', '(?i)password'])
            if sub_index == 0:
                child.sendline('yes')
                child.expect('(?i)password')
        elif index == 2:
            child.sendline('yes')
            child.expect('(?i)password')
        child.sendline(module.params['remote_scp_server_password'])
        fpt = module.params['file_pull_timeout']
        # response could be that there is no space left on device,
        # permission denied due to wrong user/password,
        # remote file non-existent or success,
        # timeout due to large file transfer or network too slow,
        # success
        index = child.expect(['No space', 'Permission denied', 'No such file', pexpect.TIMEOUT, '#'], timeout=fpt)
        if index == 0:
            module.fail_json(msg='File copy failed due to no space left on the device')
        elif index == 1:
            module.fail_json(msg='Username/Password for remote scp server is wrong')
        elif index == 2:
            module.fail_json(msg='File copy failed due to remote file not present')
        elif index == 3:
            module.fail_json(msg='Timeout occured, please increase "file_pull_timeout" and try again!')
    except pexpect.ExceptionPexpect as e:
        module.fail_json(msg='%s' % to_native(e), exception=traceback.format_exc())

    child.close()
def test_wait_import_task_with_failure(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
    """A failed Galaxy import raises AnsibleError and relays each server-side
    message at the matching display level (info/warning/error)."""
    api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    # Single canned response: the task already finished in the 'failed' state.
    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(to_text(json.dumps({
            'finished_at': 'some_time',
            'state': 'failed',
            'error': {
                'code': 'GW001',
                'description': u'Becäuse I said so!',
            },
            'messages': [
                {
                    'level': 'error',
                    'message': u'Somé error',
                },
                {
                    'level': 'warning',
                    'message': u'Some wärning',
                },
                {
                    'level': 'info',
                    'message': u'Somé info',
                },
            ],
        }))),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    # Capture each Display output channel separately.
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)

    mock_vvv = MagicMock()
    monkeypatch.setattr(Display, 'vvv', mock_vvv)

    mock_warn = MagicMock()
    monkeypatch.setattr(Display, 'warning', mock_warn)

    mock_err = MagicMock()
    monkeypatch.setattr(Display, 'error', mock_err)

    expected = to_native(u'Galaxy import process failed: Becäuse I said so! (Code: GW001)')
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        api.wait_import_task(import_uri)

    # Exactly one status poll was needed, with the auth header attached.
    assert mock_open.call_count == 1
    assert mock_open.mock_calls[0][1][0] == full_import_uri
    assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type

    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri

    assert mock_vvv.call_count == 1
    assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info'

    assert mock_warn.call_count == 1
    assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'

    assert mock_err.call_count == 1
    assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
def send_key_to_vm(self, vm_obj):
    """Translate the requested keys/string into USB HID scan codes and send
    them to the virtual machine.

    :param vm_obj: the target virtual machine object.
    :return: result dict with 'changed', 'failed' and 'sendkey_info' keys.
    """
    key_event = None
    num_keys_returned = 0
    keys_send = self.params['keys_send']
    string_send = self.params['string_send']

    if keys_send:
        for requested in keys_send:
            matched = False
            for entry in self.keys_hid_code:
                is_group = isinstance(entry[0], tuple)
                if (is_group and requested in entry[0]) or (not is_group and requested == entry[0]):
                    hid_code, modifiers = self.get_hid_from_key(requested)
                    key_event = self.get_key_event(hid_code, modifiers)
                    self.usb_scan_code_spec.keyEvents.append(key_event)
                    self.num_keys_send += 1
                    matched = True
                    break
            if not matched:
                self.module.fail_json(msg="keys_send parameter: '%s' in %s not supported."
                                          % (requested, keys_send))

    if string_send:
        for ch in string_send:
            matched = False
            for entry in self.keys_hid_code:
                if (isinstance(entry[0], tuple) and ch in entry[0]) or ch == ' ':
                    hid_code, modifiers = self.get_hid_from_key(ch)
                    key_event = self.get_key_event(hid_code, modifiers)
                    self.usb_scan_code_spec.keyEvents.append(key_event)
                    self.num_keys_send += 1
                    matched = True
                    break
            if not matched:
                self.module.fail_json(msg="string_send parameter: '%s' contains char: '%s' not supported."
                                          % (string_send, ch))

    if self.usb_scan_code_spec.keyEvents:
        try:
            num_keys_returned = vm_obj.PutUsbScanCodes(self.usb_scan_code_spec)
            self.change_detected = True
        except vmodl.RuntimeFault as e:
            self.module.fail_json(msg="Failed to send key %s to virtual machine due to %s"
                                      % (key_event, to_native(e.msg)))

    sendkey_facts = self.get_sendkey_facts(vm_obj, num_keys_returned)
    # The send is considered failed when fewer keys arrived than were queued.
    return {
        'changed': self.change_detected,
        'failed': num_keys_returned != self.num_keys_send,
        'sendkey_info': sendkey_facts,
    }
def connect_to_db(module, autocommit=False, fail_on_conn=True, warn_db_default=True):
    """Connect to a PostgreSQL database using the module's parameters.

    :param module: AnsibleModule whose params carry the connection info.
    :param autocommit: enable autocommit on the new connection.
    :param fail_on_conn: call fail_json on connection failure; when False,
        only emit a warning and return None.
    :param warn_db_default: warn when no database-name parameter was passed.
    :return: psycopg2 connection object, or None when the connection failed
        and fail_on_conn is False.
    """
    ensure_required_libs(module)

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host": "host",
        # FIX: these two previously mapped to the literal string "******"
        # (a redaction artifact), which psycopg2.connect() rejects with a
        # TypeError; the correct keyword names are 'user' and 'password'.
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }

    # The database-name parameter is spelled differently across modules:
    if module.params.get('db'):
        params_map['db'] = 'database'
    elif module.params.get('database'):
        params_map['database'] = 'database'
    elif module.params.get('login_db'):
        params_map['login_db'] = 'database'
    else:
        if warn_db_default:
            module.warn('Database name has not been passed, '
                        'used default database to connect to.')

    kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
              if k in params_map and v != '' and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"

    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]

    try:
        db_connection = psycopg2.connect(**kw)
        if autocommit:
            # NOTE(review): lexicographic version compare; good enough for
            # 2.x but would misorder e.g. '2.10' vs '2.4.2' — confirm.
            if psycopg2.__version__ >= '2.4.2':
                db_connection.set_session(autocommit=True)
            else:
                db_connection.set_isolation_level(
                    psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

        # Switch role, if specified:
        cursor = db_connection.cursor(cursor_factory=DictCursor)
        if module.params.get('session_role'):
            try:
                cursor.execute('SET ROLE %s' % module.params['session_role'])
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e))
        cursor.close()

    except TypeError as e:
        # Old psycopg2 raises TypeError for the unknown sslrootcert kwarg.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least '
                                 'version 8.4 to support sslrootcert')

        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None

    except Exception as e:
        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None

    return db_connection
def is_present(name):
    """Return True when the given locale is already installed on the system."""
    stdout = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
    wanted = fix_case(name)
    installed = to_native(stdout).splitlines()
    return any(wanted == fix_case(candidate) for candidate in installed)
def test_extract_tar_file_missing_member(tmp_tarfile):
    """Extraction raises AnsibleError when the requested member is absent."""
    dest_dir, collection_tar = tmp_tarfile[0], tmp_tarfile[1]

    expected = ("Collection tar at '%s' does not contain the expected file 'missing'."
                % to_native(collection_tar.name))

    with pytest.raises(AnsibleError, match=expected):
        collection._extract_tar_file(collection_tar, 'missing', dest_dir, dest_dir)
def main():
    """Manage a systemd unit: state, enablement, masking and daemon-reload."""
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str', aliases=['unit', 'service']),
            state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
            enabled=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
            user=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
    )

    systemctl = module.get_bin_path('systemctl')
    if module.params['user']:
        # operate on the caller's per-user systemd instance
        systemctl = systemctl + " --user"
    unit = module.params['name']
    rc = 0
    out = err = ''
    result = {
        'name': unit,
        'changed': False,
        'status': {},
        'warnings': [],
    }

    # Run daemon-reload first, if requested
    if module.params['daemon_reload']:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))

    found = False
    is_initd = sysv_exists(unit)
    is_systemd = False

    # check service data, cannot error out on rc as it changes across versions, assume not found
    (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
    if rc == 0:
        # load return of systemctl show into dictionary for easy access and return
        multival = []
        if out:
            # Small state machine: k is None while scanning for the next
            # 'key=value' line; otherwise we are accumulating a {}-wrapped
            # multi-line value into multival until the closing '}'.
            k = None
            for line in to_native(out).split('\n'):  # systemd can have multiline values delimited with {}
                if line.strip():
                    if k is None:
                        if '=' in line:
                            k, v = line.split('=', 1)
                            if v.lstrip().startswith('{'):
                                if not v.rstrip().endswith('}'):
                                    # multi-line value starts here; keep scanning
                                    multival.append(line)
                                    continue
                            result['status'][k] = v.strip()
                            k = None
                    else:
                        if line.rstrip().endswith('}'):
                            result['status'][k] = '\n'.join(multival).strip()
                            multival = []
                            k = None
                        else:
                            multival.append(line)

        is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'

        # Check for loading error
        if is_systemd and 'LoadError' in result['status']:
            module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))

    # Does service exist?
    found = is_systemd or is_initd
    if is_initd and not is_systemd:
        result['warnings'].append('The service (%s) is actually an init script but the system is managed by systemd' % unit)

    # mask/unmask the service, if requested, can operate on services before they are installed
    if module.params['masked'] is not None:
        # state is not masked unless systemd affirms otherwise
        masked = ('LoadState' in result['status'] and result['status']['LoadState'] == 'masked')

        if masked != module.params['masked']:
            result['changed'] = True
            if module.params['masked']:
                action = 'mask'
            else:
                action = 'unmask'

            if not module.check_mode:
                (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                if rc != 0:
                    # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
                    fail_if_missing(module, found, unit, msg='host')

    # Enable/disable service startup at boot if requested
    if module.params['enabled'] is not None:
        if module.params['enabled']:
            action = 'enable'
        else:
            action = 'disable'

        fail_if_missing(module, found, unit, msg='host')

        # do we need to enable the service?
        enabled = False
        (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))

        # check systemctl result or if it is a init script
        if rc == 0:
            enabled = True
        elif rc == 1:
            # if both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
            if is_initd and (not out.startswith('disabled') or sysv_is_enabled(unit)):
                enabled = True

        # default to current state
        result['enabled'] = enabled

        # Change enable/disable if needed
        if enabled != module.params['enabled']:
            result['changed'] = True
            if not module.check_mode:
                (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                if rc != 0:
                    module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))

            result['enabled'] = not enabled

    # set service state if requested
    if module.params['state'] is not None:
        fail_if_missing(module, found, unit, msg="host")

        # default to desired state
        result['state'] = module.params['state']

        # What is current service state?
        if 'ActiveState' in result['status']:
            action = None
            if module.params['state'] == 'started':
                if result['status']['ActiveState'] != 'active':
                    action = 'start'
            elif module.params['state'] == 'stopped':
                if result['status']['ActiveState'] == 'active':
                    action = 'stop'
            else:
                action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
                result['state'] = 'started'

            if action:
                result['changed'] = True
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
        else:
            # this should not happen?
            module.fail_json(msg="Service is in unknown state", status=result['status'])

    module.exit_json(**result)
def grafana_create_dashboard(module, data):
    """Create or update a Grafana dashboard from a JSON payload.

    The payload may come from grafana.com (dashboard_id/revision), an HTTP
    URL, or a local file. Handles the API differences between Grafana < 5
    (slug-based lookup) and >= 5 (uid- and folder-based lookup).

    :param module: AnsibleModule instance used for HTTP requests.
    :param data: module params (path/uid/slug/folder/overwrite/...).
    :return: result dict with 'msg', 'uid' and 'changed' keys.
    :raises GrafanaAPIException: on download/create/update failures.
    :raises GrafanaMalformedJson: when no slug is available on Grafana < 5.
    """
    # define data payload for grafana API
    payload = {}
    if data.get('dashboard_id'):
        # fetch a published dashboard revision straight from grafana.com
        data['path'] = "https://grafana.com/api/dashboards/%s/revisions/%s/download" % (data['dashboard_id'], data['dashboard_revision'])

    if data['path'].startswith('http'):
        r, info = fetch_url(module, data['path'])
        if info['status'] != 200:
            raise GrafanaAPIException('Unable to download grafana dashboard from url %s : %s' % (data['path'], info))
        payload = json.loads(r.read())
    else:
        try:
            with open(data['path'], 'r') as json_file:
                payload = json.load(json_file)
        except Exception as e:
            raise GrafanaAPIException("Can't load json file %s" % to_native(e))

    # Check that the dashboard JSON is nested under the 'dashboard' key
    if 'dashboard' not in payload:
        payload = {'dashboard': payload}

    # define http header
    headers = grafana_headers(module, data)

    grafana_version = get_grafana_version(module, data['grafana_url'], headers)
    if grafana_version < 5:
        # pre-5 identifies dashboards by slug
        if data.get('slug'):
            uid = data['slug']
        elif 'meta' in payload and 'slug' in payload['meta']:
            uid = payload['meta']['slug']
        else:
            raise GrafanaMalformedJson('No slug found in json. Needed with grafana < 5')
    else:
        # 5+ identifies dashboards by uid
        if data.get('uid'):
            uid = data['uid']
        elif 'uid' in payload['dashboard']:
            uid = payload['dashboard']['uid']
        else:
            uid = None

    result = {}

    # test if the folder exists
    if grafana_version >= 5:
        folder_exists, folder_id = grafana_folder_exists(module, data['grafana_url'], data['folder'], headers)
        if folder_exists is False:
            result['msg'] = "Dashboard folder '%s' does not exist." % data['folder']
            result['uid'] = uid
            result['changed'] = False
            return result

        payload['folderId'] = folder_id

    # test if dashboard already exists
    # NOTE(review): folder_exists/folder_id are only assigned on the
    # grafana_version >= 5 path above; on < 5 with uid unset (or in the
    # create branch below) referencing them would raise NameError — confirm.
    if uid:
        dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], uid, headers=headers)
    else:
        dashboard_exists, dashboard = grafana_dashboard_search(module, data['grafana_url'], folder_id, payload['dashboard']['title'], headers=headers)

    if dashboard_exists is True:
        if grafana_dashboard_changed(payload, dashboard):
            # update
            if 'overwrite' in data and data['overwrite']:
                payload['overwrite'] = True
            if 'commit_message' in data and data['commit_message']:
                payload['message'] = data['commit_message']

            r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
            if info['status'] == 200:
                if grafana_version >= 5:
                    try:
                        # Grafana 5+ returns the (possibly generated) uid
                        dashboard = json.loads(r.read())
                        uid = dashboard['uid']
                    except Exception as e:
                        raise GrafanaAPIException(e)
                result['uid'] = uid
                result['msg'] = "Dashboard %s updated" % payload['dashboard']['title']
                result['changed'] = True
            else:
                body = json.loads(info['body'])
                # NOTE(review): the API error body presumably carries a
                # 'message' key, not 'commit_message' — verify; a KeyError
                # here would mask the real API error.
                raise GrafanaAPIException('Unable to update the dashboard %s : %s (HTTP: %d)' % (uid, body['commit_message'], info['status']))
        else:
            # unchanged
            result['uid'] = uid
            result['msg'] = "Dashboard %s unchanged." % payload['dashboard']['title']
            result['changed'] = False
    else:
        # create
        if folder_exists is True:
            payload['folderId'] = folder_id

        # Ensure there is no id in payload
        if 'id' in payload['dashboard']:
            del payload['dashboard']['id']

        r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
        if info['status'] == 200:
            result['msg'] = "Dashboard %s created" % payload['dashboard']['title']
            result['changed'] = True
            if grafana_version >= 5:
                try:
                    dashboard = json.loads(r.read())
                    uid = dashboard['uid']
                except Exception as e:
                    raise GrafanaAPIException(e)
            result['uid'] = uid
        else:
            raise GrafanaAPIException('Unable to create the new dashboard %s : %s - %s. (headers : %s)' % (payload['dashboard']['title'], info['status'], info, headers))

    return result
def _populate(self):
    """Build the in-memory inventory from the Foreman host list.

    For each host: adds the host, maps its hostgroup to an Ansible group,
    sets hostvars (legacy or prefixed style), optionally pulls Foreman
    parameters, facts and host collections, and finally applies the
    constructed-inventory options (compose/groups/keyed_groups).
    """
    for host in self._get_hosts():
        if host.get('name'):
            host_name = self.inventory.add_host(host['name'])

            # create directly mapped groups
            group_name = host.get('hostgroup_title', host.get('hostgroup_name'))
            if group_name:
                group_name = to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group_name.lower().replace(" ", "")))
                group_name = self.inventory.add_group(group_name)
                self.inventory.add_child(group_name, host_name)

            if self.get_option('legacy_hostvars'):
                hostvars = self._get_hostvars(host)
                self.inventory.set_variable(host_name, 'foreman', hostvars)
            else:
                omitted_vars = ('name', 'hostgroup_title', 'hostgroup_name')
                hostvars = self._get_hostvars(host, self.get_option('vars_prefix'), omitted_vars)

                for k, v in hostvars.items():
                    try:
                        self.inventory.set_variable(host_name, k, v)
                    except ValueError as e:
                        self.display.warning("Could not set host info hostvar for %s, skipping %s: %s" % (host, k, to_text(e)))

            # set host vars from params
            if self.get_option('want_params'):
                params = self._get_all_params_by_id(host['id'])
                filtered_params = {}
                for p in params:
                    if 'name' in p and 'value' in p:
                        filtered_params[p['name']] = p['value']

                if self.get_option('legacy_hostvars'):
                    self.inventory.set_variable(host_name, 'foreman_params', filtered_params)
                else:
                    for k, v in filtered_params.items():
                        try:
                            # BUG FIX: this loop previously set
                            # p['name']/p['value'] — the variable leaked from
                            # the filtering loop above — so every parameter
                            # was overwritten by the last one filtered.
                            self.inventory.set_variable(host_name, k, v)
                        except ValueError as e:
                            self.display.warning("Could not set hostvar %s to '%s' for the '%s' host, skipping: %s" %
                                                 (k, to_native(v), host, to_native(e)))

            # set host vars from facts
            if self.get_option('want_facts'):
                self.inventory.set_variable(host_name, 'foreman_facts', self._get_facts(host))

            # create group for host collections
            if self.get_option('want_hostcollections'):
                host_data = self._get_host_data_by_id(host['id'])
                hostcollections = host_data.get('host_collections')
                if hostcollections:
                    # Create Ansible groups for host collections
                    for hostcollection in hostcollections:
                        try:
                            hostcollection_group = to_safe_group_name('%shostcollection_%s' % (self.get_option('group_prefix'),
                                                                                               hostcollection['name'].lower().replace(" ", "")))
                            hostcollection_group = self.inventory.add_group(hostcollection_group)
                            self.inventory.add_child(hostcollection_group, host_name)
                        except ValueError as e:
                            self.display.warning("Could not create groups for host collections for %s, skipping: %s" % (host_name, to_text(e)))

            # apply constructed-inventory options against the final hostvars
            strict = self.get_option('strict')

            hostvars = self.inventory.get_host(host_name).get_vars()

            self._set_composite_vars(self.get_option('compose'), hostvars, host_name, strict)
            self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host_name, strict)
            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host_name, strict)
def http_request(self, req):
    """Pre-flight SSL certificate validation for the request's host.

    Opens a throwaway TLS connection to the target (optionally tunnelled
    through an HTTP CONNECT proxy taken from ``https_proxy``) so that
    certificate-validation failures surface as a clear error before the
    real request is made. The request object itself is returned unmodified.

    :param req: the urllib request about to be issued.
    :return: the same request object.
    :raises ProxyError: on unsupported proxy scheme or oversized proxy reply.
    :raises ConnectionError: when the TCP connection fails.
    """
    tmp_ca_cert_path, to_add_ca_cert_path, paths_checked = self.get_ca_certs()
    https_proxy = os.environ.get('https_proxy')
    context = None
    if HAS_SSLCONTEXT:
        context = self._make_context(to_add_ca_cert_path)

    # Detect if 'no_proxy' environment variable is set and if our URL is included
    use_proxy = self.detect_no_proxy(req.get_full_url())

    if not use_proxy:
        # ignore proxy settings for this host request
        return req

    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if https_proxy:
            proxy_parts = generic_urlparse(urlparse(https_proxy))
            port = proxy_parts.get('port') or 443
            s.connect((proxy_parts.get('hostname'), port))
            if proxy_parts.get('scheme') == 'http':
                # establish an HTTP CONNECT tunnel through the proxy
                s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
                if proxy_parts.get('username'):
                    credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', ''))
                    s.sendall(b('Proxy-Authorization: Basic %s\r\n') % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
                s.sendall(b('\r\n'))
                connect_result = b("")
                # read until the end of the proxy's response headers
                while connect_result.find(b("\r\n\r\n")) <= 0:
                    connect_result += s.recv(4096)
                    # 128 kilobytes of headers should be enough for everyone.
                    if len(connect_result) > 131072:
                        raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.')
                self.validate_proxy_response(connect_result)
                if context:
                    ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
                elif HAS_URLLIB3_SNI_SUPPORT:
                    ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
                else:
                    # no SNI support: wrap and verify the hostname manually
                    ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
                    match_hostname(ssl_s.getpeercert(), self.hostname)
            else:
                raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
        else:
            s.connect((self.hostname, self.port))
            if context:
                ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
            elif HAS_URLLIB3_SNI_SUPPORT:
                ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
            else:
                # no SNI support: wrap and verify the hostname manually
                ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
                match_hostname(ssl_s.getpeercert(), self.hostname)
        # close the ssl connection
        # ssl_s.unwrap()
        s.close()
    except (ssl.SSLError, CertificateError):
        e = get_exception()
        build_ssl_validation_error(self.hostname, self.port, paths_checked, e)
    except socket.error:
        e = get_exception()
        raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e)))

    try:
        # cleanup the temp file created, don't worry
        # if it fails for some reason
        os.remove(tmp_ca_cert_path)
    except:
        pass

    try:
        # cleanup the temp file created, don't worry
        # if it fails for some reason
        if to_add_ca_cert_path:
            os.remove(to_add_ca_cert_path)
    except:
        pass

    return req
def deploy_vm_from_template(self, power_on=False):
    """Deploy a new virtual machine from a content-library template.

    Resolves every placement object (datacenter, datastore, library item,
    folder, host, optional resource pool and cluster) by name, failing the
    module if any lookup comes back empty, then builds the deploy spec and
    invokes the content-library deploy API. Exits the module with the
    deployment result.

    :param power_on: power the VM on after deployment.
    """
    # Find the datacenter by the given datacenter name
    self.datacenter_id = self.get_datacenter_by_name(datacenter_name=self.datacenter)
    if not self.datacenter_id:
        self.module.fail_json(msg="Failed to find the datacenter %s" % self.datacenter)

    # Find the datastore by the given datastore name
    self.datastore_id = self.get_datastore_by_name(self.datacenter, self.datastore)
    if not self.datastore_id:
        self.module.fail_json(msg="Failed to find the datastore %s" % self.datastore)

    # Find the LibraryItem (Template) by the given LibraryItem name
    if self.content_library_name:
        self.library_item_id = self.get_library_item_from_content_library_name(self.template_name, self.content_library_name)
        if not self.library_item_id:
            self.module.fail_json(msg="Failed to find the library Item %s in content library %s" % (self.template_name, self.content_library_name))
    else:
        self.library_item_id = self.get_library_item_by_name(self.template_name)
        if not self.library_item_id:
            self.module.fail_json(msg="Failed to find the library Item %s" % self.template_name)

    # Find the folder by the given folder name
    self.folder_id = self.get_folder_by_name(self.datacenter, self.folder)
    if not self.folder_id:
        self.module.fail_json(msg="Failed to find the folder %s" % self.folder)

    # Find the Host by given HostName
    self.host_id = self.get_host_by_name(self.datacenter, self.host)
    if not self.host_id:
        self.module.fail_json(msg="Failed to find the Host %s" % self.host)

    # Find the resourcepool by the given resourcepool name
    self.resourcepool_id = None
    if self.resourcepool:
        self.resourcepool_id = self.get_resource_pool_by_name(self.datacenter, self.resourcepool)
        if not self.resourcepool_id:
            self.module.fail_json(msg="Failed to find the resource_pool %s" % self.resourcepool)

    # Find the Cluster by the given Cluster name
    self.cluster_id = None
    if self.cluster:
        self.cluster_id = self.get_cluster_by_name(self.datacenter, self.cluster)
        if not self.cluster_id:
            self.module.fail_json(msg="Failed to find the Cluster %s" % self.cluster)

    # Create VM placement specs
    self.placement_spec = LibraryItems.DeployPlacementSpec(folder=self.folder_id, host=self.host_id)
    if self.resourcepool_id or self.cluster_id:
        self.placement_spec.resource_pool = self.resourcepool_id
        self.placement_spec.cluster = self.cluster_id
    self.vm_home_storage_spec = LibraryItems.DeploySpecVmHomeStorage(datastore=to_native(self.datastore_id))
    self.disk_storage_spec = LibraryItems.DeploySpecDiskStorage(datastore=to_native(self.datastore_id))
    self.deploy_spec = LibraryItems.DeploySpec(
        name=self.vm_name,
        placement=self.placement_spec,
        vm_home_storage=self.vm_home_storage_spec,
        disk_storage=self.disk_storage_spec,
        powered_on=power_on
    )
    vm_id = ''
    try:
        vm_id = self.template_service.deploy(self.library_item_id, self.deploy_spec)
    except Error as error:
        # vSphere API error: report its structured message
        self.module.fail_json(msg="%s" % self.get_error_message(error))
    except Exception as err:
        self.module.fail_json(msg="%s" % to_native(err))

    if vm_id:
        self.module.exit_json(
            changed=True,
            vm_deploy_info=dict(
                msg="Deployed Virtual Machine '%s'." % self.vm_name,
                vm_id=vm_id,
            ))
    self.module.exit_json(
        changed=False,
        vm_deploy_info=dict(
            msg="Virtual Machine deployment failed",
            vm_id=vm_id))
def main():
    """Push an XML configuration to a NETCONF device.

    Works either over an Ansible persistent connection (netconf plugin) or,
    when run locally, over a direct ncclient session. Picks the target
    datastore from the server's advertised capabilities and optionally
    copies running to startup after a successful edit.
    """
    module = AnsibleModule(
        argument_spec=dict(
            xml=dict(type='str', required=False),
            src=dict(type='path', required=False),
            datastore=dict(choices=['auto', 'candidate', 'running'], default='auto'),
            save=dict(type='bool', default=False),
            # connection arguments
            host=dict(type='str'),
            port=dict(type='int', default=830),
            username=dict(type='str', no_log=True),
            password=dict(type='str', no_log=True),
            hostkey_verify=dict(type='bool', default=True),
            look_for_keys=dict(type='bool', default=True),
            allow_agent=dict(type='bool', default=True),
        ),
        mutually_exclusive=[('xml', 'src')])

    # ncclient is only needed when there is no persistent connection socket
    if not module._socket_path and not HAS_NCCLIENT:
        module.fail_json(msg='could not import the python library '
                             'ncclient required by this module')

    if (module.params['src']):
        config_xml = str(module.params['src'])
    elif module.params['xml']:
        config_xml = str(module.params['xml'])
    else:
        module.fail_json(msg='Option src or xml must be provided')

    local_connection = module._socket_path is None
    if not local_connection:
        # reuse the persistent netconf connection
        m = Connection(module._socket_path)
        capabilities = module.from_json(m.get_capabilities())
        server_capabilities = capabilities.get('server_capabilities')
    else:
        # open a direct ncclient session with the given credentials
        nckwargs = dict(
            host=module.params['host'],
            port=module.params['port'],
            hostkey_verify=module.params['hostkey_verify'],
            allow_agent=module.params['allow_agent'],
            look_for_keys=module.params['look_for_keys'],
            username=module.params['username'],
            password=module.params['password'],
        )

        try:
            m = ncclient.manager.connect(**nckwargs)
            server_capabilities = list(m.server_capabilities)
        except ncclient.transport.errors.AuthenticationError:
            module.fail_json(msg='authentication failed while connecting to device')
        except Exception as e:
            module.fail_json(msg='error connecting to the device: %s' % to_native(e),
                             exception=traceback.format_exc())

    # validate the payload is well-formed XML before touching the device
    try:
        xml.dom.minidom.parseString(config_xml)
    except Exception as e:
        module.fail_json(msg='error parsing XML: %s' % to_native(e),
                         exception=traceback.format_exc())

    retkwargs = dict()
    retkwargs['server_capabilities'] = server_capabilities
    # flatten for simple substring capability checks below
    server_capabilities = '\n'.join(server_capabilities)

    if module.params['datastore'] == 'candidate':
        if ':candidate' in server_capabilities:
            datastore = 'candidate'
        else:
            if local_connection:
                m.close_session()
            module.fail_json(msg=':candidate is not supported by this netconf server')
    elif module.params['datastore'] == 'running':
        if ':writable-running' in server_capabilities:
            datastore = 'running'
        else:
            if local_connection:
                m.close_session()
            module.fail_json(msg=':writable-running is not supported by this netconf server')
    elif module.params['datastore'] == 'auto':
        # prefer candidate when available, fall back to writable running
        if ':candidate' in server_capabilities:
            datastore = 'candidate'
        elif ':writable-running' in server_capabilities:
            datastore = 'running'
        else:
            if local_connection:
                m.close_session()
            module.fail_json(msg='neither :candidate nor :writable-running are supported by this netconf server')
    else:
        if local_connection:
            m.close_session()
        module.fail_json(msg=module.params['datastore'] + ' datastore is not supported by this ansible module')

    if module.params['save']:
        if ':startup' not in server_capabilities:
            module.fail_json(msg='cannot copy <running/> to <startup/>, while :startup is not supported')

    try:
        changed = netconf_edit_config(
            m=m,
            xml=config_xml,
            commit=True,
            retkwargs=retkwargs,
            datastore=datastore,
            capabilities=server_capabilities,
            local_connection=local_connection)
        if changed and module.params['save']:
            # persist the new running config across reboots
            m.copy_config(source="running", target="startup")
    except Exception as e:
        module.fail_json(msg='error editing configuration: %s' % to_native(e),
                         exception=traceback.format_exc())
    finally:
        if local_connection:
            m.close_session()

    module.exit_json(changed=changed, **retkwargs)
def main():
    """Grant or revoke PostgreSQL privileges on database objects.

    Validates the interdependent ``type``/``schema``/``objs``/``privs``
    parameters, connects to the database, resolves object and role lists,
    and delegates the actual GRANT/REVOKE work to
    ``conn.manipulate_privs``. Commits only when not in check mode and a
    change was made; otherwise rolls back.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        database=dict(required=True, aliases=['db', 'login_db']),
        state=dict(default='present', choices=['present', 'absent']),
        privs=dict(required=False, aliases=['priv']),
        type=dict(default='table',
                  choices=[
                      'table', 'sequence', 'function', 'procedure', 'database',
                      'schema', 'language', 'tablespace', 'group',
                      'default_privs', 'foreign_data_wrapper',
                      'foreign_server', 'type',
                  ]),
        objs=dict(required=False, aliases=['obj']),
        schema=dict(required=False),
        roles=dict(required=True, aliases=['role']),
        session_role=dict(required=False),
        target_roles=dict(required=False),
        grant_option=dict(required=False, type='bool', aliases=['admin_option']),
        host=dict(default='', aliases=['login_host']),
        unix_socket=dict(default='', aliases=['login_unix_socket']),
        login=dict(default='postgres', aliases=['login_user']),
        password=dict(default='', aliases=['login_password'], no_log=True),
        fail_on_role=dict(type='bool', default=True),
        trust_input=dict(type='bool', default=True),
        usage_on_types=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    fail_on_role = module.params['fail_on_role']
    usage_on_types = module.params['usage_on_types']

    # Create type object as namespace for module params
    p = type('Params', (), module.params)

    # param "schema": default, allowed depends on param "type"
    if p.type in [
            'table', 'sequence', 'function', 'procedure', 'type', 'default_privs'
    ]:
        p.schema = p.schema or 'public'
    elif p.schema:
        module.fail_json(msg='Argument "schema" is not allowed '
                         'for type "%s".' % p.type)

    # param "objs": default, required depends on param "type"
    if p.type == 'database':
        p.objs = p.objs or p.database
    elif not p.objs:
        module.fail_json(msg='Argument "objs" is required '
                         'for type "%s".' % p.type)

    # param "privs": allowed, required depends on param "type"
    if p.type == 'group':
        if p.privs:
            module.fail_json(msg='Argument "privs" is not allowed '
                             'for type "group".')
    elif not p.privs:
        module.fail_json(msg='Argument "privs" is required '
                         'for type "%s".' % p.type)

    # Check input
    if not p.trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, p.roles, p.target_roles, p.session_role, p.schema)

    # Connect to Database
    if not psycopg2:
        module.fail_json(msg=missing_required_lib('psycopg2'),
                         exception=PSYCOPG2_IMP_ERR)
    try:
        conn = Connection(p, module)
    except psycopg2.Error as e:
        module.fail_json(msg='Could not connect to database: %s' % to_native(e),
                         exception=traceback.format_exc())
    except TypeError as e:
        # Old psycopg2 raises TypeError for the unsupported sslrootcert kwarg.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(
                msg='Postgresql server must be at least version 8.4 to support sslrootcert'
            )
        module.fail_json(msg="unable to connect to database: %s" % to_native(e),
                         exception=traceback.format_exc())
    except ValueError as e:
        # We raise this when the psycopg library is too old
        module.fail_json(msg=to_native(e))

    if p.session_role:
        try:
            conn.cursor.execute('SET ROLE "%s"' % p.session_role)
        except Exception as e:
            module.fail_json(msg="Could not switch to role %s: %s" %
                             (p.session_role, to_native(e)),
                             exception=traceback.format_exc())

    try:
        # privs: normalize to an upper-case frozenset and validate.
        if p.privs:
            privs = frozenset(pr.upper() for pr in p.privs.split(','))
            if not privs.issubset(VALID_PRIVS):
                module.fail_json(msg='Invalid privileges specified: %s' %
                                 privs.difference(VALID_PRIVS))
        else:
            privs = None
        # objs: expand ALL_IN_SCHEMA / ALL_DEFAULT pseudo-objects.
        if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_tables_in_schema(p.schema)
        elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_sequences_in_schema(p.schema)
        elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_functions_in_schema(p.schema)
        elif p.type == 'procedure' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_procedures_in_schema(p.schema)
        elif p.type == 'default_privs':
            if p.objs == 'ALL_DEFAULT':
                objs = frozenset(VALID_DEFAULT_OBJS.keys())
            else:
                objs = frozenset(obj.upper() for obj in p.objs.split(','))
                if not objs.issubset(VALID_DEFAULT_OBJS):
                    module.fail_json(
                        msg='Invalid Object set specified: %s' %
                        objs.difference(VALID_DEFAULT_OBJS.keys()))
            # Again, do we have valid privs specified for object type:
            valid_objects_for_priv = frozenset(
                obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
            if not valid_objects_for_priv == objs:
                module.fail_json(
                    msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'
                    .format(valid_objects_for_priv, objs))
        else:
            objs = p.objs.split(',')

            # function signatures are encoded using ':' to separate args
            if p.type in ('function', 'procedure'):
                objs = [obj.replace(':', ',') for obj in objs]

        # roles
        if p.roles.upper() == 'PUBLIC':
            roles = 'PUBLIC'
        else:
            roles = p.roles.split(',')

            if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
                module.exit_json(changed=False)

                # NOTE(review): exit_json above raises SystemExit, so the
                # fail_on_role branch below appears unreachable — confirm intent.
                if fail_on_role:
                    module.fail_json(msg="Role '%s' does not exist" %
                                     roles[0].strip())
                else:
                    module.warn("Role '%s' does not exist, nothing to do" %
                                roles[0].strip())

        # check if target_roles is set with type: default_privs
        if p.target_roles and not p.type == 'default_privs':
            module.warn('"target_roles" will be ignored '
                        'Argument "type: default_privs" is required for usage of "target_roles".')

        # target roles
        if p.target_roles:
            target_roles = p.target_roles.split(',')
        else:
            target_roles = None

        changed = conn.manipulate_privs(
            obj_type=p.type,
            privs=privs,
            objs=objs,
            roles=roles,
            target_roles=target_roles,
            state=p.state,
            grant_option=p.grant_option,
            schema_qualifier=p.schema,
            fail_on_role=fail_on_role,
            usage_on_types=usage_on_types,
        )

    except Error as e:
        conn.rollback()
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    except psycopg2.Error as e:
        conn.rollback()
        module.fail_json(msg=to_native(e))

    # Check mode (or no-op) never commits.
    if module.check_mode or not changed:
        conn.rollback()
    else:
        conn.commit()
    module.exit_json(changed=changed, queries=executed_queries)
def main():
    """Generate or remove an OpenSSL certificate signing request (CSR).

    With ``state=present`` the CSR is (re)generated from the given private
    key and subject/extension parameters; with ``state=absent`` the CSR
    file is removed. Supports check mode, in which only the would-be
    ``changed`` status is reported. Exits via ``module.exit_json`` /
    ``module.fail_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            digest=dict(default='sha256', type='str'),
            # BUG FIX: was `require=True` — a typo for `required=True`. The
            # unknown `require` key was ignored, silently making the private
            # key path optional even though the module cannot work without it.
            privatekey_path=dict(required=True, type='path'),
            privatekey_passphrase=dict(type='str', no_log=True),
            version=dict(default='1', type='int'),
            force=dict(default=False, type='bool'),
            path=dict(required=True, type='path'),
            subject=dict(type='dict'),
            countryName=dict(aliases=['C', 'country_name'], type='str'),
            stateOrProvinceName=dict(aliases=['ST', 'state_or_province_name'], type='str'),
            localityName=dict(aliases=['L', 'locality_name'], type='str'),
            organizationName=dict(aliases=['O', 'organization_name'], type='str'),
            organizationalUnitName=dict(aliases=['OU', 'organizational_unit_name'], type='str'),
            commonName=dict(aliases=['CN', 'common_name'], type='str'),
            emailAddress=dict(aliases=['E', 'email_address'], type='str'),
            subjectAltName=dict(aliases=['subject_alt_name'], type='list'),
            subjectAltName_critical=dict(aliases=['subject_alt_name_critical'], default=False, type='bool'),
            keyUsage=dict(aliases=['key_usage'], type='list'),
            keyUsage_critical=dict(aliases=['key_usage_critical'], default=False, type='bool'),
            extendedKeyUsage=dict(aliases=['extKeyUsage', 'extended_key_usage'], type='list'),
            extendedKeyUsage_critical=dict(aliases=['extKeyUsage_critical', 'extended_key_usage_critical'], default=False, type='bool'),
            basicConstraints=dict(aliases=['basic_constraints'], type='list'),
            basicConstraints_critical=dict(aliases=['basic_constraints_critical'], default=False, type='bool'),
            ocspMustStaple=dict(aliases=['ocsp_must_staple'], default=False, type='bool'),
            ocspMustStaple_critical=dict(aliases=['ocsp_must_staple_critical'], default=False, type='bool'),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    if not pyopenssl_found:
        module.fail_json(msg='the python pyOpenSSL module is required')

    # X509Req.get_extensions only exists in pyOpenSSL >= 0.15.
    try:
        getattr(crypto.X509Req, 'get_extensions')
    except AttributeError:
        module.fail_json(msg='You need to have PyOpenSSL>=0.15 to generate CSRs')

    # The target directory must already exist; we do not create it.
    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir)

    csr = CertificateSigningRequest(module)

    if module.params['state'] == 'present':
        if module.check_mode:
            # Report whether generation would occur without touching disk.
            result = csr.dump()
            result['changed'] = module.params['force'] or not csr.check(module)
            module.exit_json(**result)

        try:
            csr.generate(module)
        except (CertificateSigningRequestError, crypto_utils.OpenSSLObjectError) as exc:
            module.fail_json(msg=to_native(exc))
    else:
        if module.check_mode:
            # Removal would change state only if the file currently exists.
            result = csr.dump()
            result['changed'] = os.path.exists(module.params['path'])
            module.exit_json(**result)

        try:
            csr.remove()
        except (CertificateSigningRequestError, crypto_utils.OpenSSLObjectError) as exc:
            module.fail_json(msg=to_native(exc))

    result = csr.dump()
    module.exit_json(**result)
def _populate(self): for host in self._get_hosts(): if host.get('name'): self.inventory.add_host(host['name']) # create directly mapped groups group_name = host.get('hostgroup_title', host.get('hostgroup_name')) if group_name: group_name = self.to_safe('%s%s' % (self.get_option('group_prefix'), group_name.lower())) self.inventory.add_group(group_name) self.inventory.add_child(group_name, host['name']) # set host vars from host info try: for k, v in host.items(): if k not in ('name', 'hostgroup_title', 'hostgroup_name'): try: self.inventory.set_variable(host['name'], self.get_option('vars_prefix') + k, v) except ValueError as e: self.display.warning("Could not set host info hostvar for %s, skipping %s: %s" % (host, k, to_native(e))) except ValueError as e: self.display.warning("Could not get host info for %s, skipping: %s" % (host['name'], to_native(e))) # set host vars from params if self.get_option('want_params'): for k, v in self._get_all_params_by_id(host['id']).items(): try: self.inventory.set_variable(host['name'], k, v) except ValueError as e: self.display.warning("Could not set parameter hostvar for %s, skipping %s: %s" % (host, k, to_native(e))) # set host vars from facts if self.get_option('want_facts'): self.inventory.set_variable(host['name'], 'ansible_facts', self._get_facts(host))
def execute_module(self):
    """Process an OpenShift Template into concrete resource definitions.

    The template comes from exactly one of ``src``, ``resource_definition``,
    or ``name``+``namespace`` (fetched from the cluster). Parameters (inline
    and/or from ``parameter_file``) are substituted, the template is sent to
    the ``processedtemplates`` API for server-side rendering, and — unless
    ``state == 'rendered'`` — the rendered objects are applied to
    ``namespace_target`` via the parent class's ``execute_module``.
    """
    self.client = get_api_client(self.module)

    v1_templates = self.find_resource('templates', 'template.openshift.io/v1', fail=True)
    v1_processed_templates = self.find_resource('processedtemplates', 'template.openshift.io/v1', fail=True)

    name = self.params.get('name')
    namespace = self.params.get('namespace')
    namespace_target = self.params.get('namespace_target')
    definition = self.params.get('resource_definition')
    src = self.params.get('src')

    state = self.params.get('state')

    parameters = self.params.get('parameters') or {}
    parameter_file = self.params.get('parameter_file')

    # The three template sources are mutually exclusive.
    if (name and definition) or (name and src) or (src and definition):
        self.fail_json("Only one of src, name, or definition may be provided")

    if name and not namespace:
        self.fail_json("namespace is required when name is set")

    template = None

    if src or definition:
        self.set_resource_definitions(self.module)
        if len(self.resource_definitions) < 1:
            self.fail_json(
                'Unable to load a Template resource from src or resource_definition'
            )
        elif len(self.resource_definitions) > 1:
            self.fail_json(
                'Multiple Template resources found in src or resource_definition, only one Template may be processed at a time'
            )
        template = self.resource_definitions[0]
        # Namespace precedence: template metadata > namespace > namespace_target.
        template_namespace = template.get('metadata', {}).get('namespace')
        namespace = template_namespace or namespace or namespace_target or 'default'
    elif name and namespace:
        # Fetch the named Template object from the cluster.
        try:
            template = v1_templates.get(name=name, namespace=namespace).to_dict()
        except DynamicApiError as exc:
            self.fail_json(
                msg="Failed to retrieve Template with name '{0}' in namespace '{1}': {2}"
                .format(name, namespace, exc.body),
                error=exc.status, status=exc.status, reason=exc.reason)
        except Exception as exc:
            self.module.fail_json(
                msg="Failed to retrieve Template with name '{0}' in namespace '{1}': {2}"
                .format(name, namespace, to_native(exc)),
                error='', status='', reason='')
    else:
        self.fail_json(
            "One of resource_definition, src, or name and namespace must be provided"
        )

    if parameter_file:
        parameters = self.parse_dotenv_and_merge(parameters, parameter_file)

    for k, v in parameters.items():
        template = self.update_template_param(template, k, v)

    result = {'changed': False}

    # Server-side rendering: POST the template to processedtemplates.
    try:
        response = v1_processed_templates.create(
            body=template, namespace=namespace).to_dict()
    except DynamicApiError as exc:
        self.fail_json(
            msg="Server failed to render the Template: {0}".format(exc.body),
            error=exc.status, status=exc.status, reason=exc.reason)
    except Exception as exc:
        self.module.fail_json(
            msg="Server failed to render the Template: {0}".format(to_native(exc)),
            error='', status='', reason='')

    result['message'] = ""
    if "message" in response:
        result['message'] = response['message']
    result['resources'] = response['objects']

    if state != 'rendered':
        # Hand the rendered objects to the base class for apply; reset the
        # single-resource fields so it iterates resource_definitions instead.
        self.resource_definitions = response['objects']
        self.kind = self.api_version = self.name = None
        self.namespace = self.params.get('namespace_target')
        self.append_hash = False
        self.apply = False
        self.params['validate'] = None
        self.params['merge_type'] = None
        super(OpenShiftProcess, self).execute_module()

    self.module.exit_json(**result)
def _load_tasks(self, attr, ds): ''' Loads a list of blocks from a list which may be mixed tasks/blocks. Bare tasks outside of a block are given an implicit block. ''' try: return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) except AssertionError as e: raise AnsibleParserError("A malformed block was encountered while loading tasks: %s" % to_native(e), obj=self._ds, orig_exc=e)
def run(module, hdfs_client):
    """Apply the requested state to a path on HDFS (mirrors the core
    ``file`` module: absent / file / directory / link / touchz).

    Always terminates via ``module.exit_json`` or ``module.fail_json``;
    the trailing fail_json is a guard that should never be reached.
    """
    params = module.params
    state = params['state']
    mode = params['mode']
    owner = params['owner']
    group = params['group']
    recurse = params['recurse']
    # TODO
    diff_peek = params['diff_peek']
    src = params['src']
    b_src = to_bytes(src, errors='surrogate_or_strict')
    # modify source as we later reload and pass, specially relevant when used by other modules.
    path = params['path']
    b_path = to_bytes(path, errors='surrogate_or_strict')

    # TODO implement this - uesd by hdfscopy
    # # short-circuit for diff_peek
    # if diff_peek is not None:
    #     appears_binary = False
    #     try:
    #         f = open(b_path, 'rb')
    #         head = f.read(8192)
    #         f.close()
    #         if b("\x00") in head:
    #             appears_binary = True
    #     except:
    #         pass
    #     module.exit_json(path=path, changed=False, appears_binary=appears_binary)

    prev_state = _get_hdfs_state(hdfs_client, b_path)

    if prev_state == 'link':
        module.fail_json(
            path=path,
            msg='the specified path is currently a symlink. hdfsfile module does not support symlinks'
        )

    # state should default to file, but since that creates many conflicts,
    # default to 'current' when it exists.
    if state is None:
        if prev_state != 'absent':
            state = prev_state
        elif recurse:
            state = 'directory'
        else:
            state = 'file'

    # original_basename is used by other modules that depend on file.
    if state != 'absent' and _is_hdfs_dir(hdfs_client, b_path):
        basename = None
        if params['original_basename']:
            basename = params['original_basename']
        elif src is not None:
            basename = os.path.basename(src)
        if basename:
            # Target is a directory: operate on dir/<basename> instead.
            params['path'] = path = os.path.join(path, basename)
            b_path = to_bytes(path, errors='surrogate_or_strict')

    # make sure the target path is a directory when we're doing a recursive operation
    if recurse and state not in ('directory', 'absent'):
        module.fail_json(
            path=path,
            msg="recurse option requires state to be either 'directory' or 'absent'"
        )

    changed = False
    diff = {
        'before': {
            'path': path
        },
        'after': {
            'path': path
        },
    }

    state_change = False
    if prev_state != state:
        diff['before']['state'] = prev_state
        diff['after']['state'] = state
        state_change = True

    if state == 'absent':
        if state_change:
            if not module.check_mode:
                if prev_state == 'directory':
                    # Only do rm -r if recurse is true
                    if not recurse and not _is_hdfs_dir_empty(hdfs_client, b_path):
                        module.fail_json(
                            msg="HDFS directory is non-empty, unable to delete with recurse=false: %s"
                            % b_path)
                    try:
                        hdfs_client.rm(b_path, recursive=recurse)
                    except (PermissionError, FileNotFoundError, IOError):
                        e = get_exception()
                        module.fail_json(
                            msg="Delete HDFS directory failed: %s" % str(e))
                else:
                    # rm
                    try:
                        hdfs_client.rm(b_path, recursive=False)
                    except (PermissionError, FileNotFoundError, IOError):
                        e = get_exception()
                        module.fail_json(msg="Delete HDFS file failed: %s" % str(e))
            module.exit_json(path=path, changed=True, diff=diff)
        else:
            # Already absent: nothing to do.
            module.exit_json(path=path, changed=False)

    elif state == 'file':
        if state_change:
            # file is not absent and any other state is a conflict
            module.fail_json(path=path,
                             msg='file (%s) is %s, cannot continue' % (path, prev_state))
        changed = _set_hdfs_attributes_if_different(module, hdfs_client, path,
                                                    mode, owner, group, changed, diff)
        module.exit_json(path=path, changed=changed, diff=diff)

    elif state == 'directory':
        if prev_state == 'absent':
            if module.check_mode:
                module.exit_json(changed=True, diff=diff)
            changed = True
            curpath = ''

            try:
                # Split the path so we can apply filesystem attributes recursively
                # from the root (/) directory for absolute paths or the base path
                # of a relative path. We can then walk the appropriate directory
                # path to apply attributes.
                for dirname in path.strip('/').split('/'):
                    curpath = '/'.join([curpath, dirname])
                    # Remove leading slash if we're creating a relative path
                    if not os.path.isabs(path):
                        curpath = curpath.lstrip('/')
                    b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
                    if not _hdfs_exists(hdfs_client, b_curpath):
                        hdfs_client.mkdir(b_curpath)
                        changed = _set_hdfs_attributes_if_different(
                            module, hdfs_client, curpath, mode, owner, group,
                            changed, diff)
            except Exception:
                e = get_exception()
                module.fail_json(
                    path=path,
                    msg='There was an issue creating directory %s: %s' % (curpath, str(e)))

        # We already know prev_state is not 'absent', therefore it exists in some form.
        elif prev_state != 'directory':
            module.fail_json(path=path,
                             msg='%s already exists as a %s' % (path, prev_state))

        changed = _set_hdfs_attributes_if_different(module, hdfs_client, path,
                                                    mode, owner, group, changed, diff)

        if recurse:
            changed |= _recursive_set_attributes(
                module, hdfs_client,
                to_bytes(path, errors='surrogate_or_strict'), mode, owner, group)

        module.exit_json(path=path, changed=changed, diff=diff)

    elif state == 'link':
        module.fail_json(
            path=path,
            msg='hdfsfile module does not support creation of symlinks')

    elif state == 'touchz':
        if not module.check_mode:
            try:
                hdfs_client.touch(b_path)
            except (PermissionError, IOError):
                e = get_exception()
                module.fail_json(path=path,
                                 msg='Error, could not touchz target: %s'
                                 % to_native(e, nonstring='simplerepr'))

            try:
                _set_hdfs_attributes_if_different(module, hdfs_client, path,
                                                  mode, owner, group, True, diff)
            except SystemExit:
                e = get_exception()
                if e.code:
                    # We take this to mean that fail_json() was called from
                    # somewhere in basic.py
                    if prev_state == 'absent':
                        # If we just created the file we can safely remove it
                        try:
                            module.log(
                                'Attempting to cleanup touchz file: %s' % path)
                            hdfs_client.rm(b_path, recursive=False)
                        except (PermissionError, FileNotFoundError, IOError):
                            e_cleanup = get_exception()
                            module.log('Unable to cleanup touchz file: %s'
                                       % str(e_cleanup))
                        # NOTE(review): FileNotFoundError is already caught by
                        # the clause above, so this handler looks unreachable.
                        except FileNotFoundError:
                            # File was never created
                            pass
                raise e
        module.exit_json(dest=path, changed=True, diff=diff)

    module.fail_json(path=path, msg='unexpected position reached')
def main():
    """Send an email over SMTP, optionally with TLS and attachments.

    Connection strategy: with ``secure`` != 'never' try implicit SSL first;
    fall back to plain SMTP and, when offered (and requested), upgrade via
    STARTTLS. Authenticates when username/password are given, builds a
    multipart MIME message (body + custom headers + attachments) and sends
    it to to/cc/bcc. Exits via ``module.exit_json``/``module.fail_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            username=dict(type='str'),
            password=dict(type='str', no_log=True),
            host=dict(type='str', default='localhost'),
            port=dict(type='int', default=25),
            sender=dict(type='str', default='root', aliases=['from']),
            to=dict(type='list', default=['root'], aliases=['recipients']),
            cc=dict(type='list', default=[]),
            bcc=dict(type='list', default=[]),
            subject=dict(type='str', required=True, aliases=['msg']),
            body=dict(type='str'),
            attach=dict(type='list', default=[]),
            headers=dict(type='list', default=[]),
            charset=dict(type='str', default='utf-8'),
            subtype=dict(type='str', default='plain', choices=['html', 'plain']),
            secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']),
            timeout=dict(type='int', default=20),
        ),
        required_together=[['password', 'username']],
    )

    username = module.params.get('username')
    password = module.params.get('password')
    host = module.params.get('host')
    port = module.params.get('port')
    sender = module.params.get('sender')
    recipients = module.params.get('to')
    copies = module.params.get('cc')
    blindcopies = module.params.get('bcc')
    subject = module.params.get('subject')
    body = module.params.get('body')
    attach_files = module.params.get('attach')
    headers = module.params.get('headers')
    charset = module.params.get('charset')
    subtype = module.params.get('subtype')
    secure = module.params.get('secure')
    timeout = module.params.get('timeout')

    code = 0
    secure_state = False
    sender_phrase, sender_addr = parseaddr(sender)

    # An empty body falls back to the subject line.
    if not body:
        body = subject

    try:
        if secure != 'never':
            # First attempt: implicit SSL on the given port.
            try:
                if PY3:
                    smtp = smtplib.SMTP_SSL(host=host, port=port, timeout=timeout)
                else:
                    smtp = smtplib.SMTP_SSL(timeout=timeout)
                code, smtpmessage = smtp.connect(host, port)
                secure_state = True
            except ssl.SSLError as e:
                if secure == 'always':
                    module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
                                     (host, port, to_native(e)), exception=traceback.format_exc())
            except Exception:
                # Fall through to a plain connection attempt below.
                pass

        if not secure_state:
            if PY3:
                smtp = smtplib.SMTP(host=host, port=port, timeout=timeout)
            else:
                smtp = smtplib.SMTP(timeout=timeout)
            code, smtpmessage = smtp.connect(host, port)

    except smtplib.SMTPException as e:
        module.fail_json(rc=1, msg='Unable to Connect %s:%s: %s' %
                         (host, port, to_native(e)), exception=traceback.format_exc())

    try:
        smtp.ehlo()
    except smtplib.SMTPException as e:
        module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' %
                         (host, port, to_native(e)), exception=traceback.format_exc())

    if int(code) > 0:
        # Try to upgrade a plain connection with STARTTLS when requested.
        if not secure_state and secure in ('starttls', 'try'):
            if smtp.has_extn('STARTTLS'):
                try:
                    smtp.starttls()
                    secure_state = True
                except smtplib.SMTPException as e:
                    module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
                                     (host, port, to_native(e)), exception=traceback.format_exc())
                try:
                    smtp.ehlo()
                except smtplib.SMTPException as e:
                    module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' %
                                     (host, port, to_native(e)), exception=traceback.format_exc())
            else:
                if secure == 'starttls':
                    module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port))

        if username and password:
            if smtp.has_extn('AUTH'):
                try:
                    smtp.login(username, password)
                except smtplib.SMTPAuthenticationError:
                    module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port))
                except smtplib.SMTPException:
                    module.fail_json(rc=1, msg='No Suitable authentication method was found on %s:%s' % (host, port))
            else:
                module.fail_json(rc=1, msg="No Authentication on the server at %s:%s" % (host, port))

    if not secure_state and (username and password):
        module.warn('Username and Password was sent without encryption')

    msg = MIMEMultipart(_charset=charset)
    msg['From'] = formataddr((sender_phrase, sender_addr))
    msg['Subject'] = Header(subject, charset)
    msg.preamble = "Multipart message"

    for header in headers:
        # NOTE: Backward compatible with old syntax using '|' as delimiter
        for hdr in [x.strip() for x in header.split('|')]:
            try:
                # BUG FIX: split on the FIRST '=' only (maxsplit=1). The old
                # hdr.split('=') raised ValueError — and the header was
                # silently skipped — whenever the value itself contained '='.
                h_key, h_val = hdr.split('=', 1)
                h_val = to_native(Header(h_val, charset))
                msg.add_header(h_key, h_val)
            except Exception:
                module.warn("Skipping header '%s', unable to parse" % hdr)

    if 'X-Mailer' not in msg:
        msg.add_header('X-Mailer', 'Ansible mail module')

    # addr_list is the envelope recipient list (To + Cc + Bcc, address only);
    # Bcc addresses deliberately never appear in the message headers.
    addr_list = []
    for addr in [x.strip() for x in blindcopies]:
        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase

    to_list = []
    for addr in [x.strip() for x in recipients]:
        to_list.append(formataddr(parseaddr(addr)))
        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
    msg['To'] = ", ".join(to_list)

    cc_list = []
    for addr in [x.strip() for x in copies]:
        cc_list.append(formataddr(parseaddr(addr)))
        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
    msg['Cc'] = ", ".join(cc_list)

    part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
    msg.attach(part)

    # NOTE: Backware compatibility with old syntax using space as delimiter is not retained
    #       This breaks files with spaces in it :-(
    for filename in attach_files:
        try:
            part = MIMEBase('application', 'octet-stream')
            with open(filename, 'rb') as fp:
                part.set_payload(fp.read())
            encoders.encode_base64(part)
            part.add_header('Content-disposition', 'attachment',
                            filename=os.path.basename(filename))
            msg.attach(part)
        except Exception as e:
            module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" %
                             (filename, to_native(e)), exception=traceback.format_exc())

    composed = msg.as_string()

    try:
        # sendmail returns a dict of recipients that were REFUSED.
        result = smtp.sendmail(sender_addr, set(addr_list), composed)
    except Exception as e:
        module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" %
                         (", ".join(set(addr_list)), to_native(e)),
                         exception=traceback.format_exc())

    smtp.quit()

    if result:
        for key in result:
            module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1]))
        module.exit_json(msg='Failed to send mail to at least one recipient', result=result)

    module.exit_json(msg='Mail sent successfully', result=result)
def ensure(self):
    """Drive the dnf transaction for the requested state.

    Parses the name list into package specs, group/environment specs,
    module specs and local filenames, marks the corresponding
    install/upgrade/remove operations on ``self.base``, then resolves and
    (outside check/download-only mode) executes the transaction.
    Failures are accumulated in ``failure_response`` so as much work as
    possible is attempted before reporting. Exits via
    ``self.module.exit_json`` / ``fail_json``.
    """
    allow_erasing = False

    response = {'msg': "", 'changed': False, 'results': [], 'rc': 0}

    # Accumulate failures. Package management modules install what they can
    # and fail with a message about what they can't.
    failure_response = {'msg': "", 'failures': [], 'results': [], 'rc': 1}

    # Autoremove is called alone
    # Jump to remove path where base.autoremove() is run
    if not self.names and self.autoremove:
        self.names = []
        self.state = 'absent'

    if self.names == ['*'] and self.state == 'latest':
        try:
            self.base.upgrade_all()
        except dnf.exceptions.DepsolveError as e:
            failure_response['msg'] = "Depsolve Error occured attempting to upgrade all packages"
            self.module.fail_json(**failure_response)
    else:
        pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file()

        pkg_specs = [p.strip() for p in pkg_specs]
        filenames = [f.strip() for f in filenames]
        groups = []
        environments = []
        # Resolve each group spec to a comps group, falling back to an
        # environment; unknown specs are fatal.
        for group_spec in (g.strip() for g in group_specs):
            group = self.base.comps.group_by_pattern(group_spec)
            if group:
                groups.append(group.id)
            else:
                environment = self.base.comps.environment_by_pattern(group_spec)
                if environment:
                    environments.append(environment.id)
                else:
                    self.module.fail_json(
                        msg="No group {0} available.".format(group_spec),
                        results=[],
                    )

        if self.state in ['installed', 'present']:
            # Install files.
            self._install_remote_rpms(filenames)
            for filename in filenames:
                response['results'].append("Installed {0}".format(filename))

            # Install modules
            if module_specs and self.with_modules:
                for module in module_specs:
                    try:
                        if not self._is_module_installed(module):
                            response['results'].append("Module {0} installed.".format(module))
                        self.module_base.install([module])
                    except dnf.exceptions.MarkingErrors as e:
                        failure_response['failures'].append(
                            " ".join((' '.join(module), to_native(e))))

            # Install groups.
            for group in groups:
                try:
                    group_pkg_count_installed = self.base.group_install(
                        group, dnf.const.GROUP_PACKAGE_TYPES)
                    if group_pkg_count_installed == 0:
                        response['results'].append("Group {0} already installed.".format(group))
                    else:
                        response['results'].append("Group {0} installed.".format(group))
                except dnf.exceptions.DepsolveError as e:
                    failure_response['msg'] = "Depsolve Error occured attempting to install group: {0}".format(group)
                    self.module.fail_json(**failure_response)
                except dnf.exceptions.Error as e:
                    # In dnf 2.0 if all the mandatory packages in a group do
                    # not install, an error is raised. We want to capture
                    # this but still install as much as possible.
                    failure_response['failures'].append(" ".join((group, to_native(e))))

            for environment in environments:
                try:
                    self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
                except dnf.exceptions.DepsolveError as e:
                    failure_response['msg'] = "Depsolve Error occured attempting to install environment: {0}".format(environment)
                    self.module.fail_json(**failure_response)
                except dnf.exceptions.Error as e:
                    failure_response['failures'].append(" ".join((environment, to_native(e))))

            if module_specs and not self.with_modules:
                # This means that the group or env wasn't found in comps
                self.module.fail_json(
                    msg="No group {0} available.".format(module_specs[0]),
                    results=[],
                )

            # Install packages.
            if self.update_only:
                not_installed = self._update_only(pkg_specs)
                for spec in not_installed:
                    response['results'].append(
                        "Packages providing %s not installed due to update_only specified" % spec)
            else:
                for pkg_spec in pkg_specs:
                    install_result = self._mark_package_install(pkg_spec)
                    if install_result['failed']:
                        failure_response['msg'] += install_result['msg']
                        failure_response['failures'].append(
                            self._sanitize_dnf_error_msg(pkg_spec, install_result['failure']))
                    else:
                        response['results'].append(install_result['msg'])

        elif self.state == 'latest':
            # "latest" is same as "installed" for filenames.
            self._install_remote_rpms(filenames)
            for filename in filenames:
                response['results'].append("Installed {0}".format(filename))

            # Upgrade modules
            if module_specs and self.with_modules:
                for module in module_specs:
                    try:
                        if self._is_module_installed(module):
                            response['results'].append("Module {0} upgraded.".format(module))
                        self.module_base.upgrade([module])
                    except dnf.exceptions.MarkingErrors as e:
                        failure_response['failures'].append(
                            " ".join((' '.join(module), to_native(e))))

            for group in groups:
                try:
                    try:
                        self.base.group_upgrade(group)
                        response['results'].append("Group {0} upgraded.".format(group))
                    except dnf.exceptions.CompsError:
                        if not self.update_only:
                            # If not already installed, try to install.
                            group_pkg_count_installed = self.base.group_install(
                                group, dnf.const.GROUP_PACKAGE_TYPES)
                            if group_pkg_count_installed == 0:
                                response['results'].append("Group {0} already installed.".format(group))
                            else:
                                response['results'].append("Group {0} installed.".format(group))
                except dnf.exceptions.Error as e:
                    failure_response['failures'].append(" ".join((group, to_native(e))))

            for environment in environments:
                try:
                    try:
                        self.base.environment_upgrade(environment)
                    except dnf.exceptions.CompsError:
                        # If not already installed, try to install.
                        self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
                except dnf.exceptions.DepsolveError as e:
                    # NOTE(review): only records the message; unlike the install
                    # path above it does not fail_json here — confirm intent.
                    failure_response['msg'] = "Depsolve Error occured attempting to install environment: {0}".format(environment)
                except dnf.exceptions.Error as e:
                    failure_response['failures'].append(" ".join((environment, to_native(e))))

            if self.update_only:
                not_installed = self._update_only(pkg_specs)
                for spec in not_installed:
                    response['results'].append(
                        "Packages providing %s not installed due to update_only specified" % spec)
            else:
                for pkg_spec in pkg_specs:
                    # best effort causes to install the latest package
                    # even if not previously installed
                    self.base.conf.best = True
                    install_result = self._mark_package_install(pkg_spec, upgrade=True)
                    if install_result['failed']:
                        failure_response['msg'] += install_result['msg']
                        failure_response['failures'].append(
                            self._sanitize_dnf_error_msg(pkg_spec, install_result['failure']))
                    else:
                        response['results'].append(install_result['msg'])

        else:
            # state == absent
            if filenames:
                self.module.fail_json(
                    msg="Cannot remove paths -- please specify package name.",
                    results=[],
                )

            # Remove modules
            if module_specs and self.with_modules:
                for module in module_specs:
                    try:
                        if self._is_module_installed(module):
                            response['results'].append("Module {0} removed.".format(module))
                        self.module_base.disable([module])
                        self.module_base.remove([module])
                    except dnf.exceptions.MarkingErrors as e:
                        failure_response['failures'].append(
                            " ".join((' '.join(module), to_native(e))))

            for group in groups:
                try:
                    self.base.group_remove(group)
                except dnf.exceptions.CompsError:
                    # Group is already uninstalled.
                    pass
                except AttributeError:
                    # Group either isn't installed or wasn't marked installed at install time
                    # because of DNF bug
                    #
                    # This is necessary until the upstream dnf API bug is fixed where installing
                    # a group via the dnf API doesn't actually mark the group as installed
                    # https://bugzilla.redhat.com/show_bug.cgi?id=1620324
                    pass

            for environment in environments:
                try:
                    self.base.environment_remove(environment)
                except dnf.exceptions.CompsError:
                    # Environment is already uninstalled.
                    pass

            installed = self.base.sack.query().installed()
            for pkg_spec in pkg_specs:
                # Only mark for removal what is actually installed (or a glob).
                if ("*" in pkg_spec) or installed.filter(name=pkg_spec):
                    self.base.remove(pkg_spec)

            # Like the dnf CLI we want to allow recursive removal of dependent
            # packages
            allow_erasing = True

            if self.autoremove:
                self.base.autoremove()

    try:
        # resolve() returns False when the transaction is empty.
        if not self.base.resolve(allow_erasing=allow_erasing):
            if failure_response['failures']:
                failure_response['msg'] = 'Failed to install some of the specified packages'
                self.module.fail_json(**failure_response)

            response['msg'] = "Nothing to do"
            self.module.exit_json(**response)
        else:
            response['changed'] = True

            if failure_response['failures']:
                # BUG FIX: the original assignment ended with a trailing
                # comma, turning 'msg' into a one-element tuple.
                failure_response['msg'] = 'Failed to install some of the specified packages'
                self.module.fail_json(**failure_response)

            if self.module.check_mode:
                response['msg'] = "Check mode: No changes made, but would have if not in check mode"
                self.module.exit_json(**response)

            try:
                self.base.download_packages(self.base.transaction.install_set)
            except dnf.exceptions.DownloadError as e:
                self.module.fail_json(
                    msg="Failed to download packages: {0}".format(to_text(e)),
                    results=[],
                )

            if self.download_only:
                for package in self.base.transaction.install_set:
                    response['results'].append("Downloaded: {0}".format(package))
                self.module.exit_json(**response)
            else:
                self.base.do_transaction()
                for package in self.base.transaction.install_set:
                    response['results'].append("Installed: {0}".format(package))
                for package in self.base.transaction.remove_set:
                    response['results'].append("Removed: {0}".format(package))

            if failure_response['failures']:
                # BUG FIX: trailing comma removed here as well ('msg' was a
                # tuple). NOTE(review): this branch still exits with the
                # success payload (exit_json(**response)) — confirm whether
                # fail_json(**failure_response) was intended.
                failure_response['msg'] = 'Failed to install some of the specified packages'
                self.module.exit_json(**response)
            self.module.exit_json(**response)

    except dnf.exceptions.DepsolveError as e:
        failure_response['msg'] = "Depsolve Error occured: {0}".format(to_native(e))
        self.module.fail_json(**failure_response)
    except dnf.exceptions.Error as e:
        if to_text("already installed") in to_text(e):
            response['changed'] = False
            response['results'].append("Package already installed: {0}".format(to_native(e)))
            self.module.exit_json(**response)
        else:
            failure_response['msg'] = "Unknown Error occured: {0}".format(to_native(e))
            self.module.fail_json(**failure_response)
def state_update_vswitch(self):
    """
    Update an existing vSwitch to match the requested configuration.

    Compares the requested NICs, MTU and port count against the cached
    facts in ``self.available_vswitches[self.switch]`` and calls
    ``UpdateVirtualSwitch`` only when something differs.  Exits the module
    via ``exit_json`` with ``changed``/``result`` keys, or ``fail_json``
    on any vSphere fault.
    """
    results = dict(changed=False, result="No change in vSwitch '%s'" % self.switch)
    vswitch_pnic_info = self.available_vswitches[self.switch]
    # Physical NICs the user asked for that are not yet attached.
    remain_pnic = []
    for desired_pnic in self.nics:
        if desired_pnic not in vswitch_pnic_info['pnic']:
            remain_pnic.append(desired_pnic)
    diff = False
    # Update all nics
    all_nics = vswitch_pnic_info['pnic']
    if remain_pnic:
        # NOTE(review): `all_nics` aliases the cached list in
        # self.available_vswitches, so `+=` mutates that cache in place.
        # Harmless here because the method always exits the module, but
        # worth confirming if this cache is ever reused.
        all_nics += remain_pnic
        diff = True
    # MTU or port-count change also requires an update call.
    if vswitch_pnic_info['mtu'] != self.mtu or \
            vswitch_pnic_info['num_ports'] != self.number_of_ports:
        diff = True
    try:
        if diff:
            vss_spec = vim.host.VirtualSwitch.Specification()
            if all_nics:
                # Bond all desired physical NICs onto the switch bridge.
                vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(
                    nicDevice=all_nics)
            vss_spec.numPorts = self.number_of_ports
            vss_spec.mtu = self.mtu
            network_mgr = self.host_system.configManager.networkSystem
            if network_mgr:
                network_mgr.UpdateVirtualSwitch(vswitchName=self.switch,
                                                spec=vss_spec)
                results['changed'] = True
                results[
                    'result'] = "vSwitch '%s' is updated successfully" % self.switch
            else:
                self.module.fail_json(
                    msg="Failed to find network manager for ESXi system.")
    # Map each specific vSphere fault to a descriptive module failure.
    except vim.fault.ResourceInUse as resource_used:
        self.module.fail_json(
            msg="Failed to update vSwitch '%s' as physical network adapter"
            " being bridged is already in use: %s" %
            (self.switch, to_native(resource_used.msg)))
    except vim.fault.NotFound as not_found:
        self.module.fail_json(msg="Failed to update vSwitch with name '%s'"
                              " as it does not exists: %s" %
                              (self.switch, to_native(not_found.msg)))
    except vim.fault.HostConfigFault as host_config_fault:
        self.module.fail_json(
            msg="Failed to update vSwitch '%s' due to host"
            " configuration fault : %s" %
            (self.switch, to_native(host_config_fault.msg)))
    except vmodl.fault.InvalidArgument as invalid_argument:
        self.module.fail_json(
            msg=
            "Failed to update vSwitch '%s', this can be due to either of following :"
            " 1. vSwitch Name exceeds the maximum allowed length,"
            " 2. Number of ports specified falls out of valid range,"
            " 3. Network policy is invalid,"
            " 4. Beacon configuration is invalid : %s" %
            (self.switch, to_native(invalid_argument.msg)))
    except vmodl.fault.SystemError as system_error:
        self.module.fail_json(
            msg="Failed to update vSwitch '%s' due to : %s" %
            (self.switch, to_native(system_error.msg)))
    except vmodl.fault.NotSupported as not_supported:
        self.module.fail_json(
            msg=
            "Failed to update vSwitch '%s' as network adapter teaming policy"
            " is set but is not supported : %s" %
            (self.switch, to_native(not_supported.msg)))
    # Catch-all so an unexpected fault still produces a module failure.
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to update vSwitch '%s' due to"
                              " generic exception : %s" %
                              (self.switch, to_native(generic_exc)))
    self.module.exit_json(**results)
def _mark_package_install(self, pkg_spec, upgrade=False):
    """Mark the package for install (or upgrade) in the dnf transaction.

    The original four-way branch repeated the identical install/upgrade
    logic in three arms; the only arm with distinct behavior was the early
    return when a newer version is already installed and downgrades are
    not allowed.  This is the equivalent simplification.

    :param pkg_spec: package specification (name, NEVRA, glob, ...) to mark.
    :param upgrade: when True, upgrade the package if it is already installed.
    :return: result dict with 'failed', 'msg', 'failure' (and usually 'rc',
        'results') keys; dnf marking/depsolve errors are reported in the
        dict rather than raised.
    """
    is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
    is_installed = self._is_installed(pkg_spec)
    try:
        # dnf only understands allow_downgrade; we must guard ourselves to
        # keep transactions idempotent when the repos still index old NVRs:
        # if a newer version is installed and downgrades are not allowed,
        # there is nothing to do.
        if not self.allow_downgrade and is_newer_version_installed:
            return {'failed': False, 'msg': '', 'failure': '', 'rc': 0}
        if upgrade and is_installed:
            self.base.upgrade(pkg_spec)
        else:
            # Fresh install, or upgrade requested for a package that is
            # not installed yet (dnf treats that as an install).
            self.base.install(pkg_spec)
        return {
            'failed': False,
            'msg': 'Installed: {0}'.format(pkg_spec),
            'failure': '',
            'rc': 0
        }
    except dnf.exceptions.MarkingError as e:
        return {
            'failed': True,
            'msg': "No package {0} available.".format(pkg_spec),
            'failure': " ".join((pkg_spec, to_native(e))),
            'rc': 1,
            "results": []
        }
    except dnf.exceptions.DepsolveError as e:
        return {
            'failed': True,
            'msg': "Depsolve Error occured for package {0}.".format(pkg_spec),
            'failure': " ".join((pkg_spec, to_native(e))),
            'rc': 1,
            "results": []
        }
    except dnf.exceptions.Error as e:
        if to_text("already installed") in to_text(e):
            # NOTE: this success dict intentionally mirrors the original and
            # lacks 'rc'/'results' keys, unlike the failure dicts above.
            return {'failed': False, 'msg': '', 'failure': ''}
        else:
            return {
                'failed': True,
                'msg': "Unknown Error occured for package {0}.".format(pkg_spec),
                'failure': " ".join((pkg_spec, to_native(e))),
                'rc': 1,
                "results": []
            }
def execute(self):
    """
    Register ('present') or unregister ('absent') a VM in a vCenter folder.

    For state 'present': resolves the target folder from the datacenter
    path, chooses a resource pool (cluster > explicit pool > host's parent
    pool) and runs RegisterVM_Task.  For state 'absent': unregisters the
    VM if it exists.  Always exits the module via exit_json/fail_json.
    """
    result = dict(changed=False)
    datacenter = self.find_datacenter_by_name(self.datacenter)
    if not datacenter:
        self.module.fail_json(
            msg="Cannot find the specified Datacenter: %s" % self.datacenter)
    dcpath = compile_folder_path_for_object(datacenter)
    if not dcpath.endswith("/"):
        dcpath += "/"
    if (self.folder in [None, "", "/"]):
        self.module.fail_json(
            msg="Please specify folder path other than blank or '/'")
    elif (self.folder.startswith("/vm")):
        # A "/vm"-rooted folder is relative to the datacenter, so the
        # datacenter name is inserted into the inventory path.
        fullpath = "%s%s%s" % (dcpath, self.datacenter, self.folder)
    else:
        fullpath = "%s%s" % (dcpath, self.folder)
    folder_obj = self.content.searchIndex.FindByInventoryPath(
        inventoryPath="%s" % fullpath)
    if not folder_obj:
        details = {
            'datacenter': datacenter.name,
            'datacenter_path': dcpath,
            'folder': self.folder,
            'full_search_path': fullpath,
        }
        self.module.fail_json(
            msg="No folder %s matched in the search path : %s" %
            (self.folder, fullpath),
            details=details)
    if self.state == "present":
        # Idempotency: if the VM is already registered, report no change.
        if self.get_vm():
            self.module.exit_json(**result)
        if self.esxi_hostname:
            host_obj = self.find_hostsystem_by_name(self.esxi_hostname)
            if not host_obj:
                self.module.fail_json(
                    msg="Cannot find the specified ESXi host: %s" %
                    self.esxi_hostname)
        else:
            host_obj = None
        # Resource pool selection precedence: cluster, explicit pool,
        # then the ESXi host's parent pool.
        if self.cluster:
            cluster_obj = find_cluster_by_name(self.content, self.cluster,
                                               datacenter)
            if not cluster_obj:
                self.module.fail_json(
                    msg="Cannot find the specified cluster name: %s" %
                    self.cluster)
            resource_pool_obj = cluster_obj.resourcePool
        elif self.resource_pool:
            resource_pool_obj = find_resource_pool_by_name(
                self.content, self.resource_pool)
            if not resource_pool_obj:
                self.module.fail_json(
                    msg="Cannot find the specified resource pool: %s" %
                    self.resource_pool)
        else:
            # NOTE(review): assumes host_obj is set here; if neither
            # cluster, resource_pool nor esxi_hostname was supplied this
            # would raise on None — presumably argument validation
            # elsewhere guarantees one of them. TODO confirm.
            resource_pool_obj = host_obj.parent.resourcePool
        task = folder_obj.RegisterVM_Task(path=self.path,
                                          name=self.name,
                                          asTemplate=self.template,
                                          pool=resource_pool_obj,
                                          host=host_obj)
        changed = False
        try:
            changed, info = wait_for_task(task)
        except Exception as task_e:
            self.module.fail_json(msg=to_native(task_e))
        result.update(changed=changed)
        self.module.exit_json(**result)
    if self.state == "absent":
        vm_obj = self.get_vm()
        if vm_obj:
            try:
                vm_obj.UnregisterVM()
                result.update(changed=True)
            except Exception as exc:
                self.module.fail_json(msg=to_native(exc))
        self.module.exit_json(**result)
def _packagename_dict(self, packagename): """ Return a dictionary of information for a package name string or None if the package name doesn't contain at least all NVR elements """ if packagename[-4:] == '.rpm': packagename = packagename[:-4] # This list was auto generated on a Fedora 28 system with the following one-liner # printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n' redhat_rpm_arches = [ "aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha", "alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel", "armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon", "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el", "mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6", "noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64", "ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries", "riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v", "sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64" ] rpm_arch_re = re.compile(r'(.*)\.(.*)') rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.]*)') try: arch = None rpm_arch_match = rpm_arch_re.match(packagename) if rpm_arch_match: nevr, arch = rpm_arch_match.groups() if arch in redhat_rpm_arches: packagename = nevr rpm_nevr_match = rpm_nevr_re.match(packagename) if rpm_nevr_match: name, epoch, version, release = rpm_nevr_re.match( packagename).groups() if not version or not version.split('.')[0].isdigit(): return None else: return None except AttributeError as e: self.module.fail_json( msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)), rc=1, results=[]) if not epoch: epoch = "0" if ':' in name: epoch_name = name.split(":") epoch = epoch_name[0] name = ''.join(epoch_name[1:]) result = { 'name': name, 'epoch': epoch, 'release': release, 'version': version, } return 
result
def test_missing_required_galaxy_key(galaxy_yml):
    """Verify _get_galaxy_yml names every missing mandatory key in its error."""
    missing_keys = "authors, name, readme, version"
    expected = (
        "The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
        % (to_native(galaxy_yml), missing_keys)
    )
    with pytest.raises(AnsibleError, match=expected):
        collection._get_galaxy_yml(galaxy_yml)
def _load_playbook_data(self, file_name, variable_manager, vars=None):
    """
    Load and parse a playbook file into this playbook's entry list.

    Sets the loader basedir to the playbook's directory (restoring the
    previous basedir on parse errors and on success), loads plugins found
    next to the playbook, then parses each entry as either a Play or a
    playbook include/import.

    :param file_name: path to the playbook file (absolute or relative).
    :param variable_manager: variable manager passed down to loaded plays.
    :param vars: optional extra vars for loaded plays (note: shadows the
        builtin ``vars``; kept for interface compatibility).
    :raises AnsibleParserError: on encoding problems, an empty playbook,
        a non-list document, or a non-dict entry.
    """
    if os.path.isabs(file_name):
        self._basedir = os.path.dirname(file_name)
    else:
        self._basedir = os.path.normpath(
            os.path.join(self._basedir, os.path.dirname(file_name)))

    # set the loaders basedir, remembering the old one so we can restore it
    cur_basedir = self._loader.get_basedir()
    self._loader.set_basedir(self._basedir)

    self._file_name = file_name

    # dynamically load any plugins from the playbook directory
    for name, obj in get_all_plugin_loaders():
        if obj.subdir:
            plugin_path = os.path.join(self._basedir, obj.subdir)
            if os.path.isdir(plugin_path):
                obj.add_directory(plugin_path)

    try:
        ds = self._loader.load_from_file(os.path.basename(file_name))
    except UnicodeDecodeError as e:
        raise AnsibleParserError(
            "Could not read playbook (%s) due to encoding issues: %s" %
            (file_name, to_native(e)))

    # check for errors and restore the basedir in case this error is caught and handled
    if not ds:
        self._loader.set_basedir(cur_basedir)
        raise AnsibleParserError("Empty playbook, nothing to do", obj=ds)
    elif not isinstance(ds, list):
        self._loader.set_basedir(cur_basedir)
        raise AnsibleParserError(
            "A playbook must be a list of plays, got a %s instead" % type(ds),
            obj=ds)

    # Parse the playbook entries. For plays, we simply parse them
    # using the Play() object, and includes are parsed using the
    # PlaybookInclude() object
    for entry in ds:
        if not isinstance(entry, dict):
            # restore the basedir in case this error is caught and handled
            self._loader.set_basedir(cur_basedir)
            raise AnsibleParserError(
                "playbook entries must be either a valid play or an include statement",
                obj=entry)

        if any(action in entry
               for action in ('import_playbook', 'include')):
            if 'include' in entry:
                # 'include' is the legacy spelling; warn users to migrate.
                display.deprecated(
                    "'include' for playbook includes. You should use 'import_playbook' instead",
                    version="2.12")
            pb = PlaybookInclude.load(entry,
                                      basedir=self._basedir,
                                      variable_manager=variable_manager,
                                      loader=self._loader)
            if pb is not None:
                self._entries.extend(pb._entries)
            else:
                # A None result means the include's conditional failed.
                which = entry.get('import_playbook',
                                  entry.get('include', entry))
                display.display(
                    "skipping playbook '%s' due to conditional test failure" %
                    which,
                    color=C.COLOR_SKIP)
        else:
            entry_obj = Play.load(entry,
                                  variable_manager=variable_manager,
                                  loader=self._loader,
                                  vars=vars)
            self._entries.append(entry_obj)

    # we're done, so restore the old basedir in the loader
    self._loader.set_basedir(cur_basedir)
def ensure(self):
    """
    Manage the power state of the configured ESXi host systems.

    For each host: validate that the requested transition is allowed
    (maintenance mode unless forced, host responding, capability present),
    then run the matching vSphere task — or, in check mode, only report
    what would be done.  Exits the module with per-host results and
    changed=True if any host changed.

    Fixes over the previous version:
    - the vSphere task was created (and therefore actually executed)
      BEFORE the check-mode guard, so check mode rebooted/shut down hosts;
      the task is now only started when not in check mode.
    - the generic-exception message interpolated ``host.name`` where the
      action description belongs ("Failed to <hostname> due to ...");
      it now uses the action verb.
    """
    results = dict(changed=False, result=dict())
    state = self.params.get('state')
    force = self.params.get('force')
    timeout = self.params.get('timeout')
    host_change_list = []
    for host in self.hosts:
        changed = False
        # Safety checks before attempting any power-state change.
        if not host.runtime.inMaintenanceMode and not force:
            self.module.fail_json(msg="Current host system '%s' is not in maintenance mode,"
                                      " please specify 'force' as True to proceed." % host.name)
        if host.runtime.connectionState == 'notResponding':
            self.module.fail_json(msg="Current host system '%s' can not be set in '%s'"
                                      " mode as the host system is not responding." % (host.name, state))
        results['result'][host.name] = dict(msg='', error='')
        if state == 'reboot-host' and not host.capability.rebootSupported:
            self.module.fail_json(msg="Current host '%s' can not be rebooted as the host system"
                                      " does not have capability to reboot." % host.name)
        elif state == 'shutdown-host' and not host.capability.shutdownSupported:
            self.module.fail_json(msg="Current host '%s' can not be shut down as the host system"
                                      " does not have capability to shut down." % host.name)
        elif state in ['power-down-to-standby', 'power-up-from-standby'] and not host.capability.standbySupported:
            self.module.fail_json(msg="Current host '%s' can not be '%s' as the host system"
                                      " does not have capability to standby supported." % (host.name, state))

        # Human-readable action description, used for reporting in both
        # check mode and normal mode.
        if state == 'reboot-host':
            verb = "reboot '%s'" % host.name
        elif state == 'shutdown-host':
            verb = "shutdown '%s'" % host.name
        elif state == 'power-down-to-standby':
            verb = "power down '%s' to standby" % host.name
        elif state == 'power-up-from-standby':
            verb = "power up '%s' from standby" % host.name

        if self.module.check_mode:
            # Check mode: report the would-be action without starting a task.
            changed = True
            results['result'][host.name]['msg'] = verb
        else:
            # Only now start the (side-effecting) vSphere task.
            if state == 'reboot-host':
                task = host.RebootHost_Task(force)
            elif state == 'shutdown-host':
                task = host.ShutdownHost_Task(force)
            elif state == 'power-down-to-standby':
                task = host.PowerDownHostToStandBy_Task(timeout, force)
            elif state == 'power-up-from-standby':
                task = host.PowerUpHostFromStandBy_Task(timeout)
            try:
                success, result = wait_for_task(task)
                if success:
                    changed = True
                    results['result'][host.name]['msg'] = verb
                else:
                    results['result'][host.name]['error'] = result
            except TaskError as task_error:
                self.module.fail_json(msg="Failed to %s as host system due to : %s" % (verb, str(task_error)))
            except Exception as generic_exc:
                self.module.fail_json(msg="Failed to %s due to generic exception : %s"
                                          % (verb, to_native(generic_exc)))
        host_change_list.append(changed)
    if any(host_change_list):
        results['changed'] = True
    self.module.exit_json(**results)
def update_mapping_info(self):
    """Collect the current state of the storage array.

    Fetches the array graph over REST and builds ``self.mapping_info``,
    a dict of lookup tables: current LUN mappings plus volume and target
    (host/cluster) reference/name/type indexes.  Fails the module on any
    request error or on an ambiguous host/group target name.
    """
    response = None
    try:
        rc, response = request(self.url +
                               "storage-systems/%s/graph" % self.ssid,
                               method="GET",
                               headers=HEADERS,
                               **self.creds)
    except Exception as error:
        self.module.fail_json(
            msg=
            "Failed to retrieve storage array graph. Id [%s]. Error [%s]" %
            (self.ssid, to_native(error)))

    # Create dictionary containing host/cluster references mapped to their names
    target_reference = {}
    target_name = {}
    target_type = {}

    # target_type None means "either", so both branches may run.
    if self.target_type is None or self.target_type == "host":
        for host in response["storagePoolBundle"]["host"]:
            target_reference.update({host["hostRef"]: host["name"]})
            target_name.update({host["name"]: host["hostRef"]})
            target_type.update({host["name"]: "host"})

    if self.target_type is None or self.target_type == "group":
        for cluster in response["storagePoolBundle"]["cluster"]:

            # Verify there is no ambiguity between target's type (ie host and group has the same name)
            if self.target and self.target_type is None and cluster["name"] == self.target and \
                    self.target in target_name.keys():
                self.module.fail_json(
                    msg=
                    "Ambiguous target type: target name is used for both host and group"
                    " targets! Id [%s]" % self.ssid)

            target_reference.update(
                {cluster["clusterRef"]: cluster["name"]})
            target_name.update({cluster["name"]: cluster["clusterRef"]})
            target_type.update({cluster["name"]: "group"})

    # Volume lookup tables; lun_name records each volume's first mapping LUN.
    volume_reference = {}
    volume_name = {}
    lun_name = {}
    for volume in response["volume"]:
        volume_reference.update({volume["volumeRef"]: volume["name"]})
        volume_name.update({volume["name"]: volume["volumeRef"]})
        if volume["listOfMappings"]:
            lun_name.update(
                {volume["name"]: volume["listOfMappings"][0]["lun"]})
    # Thin volumes are reported separately but indexed the same way.
    for volume in response["highLevelVolBundle"]["thinVolume"]:
        volume_reference.update({volume["volumeRef"]: volume["name"]})
        volume_name.update({volume["name"]: volume["volumeRef"]})
        if volume["listOfMappings"]:
            lun_name.update(
                {volume["name"]: volume["listOfMappings"][0]["lun"]})

    # Build current mapping object
    self.mapping_info = dict(lun_mapping=[
        dict(volume_reference=mapping["volumeRef"],
             map_reference=mapping["mapRef"],
             lun_mapping_reference=mapping["lunMappingRef"],
             lun=mapping["lun"])
        for mapping in response["storagePoolBundle"]["lunMapping"]
    ],
                             volume_by_reference=volume_reference,
                             volume_by_name=volume_name,
                             lun_by_name=lun_name,
                             target_by_reference=target_reference,
                             target_by_name=target_name,
                             target_type_by_name=target_type)
def emerge_packages(module, packages):
    """Run emerge command against given list of atoms.

    Skips the emerge entirely when all packages are already present (unless
    update/noreplace/latest semantics are requested), translates module
    parameters into emerge flags, runs emerge, and exits the module with a
    changed/msg result derived from emerge's output.

    :param module: AnsibleModule instance (provides params and exit/fail).
    :param packages: list of package atoms to emerge.
    """
    p = module.params

    if not (p['update'] or p['noreplace'] or p['state'] == 'latest'):
        # If every package is already installed there is nothing to do;
        # the for/else only exits when the loop completed without break.
        for package in packages:
            if not query_package(module, package, 'emerge'):
                break
        else:
            module.exit_json(changed=False, msg='Packages already present.')
        if module.check_mode:
            module.exit_json(changed=True, msg='Packages would be installed.')

    args = []
    # Boolean module parameters that map 1:1 onto emerge flags.
    emerge_flags = {
        'update': '--update',
        'deep': '--deep',
        'newuse': '--newuse',
        'changed_use': '--changed-use',
        'oneshot': '--oneshot',
        'noreplace': '--noreplace',
        'nodeps': '--nodeps',
        'onlydeps': '--onlydeps',
        'quiet': '--quiet',
        'verbose': '--verbose',
        'getbinpkg': '--getbinpkg',
        'usepkgonly': '--usepkgonly',
        'usepkg': '--usepkg',
        'keepgoing': '--keep-going',
    }
    for flag, arg in emerge_flags.items():
        if p[flag]:
            args.append(arg)

    # state=latest implies --update even if 'update' was not set.
    if p['state'] and p['state'] == 'latest':
        args.append("--update")

    if p['usepkg'] and p['usepkgonly']:
        module.fail_json(msg='Use only one of usepkg, usepkgonly')

    # Value-carrying flags: None means omit, falsy (0/0.0) means flag only.
    emerge_flags = {
        'jobs': '--jobs',
        'loadavg': '--load-average',
    }
    for flag, arg in emerge_flags.items():
        flag_val = p[flag]

        if flag_val is None:
            """Fallback to default: don't use this argument at all."""
            continue

        if not flag_val:
            """If the value is 0 or 0.0: add the flag, but not the value."""
            args.append(arg)
            continue

        """Add the --flag=value pair."""
        args.extend((arg, to_native(flag_val)))

    cmd, (rc, out, err) = run_emerge(module, packages, *args)
    if rc != 0:
        module.fail_json(
            cmd=cmd,
            rc=rc,
            stdout=out,
            stderr=err,
            msg='Packages not installed.',
        )

    # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
    # this error
    if (p['usepkgonly'] or p['getbinpkg']) \
            and 'Permission denied (publickey).' in err:
        module.fail_json(
            cmd=cmd,
            rc=rc,
            stdout=out,
            stderr=err,
            msg='Please check your PORTAGE_BINHOST configuration in make.conf '
            'and your SSH authorized_keys file',
        )

    changed = True
    # Inspect emerge output to decide whether anything was (or would be)
    # installed; the for/else sets 'no change' when no line matched.
    for line in out.splitlines():
        if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
            msg = 'Packages installed.'
            break
        elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
            msg = 'Packages would be installed.'
            break
    else:
        changed = False
        msg = 'No packages installed.'

    module.exit_json(
        changed=changed,
        cmd=cmd,
        rc=rc,
        stdout=out,
        stderr=err,
        msg=msg,
    )
def upload(self):
    """
    Upload the OVF's disk files to the target host via the NFC lease.

    For each file item in the import spec, resolves its device upload URL
    from the lease, locates the VMDK either inside the OVA tar or next to
    the OVF on disk, and starts a VMDKUploader thread.  Uploads run one at
    a time while lease progress is reported; on any failure the lease is
    aborted and the module fails.
    """
    if self.params['ovf'] is None:
        self.module.fail_json(msg="OVF path is required for upload operation.")

    ovf_dir = os.path.dirname(self.params['ovf'])

    lease, import_spec = self.get_lease()

    uploaders = []

    for file_item in import_spec.fileItem:
        device_upload_url = None
        # Match this file item to its upload URL via the device import key.
        for device_url in lease.info.deviceUrl:
            if file_item.deviceId == device_url.importKey:
                device_upload_url = self._normalize_url(device_url.url)
                break

        if not device_upload_url:
            lease.HttpNfcLeaseAbort(
                vmodl.fault.SystemError(reason='Failed to find deviceUrl for file %s' % file_item.path)
            )
            self.module.fail_json(
                msg='Failed to find deviceUrl for file %s' % file_item.path
            )

        vmdk_tarinfo = None
        if self.tar:
            # OVA case: the VMDK is a member of the tar archive.
            vmdk = self.tar
            try:
                vmdk_tarinfo = self.tar.getmember(file_item.path)
            except KeyError:
                lease.HttpNfcLeaseAbort(
                    vmodl.fault.SystemError(reason='Failed to find VMDK file %s in OVA' % file_item.path)
                )
                self.module.fail_json(
                    msg='Failed to find VMDK file %s in OVA' % file_item.path
                )
        else:
            # OVF case: the VMDK sits next to the OVF descriptor on disk.
            vmdk = os.path.join(ovf_dir, file_item.path)
            try:
                path_exists(vmdk)
            except ValueError:
                lease.HttpNfcLeaseAbort(
                    vmodl.fault.SystemError(reason='Failed to find VMDK file at %s' % vmdk)
                )
                self.module.fail_json(
                    msg='Failed to find VMDK file at %s' % vmdk
                )

        uploaders.append(
            VMDKUploader(
                vmdk,
                device_upload_url,
                self.params['validate_certs'],
                tarinfo=vmdk_tarinfo,
                create=file_item.create
            )
        )

    total_size = sum(u.size for u in uploaders)
    total_bytes_read = [0] * len(uploaders)
    # Run uploads sequentially, polling each thread and keeping the NFC
    # lease alive by reporting overall progress while it works.
    for i, uploader in enumerate(uploaders):
        uploader.start()
        while uploader.is_alive():
            time.sleep(0.1)
            total_bytes_read[i] = uploader.bytes_read
            lease.HttpNfcLeaseProgress(int(100.0 * sum(total_bytes_read) / total_size))

        if uploader.e:
            # uploader.e carries sys.exc_info() from the worker thread.
            lease.HttpNfcLeaseAbort(
                vmodl.fault.SystemError(reason='%s' % to_native(uploader.e[1]))
            )
            self.module.fail_json(
                msg='%s' % to_native(uploader.e[1]),
                exception=''.join(traceback.format_tb(uploader.e[2]))
            )