def reconfigure_vm_serial_port(self, vm_obj):
    """ Reconfigure vm with new or modified serial port config spec

    Creation specs are applied one task at a time; the remaining (edit/remove)
    changes collected in ``self.config_spec`` are applied in a final task.

    Args:
        vm_obj: vim.VirtualMachine object to reconfigure.

    Returns:
        dict with 'changed', 'failed' and either 'msg' (on task error)
        or 'serial_port_info' (on success).
    """
    # Populates self.serial_ports (create specs) and self.config_spec (edits).
    self.get_serial_port_config_spec(vm_obj)
    try:
        # configure create tasks first
        if self.serial_ports:
            for serial_port in self.serial_ports:
                # each type of serial port is of config_spec.device = vim.vm.device.VirtualSerialPort() object type
                # because serial ports differ in the backing types and config_spec.device has to be unique,
                # we are creating a new spec for every create port configuration
                spec = vim.vm.ConfigSpec()
                spec.deviceChange.append(serial_port)
                task = vm_obj.ReconfigVM_Task(spec=spec)
                wait_for_task(task)
        # Apply the remaining modifications collected in self.config_spec.
        task = vm_obj.ReconfigVM_Task(spec=self.config_spec)
        wait_for_task(task)
    except vim.fault.InvalidDatastorePath as e:
        self.module.fail_json(msg="Failed to configure serial port on given virtual machine due to invalid path: %s" % to_native(e.msg))
    except vim.fault.RestrictedVersion as e:
        self.module.fail_json(msg="Failed to reconfigure virtual machine due to product versioning restrictions: %s" % to_native(e.msg))
    # Inspect the final reconfigure task for failure; earlier tasks that
    # failed would have raised one of the faults handled above.
    if task.info.state == 'error':
        results = {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg}
    else:
        serial_port_info = get_serial_port_info(vm_obj)
        results = {'changed': self.change_applied, 'failed': False, 'serial_port_info': serial_port_info}
    return results
def set_port_security_promiscuous(self, ports, state): """Set the given port to the given promiscuous state. Parameters ---------- port : str[] PortKey state: bool State of the promiscuous mode, if true its allowed, else not. """ # Creating the new port policy port_spec = [] vim_bool = vim.BoolPolicy(value=state) port_policy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy( allowPromiscuous=vim_bool) port_settings = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy( securityPolicy=port_policy) for port in ports: temp_port_spec = vim.dvs.DistributedVirtualPort.ConfigSpec( operation="edit", key=port, setting=port_settings) port_spec.append(temp_port_spec) task = self.dv_switch.ReconfigureDVPort_Task(port_spec) try: wait_for_task(task) except Exception: self.restore_original_state() self.module.fail_json(msg=task.info.error.msg)
def _update_version3_resources(self, resources):
    """Apply a version-3 NIOC infrastructure traffic resource configuration.

    Each entry in ``resources`` is a dict with at least a 'name' key and
    optional 'limit', 'reservation', 'shares_level' and 'shares' keys.
    Fails the module when shares_level is 'custom' but 'shares' is missing.
    """
    traffic_allocations = []
    for res in resources:
        alloc = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource()
        alloc.allocationInfo = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource.ResourceAllocation()
        alloc.key = res['name']
        if 'limit' in res:
            alloc.allocationInfo.limit = res['limit']
        if 'reservation' in res:
            alloc.allocationInfo.reservation = res['reservation']
        if 'shares_level' in res:
            alloc.allocationInfo.shares = vim.SharesInfo()
            alloc.allocationInfo.shares.level = res['shares_level']
            if 'shares' in res and res['shares_level'] == 'custom':
                alloc.allocationInfo.shares.shares = res['shares']
            elif res['shares_level'] == 'custom':
                # custom level requires an explicit shares value
                self.module.fail_json(
                    msg="Resource %s, shares_level set to custom but shares not specified" % res['name']
                )
        traffic_allocations.append(alloc)

    spec = vim.DistributedVirtualSwitch.ConfigSpec()
    spec.configVersion = self.dvs.config.configVersion
    spec.infrastructureTrafficResourceConfig = traffic_allocations
    reconfig_task = self.dvs.ReconfigureDvs_Task(spec)
    wait_for_task(reconfig_task)
def __create_vm_group(self):
    """Create or edit a cluster VM group and record result/msg/changed state.

    Reconfigures the cluster only when adding, or when editing and the VM
    membership actually changed; honors check mode.
    """
    # Check if anything has changed when editing
    if self.__operation == 'add' or (self.__operation == 'edit' and self.__check_if_vms_hosts_changed()):
        group = vim.cluster.VmGroup()
        group.name = self.__group_name
        group.vm = self.__vm_obj_list
        group_spec = vim.cluster.GroupSpec(info=group, operation=self.__operation)
        config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])
        # Check if dry run
        if not self.module.check_mode:
            task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
            wait_for_task(task)
        self.__set_result(group)
        self.__changed = True
    # Message reflects the requested operation, even if nothing changed.
    if self.__operation == 'edit':
        self.__msg = "Updated vm group %s successfully" % (self.__group_name)
    else:
        self.__msg = "Created vm group %s successfully" % (self.__group_name)
def delete(self, rule_name=None):
    """Delete a VM-Host DRS rule by name.

    Args:
        rule_name: Name of the rule to delete; defaults to the rule name
            the object was constructed with (``self.__rule_name``).

    Side effects: sets ``self.__changed`` and ``self.__msg``; honors
    check mode (no reconfigure task is issued, but changed is reported).
    """
    if rule_name is None:
        rule_name = self.__rule_name

    rule_obj = self.__get_rule_key_by_name(rule_name=rule_name)
    if rule_obj is not None:
        rule_key = int(rule_obj.key)
        rule_spec = vim.cluster.RuleSpec(removeKey=rule_key, operation='remove')
        config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
        if not self.module.check_mode:
            task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
            wait_for_task(task)
        self.__changed = True

    # BUG FIX: report the rule that was actually requested/deleted
    # (rule_name), not the instance default self.__rule_name, which can
    # differ when a caller passes an explicit rule_name.
    if self.__changed:
        self.__msg = "Deleted DRS rule `%s` successfully" % rule_name
    else:
        self.__msg = "DRS Rule `%s` does not exists or already deleted" % rule_name
def inject_ovf_env(self):
    """Build an OVF environment XML document and store it in the VM's
    ``guestinfo.ovfEnv`` extraConfig key via a reconfigure task."""
    env_attrib = {
        'xmlns': 'http://schemas.dmtf.org/ovf/environment/1',
        'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
        'xmlns:oe': 'http://schemas.dmtf.org/ovf/environment/1',
        'xmlns:ve': 'http://www.vmware.com/schema/ovfenv',
        'oe:id': '',
        've:esxId': self.entity._moId
    }
    env_root = ET.Element('Environment', **env_attrib)

    # Platform section: identify the hosting product.
    platform_el = ET.SubElement(env_root, 'PlatformSection')
    about = self.content.about
    ET.SubElement(platform_el, 'Kind').text = about.name
    ET.SubElement(platform_el, 'Version').text = about.version
    ET.SubElement(platform_el, 'Vendor').text = about.vendor
    ET.SubElement(platform_el, 'Locale').text = 'US'

    # Property section: one Property element per user-supplied property.
    props_el = ET.SubElement(env_root, 'PropertySection')
    for prop_key, prop_value in self.params['properties'].items():
        # Booleans must be stringified; other values pass through unchanged.
        rendered = str(prop_value) if isinstance(prop_value, bool) else prop_value
        ET.SubElement(props_el, 'Property', **{'oe:key': prop_key, 'oe:value': rendered})

    option = vim.option.OptionValue()
    option.key = 'guestinfo.ovfEnv'
    option.value = '<?xml version="1.0" encoding="UTF-8"?>' + to_native(ET.tostring(env_root))

    reconfig_spec = vim.vm.ConfigSpec()
    reconfig_spec.extraConfig = [option]
    reconfig_task = self.entity.ReconfigVM_Task(reconfig_spec)
    wait_for_task(reconfig_task)
def update_dvs_config(self, switch_object, spec):
    """Update DVS config

    Reconfigures the given distributed virtual switch with ``spec`` and
    waits for the task; fails the module on a TaskError.
    """
    try:
        reconfig_task = switch_object.ReconfigureDvs_Task(spec)
        wait_for_task(reconfig_task)
    except TaskError as invalid_argument:
        self.module.fail_json(msg="Failed to update DVS : %s" % to_native(invalid_argument))
def create(self):
    """ Function to create a host VM-Host DRS rule if rule does not exist

    If a rule with the same name already exists, it is edited instead —
    but only when its desired settings actually differ. Sets
    ``self.__changed``, ``self.__result`` and ``self.__msg``.
    """
    rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)
    # Check if rule exists
    if rule_obj:
        operation = 'edit'
        rule_changed = self.__check_rule_has_changed(rule_obj)
    else:
        operation = 'add'
    # Check if anything has changed when editing
    if operation == 'add' or (operation == 'edit' and rule_changed is True):
        rule = vim.cluster.VmHostRuleInfo()
        # Check if already rule exists
        if rule_obj:
            # This need to be set in order to edit a existing rule
            rule.key = rule_obj.key
        rule.enabled = self.__enabled
        rule.mandatory = self.__mandatory
        rule.name = self.__rule_name
        # Affinity rules pin VMs to the host group; anti-affinity keeps them off it.
        if self.__affinity_rule:
            rule.affineHostGroupName = self.__host_group_name
        else:
            rule.antiAffineHostGroupName = self.__host_group_name
        rule.vmGroupName = self.__vm_group_name
        rule_spec = vim.cluster.RuleSpec(info=rule, operation=operation)
        config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
        # Honor check mode: skip the actual reconfigure task in a dry run.
        if not self.module.check_mode:
            task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
            wait_for_task(task)
        self.__changed = True
    # Re-read the rule so the reported result reflects the server state.
    rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)
    self.__result = self.__normalize_vm_host_rule_spec(rule_obj)
    if operation == 'edit':
        self.__msg = "Updated DRS rule `%s` successfully" % (self.__rule_name)
    else:
        self.__msg = "Created DRS rule `%s` successfully" % (self.__rule_name)
def clone(self):
    """Clone the source VM/template into the destination vCenter folder.

    Returns a result dict: on task error {'changed': False, 'failed': True,
    'msg': ...}; on success {'changed': True, 'failed': False, 'vm_info': ...}.
    """
    # clone the vm/template on destination VC
    target_folder = find_folder_by_name(content=self.destination_content,
                                        folder_name=self.params['destination_vm_folder'])
    target_name = self.params['destination_vm_name']
    clone_task = self.vm_obj.Clone(folder=target_folder, name=target_name, spec=self.clone_spec)
    wait_for_task(clone_task)
    if clone_task.info.state == 'error':
        return {'changed': False, 'failed': True, 'msg': clone_task.info.error.msg}
    return {'changed': True, 'failed': False, 'vm_info': self.get_new_vm_info(target_name)}
def set_nioc_version(self):
    """Set the Network I/O Control version on the DVS via a reconfigure task.

    Defaults ``self.version`` to 'version2' when unset (side effect kept on
    purpose so later code sees the effective version).
    """
    if not self.version:
        self.version = 'version2'
    nioc_spec = vim.DistributedVirtualSwitch.ConfigSpec()
    nioc_spec.configVersion = self.dvs.config.configVersion
    nioc_spec.networkResourceControlVersion = self.version
    try:
        nioc_task = self.dvs.ReconfigureDvs_Task(spec=nioc_spec)
        wait_for_task(nioc_task)
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(msg="RuntimeFault when setting NIOC version: %s " % to_native(runtime_fault.msg))
def set_vnc_extraconfig(content, vm, enabled, ip, port, password): result = dict( changed=False, failed=False, ) # set new values key_prefix = "remotedisplay.vnc." new_values = dict() for key in ['enabled', 'ip', 'port', 'password']: new_values[key_prefix + key] = "" if enabled: new_values[key_prefix + "enabled"] = "true" new_values[key_prefix + "password"] = str(password).strip() new_values[key_prefix + "ip"] = str(ip).strip() new_values[key_prefix + "port"] = str(port).strip() # get current vnc config current_values = get_vnc_extraconfig(vm) # check if any value is changed reconfig_vm = False for key, val in new_values.items(): key = key.replace(key_prefix, "") current_value = current_values.get(key, "") # enabled is not case-sensitive if key == "enabled": current_value = current_value.lower() val = val.lower() if current_value != val: reconfig_vm = True if not reconfig_vm: return result # reconfigure vm spec = vim.vm.ConfigSpec() spec.extraConfig = [] for key, val in new_values.items(): opt = vim.option.OptionValue() opt.key = key opt.value = val spec.extraConfig.append(opt) task = vm.ReconfigVM_Task(spec) try: wait_for_task(task) except TaskError as task_err: result['failed'] = True result['msg'] = to_native(task_err) if task.info.state == 'error': result['failed'] = True result['msg'] = task.info.error.msg else: result['changed'] = True result['instance'] = gather_vm_facts(content, vm) return result
def take_vm_screenshot(self):
    """Take a screenshot of the powered-on VM and optionally download it.

    Fails the module if the VM is not powered on or the screenshot task
    raises a handled fault. Returns a dict with 'changed', 'failed' and
    either 'msg' (task error) or 'screenshot_info'.
    """
    # Screenshots are only possible on a running VM.
    if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
        self.module.fail_json(msg="VM is %s, valid power state is poweredOn." % self.current_vm_obj.runtime.powerState)
    try:
        task = self.current_vm_obj.CreateScreenshot_Task()
        wait_for_task(task)
    except vim.fault.FileFault as e:
        self.module.fail_json(msg="Failed to create screenshot due to errors when creating or accessing one or more"
                                  " files needed for this operation, %s" % to_native(e.msg))
    except vim.fault.InvalidState as e:
        self.module.fail_json(msg="Failed to create screenshot due to VM is not ready to respond to such requests,"
                                  " %s" % to_native(e.msg))
    except vmodl.RuntimeFault as e:
        self.module.fail_json(msg="Failed to create screenshot due to runtime fault, %s," % to_native(e.msg))
    except vim.fault.TaskInProgress as e:
        self.module.fail_json(msg="Failed to create screenshot due to VM is busy, %s" % to_native(e.msg))

    if task.info.state == 'error':
        return {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}
    else:
        download_file_size = None
        self.change_detected = True
        # task.info.result holds the datastore path of the screenshot file;
        # turn it into an HTTP URL for optional download.
        file_url = self.generate_http_access_url(task.info.result)
        if self.params.get('local_path'):
            if file_url:
                download_file_size = self.download_screenshot_file(
                    file_url=file_url,
                    local_file_path=self.params['local_path'],
                    file_name=task.info.result.split('/')[-1])
        screenshot_facts = self.get_screenshot_facts(task.info, file_url, download_file_size)
        return {'changed': self.change_detected, 'failed': False, 'screenshot_info': screenshot_facts}
def destroy_dvswitch(self):
    """Delete a DVS

    Honors check mode; exits the module with the result dict.
    """
    results = dict(changed=True)
    results['dvswitch'] = self.switch_name
    if self.module.check_mode:
        results['result'] = "DVS would be deleted"
    else:
        try:
            destroy_task = self.dvs.Destroy_Task()
        except vim.fault.VimFault as vim_fault:
            self.module.fail_json(msg="Failed to deleted DVS : %s" % to_native(vim_fault))
        wait_for_task(destroy_task)
        results['result'] = "DVS deleted"
    self.module.exit_json(**results)
def ExitMaintenanceMode(self):
    """Take the ESXi host out of maintenance mode and exit the module.

    Exits with NO_ACTION when the host is not in maintenance mode.
    """
    host_repr = str(self.host)
    if not self.host.runtime.inMaintenanceMode:
        self.module.exit_json(
            changed=False,
            hostsystem=host_repr,
            hostname=self.esxi_hostname,
            status='NO_ACTION',
            msg='Host %s not in maintenance mode' % self.esxi_hostname)
    try:
        mm_task = self.host.ExitMaintenanceMode_Task(self.module.params['timeout'])
        success, result = wait_for_task(mm_task)
        self.module.exit_json(
            changed=success,
            hostsystem=host_repr,
            hostname=self.esxi_hostname,
            status='EXIT',
            msg='Host %s exited maintenance mode' % self.esxi_hostname)
    except TaskError as e:
        self.module.fail_json(
            msg='Host %s failed to exit maintenance mode due to %s' % (self.esxi_hostname, to_native(e)))
def EnterMaintenanceMode(self):
    """Put the ESXi host into maintenance mode and exit the module.

    Exits with NO_ACTION when the host is already in maintenance mode;
    attaches a vSAN decommission mode to the spec when requested.
    """
    host_repr = str(self.host)
    if self.host.runtime.inMaintenanceMode:
        self.module.exit_json(
            changed=False,
            hostsystem=host_repr,
            hostname=self.esxi_hostname,
            status='NO_ACTION',
            msg='Host %s already in maintenance mode' % self.esxi_hostname)
    spec = vim.host.MaintenanceSpec()
    if self.vsan:
        # Tell vSAN how to handle the host's objects during decommission.
        spec.vsanMode = vim.vsan.host.DecommissionMode()
        spec.vsanMode.objectAction = self.vsan
    try:
        mm_task = self.host.EnterMaintenanceMode_Task(
            self.module.params['timeout'],
            self.module.params['evacuate'],
            spec)
        success, result = wait_for_task(mm_task)
        self.module.exit_json(
            changed=success,
            hostsystem=host_repr,
            hostname=self.esxi_hostname,
            status='ENTER',
            msg='Host %s entered maintenance mode' % self.esxi_hostname)
    except TaskError as e:
        self.module.fail_json(
            msg='Host %s failed to enter maintenance mode due to %s' % (self.esxi_hostname, to_native(e)))
def state_remove_host(self):
    """Remove host from vCenter

    In check mode only reports what would happen. Otherwise puts the host
    into maintenance mode when it lives in a cluster, then destroys either
    the parent compute resource (folder placement) or the host itself
    (cluster placement) and waits for the task.
    """
    changed = True
    result = None
    if self.module.check_mode:
        result = "Host would be removed from vCenter '%s'" % self.vcenter
    else:
        # Check parent type: clustered hosts must enter maintenance mode first
        parent_type = self.get_parent_type(self.host_update)
        if parent_type == 'cluster':
            self.put_host_in_maintenance_mode(self.host_update)
        task = None
        try:
            if self.folder_name:
                task = self.host_parent_compute_resource.Destroy_Task()
            elif self.cluster_name:
                task = self.host.Destroy_Task()
        except vim.fault.VimFault as vim_fault:
            # BUG FIX: fail_json expects a string message, not the raw
            # exception object.
            self.module.fail_json(msg=to_native(vim_fault))
        # BUG FIX: previously 'task' was unbound (NameError) when neither
        # folder_name nor cluster_name was set; fail explicitly instead.
        if task is None:
            self.module.fail_json(
                msg="Unable to remove host from vCenter '%s': neither folder nor cluster is specified" % self.vcenter)
        try:
            changed, result = wait_for_task(task)
            result = "Host removed from vCenter '%s'" % self.vcenter
        except TaskError as task_error:
            self.module.fail_json(
                msg="Failed to remove the host from vCenter '%s' : %s" % (self.vcenter, to_native(task_error)))
    self.module.exit_json(changed=changed, result=str(result))
def state_update_cluster(self):
    """ Update cluster configuration of existing cluster

    Builds a combined ConfigSpecEx covering only the features (HA/DRS/vSAN)
    whose desired state differs from the current one, then issues a single
    ReconfigureComputeResource task (skipped in check mode). Exits the
    module with changed/result, or fails on fault/task errors.
    """
    changed, result = False, None
    cluster_config_spec = vim.cluster.ConfigSpecEx()
    diff = False  # Triggers Reconfigure Task only when there is a change
    if self.check_ha_config_diff() and not self.ignore_ha:
        cluster_config_spec.dasConfig = self.configure_ha()
        diff = True
    if self.check_drs_config_diff() and not self.ignore_drs:
        cluster_config_spec.drsConfig = self.configure_drs()
        diff = True
    if self.check_vsan_config_diff() and not self.ignore_vsan:
        cluster_config_spec.vsanConfig = self.configure_vsan()
        diff = True
    try:
        if not self.module.check_mode and diff:
            task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
            changed, result = wait_for_task(task)
        # NOTE(review): in check mode with a pending diff this reports
        # changed=False — confirm whether check mode should report changed=True.
        self.module.exit_json(changed=changed, result=result)
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        self.module.fail_json(msg=to_native(method_fault.msg))
    except TaskError as task_e:
        self.module.fail_json(msg=to_native(task_e))
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to update cluster"
                                  " due to generic exception %s" % to_native(generic_exc))
def configure_vsan(self):
    """ Manage VSAN configuration

    Reconfigures the cluster's vSAN settings only when the desired state
    differs from the current one; in check mode just reports changed=True.
    Exits the module with changed/result.
    """
    changed, result = False, None
    if self.check_vsan_config_diff():
        if not self.module.check_mode:
            cluster_config_spec = vim.cluster.ConfigSpecEx()
            cluster_config_spec.vsanConfig = vim.vsan.cluster.ConfigInfo()
            cluster_config_spec.vsanConfig.enabled = self.enable_vsan
            cluster_config_spec.vsanConfig.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
            cluster_config_spec.vsanConfig.defaultConfig.autoClaimStorage = self.params.get('vsan_auto_claim_storage')
            try:
                task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                changed, result = wait_for_task(task)
            except vmodl.RuntimeFault as runtime_fault:
                self.module.fail_json(msg=to_native(runtime_fault.msg))
            except vmodl.MethodFault as method_fault:
                self.module.fail_json(msg=to_native(method_fault.msg))
            except TaskError as task_e:
                self.module.fail_json(msg=to_native(task_e))
            except Exception as generic_exc:
                self.module.fail_json(msg="Failed to update cluster"
                                          " due to generic exception %s" % to_native(generic_exc))
        else:
            # Dry run: a diff exists, so report that a change would be made.
            changed = True
    self.module.exit_json(changed=changed, result=result)
def migrate_network_adapter_vds(self):
    """Reattach every virtual NIC of the VM to the target distributed
    portgroup and exit the module with the task outcome.

    Fails the module when the target portgroup cannot be found.
    """
    pg = self.find_dvspg_by_name()
    if pg is None:
        self.module.fail_json(msg="The standard portgroup was not found")
    dvswitch = pg.config.distributedVirtualSwitch

    vm_configspec = vim.vm.ConfigSpec()
    for device in self.vm.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualEthernetCard):
            # BUG FIX: build a fresh backing/device spec per NIC. Previously
            # a single VirtualDeviceSpec was created once and appended in the
            # loop, so with multiple NICs every deviceChange entry aliased
            # the same object and only the last NIC was actually configured.
            port = vim.dvs.PortConnection()
            port.switchUuid = dvswitch.uuid
            port.portgroupKey = pg.key
            nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
            nic.port = port
            devicespec = vim.vm.device.VirtualDeviceSpec()
            devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            devicespec.device = device
            devicespec.device.backing = nic
            vm_configspec.deviceChange.append(devicespec)

    task = self.vm.ReconfigVM_Task(vm_configspec)
    changed, result = wait_for_task(task)
    self.module.exit_json(changed=changed, result=result)
def state_update_evc(self):
    """ Update EVC Mode

    Reconfigures the cluster's EVC mode when it differs from the desired
    mode (skipped in check mode, which only reports changed=True). Exits
    the module with an appropriate message; fails on TaskError.
    """
    changed, result = False, None
    try:
        if not self.module.check_mode and self.current_evc_mode != self.evc_mode:
            evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode)
            changed, result = wait_for_task(evc_task)
        # Dry run with a pending change: report changed without reconfiguring.
        if self.module.check_mode and self.current_evc_mode != self.evc_mode:
            changed, result = True, None
        if self.current_evc_mode == self.evc_mode:
            self.module.exit_json(
                changed=changed,
                msg="EVC Mode is already set to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
        self.module.exit_json(
            changed=changed,
            msg="EVC Mode has been updated to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
    except TaskError as invalid_argument:
        self.module.fail_json(msg="Failed to update EVC mode: %s" % to_native(invalid_argument))
def update_rule_spec(self, rule_obj=None):
    """ Update DRS rule

    Edits an existing affinity/anti-affinity rule in place (VM list,
    mandatory and enabled flags) and reconfigures the cluster.

    Args:
        rule_obj: existing vim.cluster rule info object to update.

    Returns:
        (changed, result) tuple; result is either the normalized rule
        spec (on success) or an error string.
    """
    changed = False
    rule_obj.vm = self.vm_obj_list
    if (rule_obj.mandatory != self.mandatory):
        rule_obj.mandatory = self.mandatory
    if (rule_obj.enabled != self.enabled):
        rule_obj.enabled = self.enabled
    rule_spec = vim.cluster.RuleSpec(info=rule_obj, operation='edit')
    # NOTE(review): this uses vim.cluster.ConfigSpec/ReconfigureCluster_Task
    # while create_rule_spec uses ConfigSpecEx/ReconfigureEx — confirm the
    # asymmetry is intentional.
    config_spec = vim.cluster.ConfigSpec(rulesSpec=[rule_spec])
    try:
        task = self.cluster_obj.ReconfigureCluster_Task(config_spec, modify=True)
        changed, result = wait_for_task(task)
    except vmodl.fault.InvalidRequest as e:
        result = to_native(e.msg)
    except Exception as e:
        result = to_native(e)
    if changed:
        # Re-read the rule so the result reflects server-side state.
        rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
        result = self.normalize_rule_spec(rule_obj)
    return changed, result
def create_rule_spec(self):
    """ Create DRS rule

    Builds an affinity or anti-affinity rule from the instance settings
    and adds it to the cluster via ReconfigureEx.

    Returns:
        (changed, result) tuple; result is the normalized rule spec on
        success, otherwise an error string.
    """
    changed = False
    rule_cls = vim.cluster.AffinityRuleSpec if self.affinity_rule else vim.cluster.AntiAffinityRuleSpec
    rule_info = rule_cls()
    rule_info.vm = self.vm_obj_list
    rule_info.enabled = self.enabled
    rule_info.mandatory = self.mandatory
    rule_info.name = self.rule_name

    rule_spec = vim.cluster.RuleSpec(info=rule_info, operation='add')
    config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
    try:
        reconfig_task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
        changed, result = wait_for_task(reconfig_task)
    except vmodl.fault.InvalidRequest as e:
        result = to_native(e.msg)
    except Exception as e:
        result = to_native(e)

    if changed:
        # Fetch the freshly created rule to report its server-side state.
        created = self.get_rule_key_by_name(rule_name=self.rule_name)
        result = self.normalize_rule_spec(created)
    return changed, result
def exit_maintenance(self):
    """Exit maintenance mode on the host (15s timeout); fail the module
    on any error."""
    try:
        mm_task = self.host.ExitMaintenanceMode_Task(timeout=15)
        success, result = wait_for_task(mm_task)
    except Exception as generic_exc:
        self.module.fail_json(
            msg="Failed to exit maintenance mode due to %s" % to_native(generic_exc))
def state_destroy_dvspg(self):
    """Destroy the distributed virtual portgroup (skipped in check mode)
    and exit the module with the task outcome."""
    changed, result = True, None
    if not self.module.check_mode:
        destroy_task = self.dvs_portgroup.Destroy_Task()
        changed, result = wait_for_task(destroy_task)
    self.module.exit_json(changed=changed, result=str(result))
def enter_maintenance(self):
    """Enter maintenance mode on the host (15s timeout); fail the module
    on any error (e.g. powered-on VMs still present)."""
    try:
        mm_task = self.host.EnterMaintenanceMode_Task(timeout=15)
        success, result = wait_for_task(mm_task)
    except Exception as e:
        self.module.fail_json(
            msg="Failed to enter maintenance mode."
                " Ensure that there are no powered on machines on the host. %s" % to_native(e))
def restore_original_state(self):
    """In case of failure restore, the changes we made.

    Reverts port promiscuous-mode settings recorded in
    ``self.modified_ports`` and re-adds a mirroring session that was
    deleted earlier (``self.deleted_session``), if any.
    """
    for port, state in self.modified_ports.items():
        self.set_port_security_promiscuous([port], state)
    if self.deleted_session is not None:
        session = self.deleted_session
        config_version = self.dv_switch.config.configVersion
        s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(vspanSession=session, operation="add")
        c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(vspanConfigSpec=[s_spec], configVersion=config_version)
        # Revert the delete
        task = self.dv_switch.ReconfigureDvs_Task(c_spec)
        try:
            wait_for_task(task)
        except Exception:
            # NOTE(review): recursing into restore_original_state here could
            # loop if the revert keeps failing — confirm termination relies on
            # fail_json exiting the process.
            self.restore_original_state()
            self.module.fail_json(msg=task.info.error.msg)
def reconnect_host(self, host_object):
    """Reconnect host to vCenter

    Builds reconnect arguments (including a connection spec with thumbprint
    when ESXi credentials are supplied), starts the reconnect task and waits
    for it, mapping every known fault to a module failure.

    Args:
        host_object: vim.HostSystem to reconnect.
    """
    reconnecthost_args = {}
    reconnecthost_args['reconnectSpec'] = vim.HostSystem.ReconnectSpec()
    reconnecthost_args['reconnectSpec'].syncState = True
    if self.esxi_username and self.esxi_password:
        # Build the connection spec as well and fetch thumbprint if enabled
        # Useful if you reinstalled a host and it uses a new self-signed certificate
        reconnecthost_args['cnxSpec'] = self.get_host_connect_spec()
    try:
        task = host_object.ReconnectHost_Task(**reconnecthost_args)
    except vim.fault.InvalidLogin as invalid_login:
        self.module.fail_json(
            msg="Cannot authenticate with the host : %s" % to_native(invalid_login))
    except vim.fault.InvalidState as invalid_state:
        self.module.fail_json(msg="The host is not disconnected : %s" % to_native(invalid_state))
    except vim.fault.InvalidName as invalid_name:
        self.module.fail_json(msg="The host name is invalid : %s" % to_native(invalid_name))
    except vim.fault.HostConnectFault as connect_fault:
        self.module.fail_json(
            msg="An error occurred during reconnect : %s" % to_native(connect_fault))
    except vmodl.fault.NotSupported as not_supported:
        self.module.fail_json(
            msg="No host can be added to this group : %s" % to_native(not_supported))
    except vim.fault.AlreadyBeingManaged as already_managed:
        self.module.fail_json(
            msg="The host is already being managed by another vCenter server : %s" % to_native(already_managed))
    except vmodl.fault.NotEnoughLicenses as not_enough_licenses:
        self.module.fail_json(
            msg="There are not enough licenses to add this host : %s" % to_native(not_enough_licenses))
    except vim.fault.NoHost as no_host:
        self.module.fail_json(msg="Unable to contact the host : %s" % to_native(no_host))
    except vim.fault.NotSupportedHost as host_not_supported:
        self.module.fail_json(
            msg="The host is running a software version that is not supported : %s" % to_native(host_not_supported))
    except vim.fault.SSLVerifyFault as ssl_fault:
        # BUG FIX: this message string was broken across a line break in the
        # source, leaving an unterminated string literal; rejoined here.
        self.module.fail_json(
            msg="The host certificate could not be authenticated : %s" % to_native(ssl_fault))
    try:
        changed, result = wait_for_task(task)
    except TaskError as task_error:
        self.module.fail_json(
            msg="Failed to reconnect host to vCenter '%s' due to %s" % (self.vcenter, to_native(task_error)))
def delete_mirroring_session(self, key):
    """Deletes the mirroring session.

    Parameters
    ----------
    key : str
        Key of the Session
    """
    doomed_session = vim.dvs.VmwareDistributedVirtualSwitch.VspanSession(key=key)
    current_version = self.dv_switch.config.configVersion
    span_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(
        vspanSession=doomed_session, operation="remove")
    switch_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(
        vspanConfigSpec=[span_spec], configVersion=current_version)
    reconfig_task = self.dv_switch.ReconfigureDvs_Task(switch_spec)
    try:
        wait_for_task(reconfig_task)
    except Exception:
        # Undo earlier changes before reporting the task's error message.
        self.restore_original_state()
        self.module.fail_json(msg=reconfig_task.info.error.msg)
def deploy(self):
    """Finish the OVF deployment: optionally inject the ovfEnv, power the
    VM on, wait for the task and (optionally) for an IP address.

    Returns:
        dict of VM facts (from IP wait or full fact gathering).
    """
    facts = {}
    if self.params['inject_ovf_env']:
        self.inject_ovf_env()
    if self.params['power_on']:
        power_task = self.entity.PowerOn()
        if self.params['wait']:
            wait_for_task(power_task)
            if self.params['wait_for_ip_address']:
                ip_facts = wait_for_vm_ip(self.content, self.entity)
                if not ip_facts:
                    self.module.fail_json(msg='Waiting for IP address timed out')
                facts.update(ip_facts)
    # No IP facts were gathered (e.g. no power-on/wait): collect full facts.
    if not facts:
        facts.update(gather_vm_facts(self.content, self.entity))
    return facts
def upgrade_tools(self, vm): result = {'failed': False, 'changed': False, 'msg': ''} # Exit if VMware tools is already up to date if vm.guest.toolsStatus == "toolsOk": result.update( changed=False, msg="VMware tools is already up to date", ) return result # Fail if VM is not powered on elif vm.summary.runtime.powerState != "poweredOn": result.update( failed=True, msg="VM must be powered on to upgrade tools", ) return result # Fail if VMware tools is either not running or not installed elif vm.guest.toolsStatus in ["toolsNotRunning", "toolsNotInstalled"]: result.update( failed=True, msg="VMware tools is either not running or not installed", ) return result # If vmware tools is out of date, check major OS family # Upgrade tools on Linux and Windows guests elif vm.guest.toolsStatus == "toolsOld": try: if vm.guest.guestFamily in ["linuxGuest", "windowsGuest"]: task = vm.UpgradeTools() changed, err_msg = wait_for_task(task) result.update(changed=changed, msg=to_native(err_msg)) else: result.update( msg= 'Guest Operating System is other than Linux and Windows.' ) return result except Exception as exc: result.update( failed=True, msg='Error while upgrading VMware tools %s' % to_native(exc), ) return result else: result.update( failed=True, msg="VMware tools could not be upgraded", ) return result