def clone(self):
    """Clone the source VM/template onto the destination vCenter.

    Locates the destination folder, launches the Clone task and waits
    for it to finish.

    Returns:
        dict with 'changed', 'failed' and either 'msg' (on task error)
        or 'vm_info' describing the newly created VM.
    """
    target_folder = find_folder_by_name(
        content=self.destination_content,
        folder_name=self.params['destination_vm_folder'],
    )
    new_name = self.params['destination_vm_name']
    clone_task = self.vm_obj.Clone(folder=target_folder, name=new_name, spec=self.clone_spec)
    wait_for_task(clone_task)
    if clone_task.info.state == 'error':
        return {'changed': False, 'failed': True, 'msg': clone_task.info.error.msg}
    return {
        'changed': True,
        'failed': False,
        'vm_info': self.get_new_vm_info(new_name),
    }
def set_nioc_version(self):
    """Upgrade the Network I/O Control version on the DVS.

    Falls back to 'version2' when no explicit version was requested,
    then reconfigures the switch and waits for the task.
    """
    spec = vim.DistributedVirtualSwitch.ConfigSpec()
    spec.configVersion = self.dvs.config.configVersion
    # Default to the lowest supported NIOC version when unset.
    self.version = self.version or 'version2'
    spec.networkResourceControlVersion = self.version
    try:
        reconfig_task = self.dvs.ReconfigureDvs_Task(spec=spec)
        wait_for_task(reconfig_task)
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(
            msg="RuntimeFault when setting NIOC version: %s " % to_native(runtime_fault.msg))
def take_vm_screenshot(self):
    """Take a screenshot of the powered-on VM and optionally download it.

    Fails the module when the VM is not powered on or when the
    CreateScreenshot task raises a known fault.  When 'local_path' is
    set and an HTTP access URL could be generated, the screenshot file
    is downloaded locally.

    Returns:
        dict with 'changed', 'failed' and either 'msg' (task error)
        or 'screenshot_info' facts.
    """
    # Screenshots can only be taken from a running VM.
    if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
        self.module.fail_json(
            msg="VM is %s, valid power state is poweredOn." % self.current_vm_obj.runtime.powerState)
    try:
        task = self.current_vm_obj.CreateScreenshot_Task()
        wait_for_task(task)
    except vim.fault.FileFault as e:
        self.module.fail_json(
            msg=
            "Failed to create screenshot due to errors when creating or accessing one or more"
            " files needed for this operation, %s" % to_native(e.msg))
    except vim.fault.InvalidState as e:
        self.module.fail_json(
            msg=
            "Failed to create screenshot due to VM is not ready to respond to such requests,"
            " %s" % to_native(e.msg))
    except vmodl.RuntimeFault as e:
        self.module.fail_json(
            msg="Failed to create screenshot due to runtime fault, %s," % to_native(e.msg))
    except vim.fault.TaskInProgress as e:
        self.module.fail_json(
            msg="Failed to create screenshot due to VM is busy, %s" % to_native(e.msg))

    if task.info.state == 'error':
        return {
            'changed': self.change_detected,
            'failed': True,
            'msg': task.info.error.msg
        }
    else:
        download_file_size = None
        self.change_detected = True
        # task.info.result holds the datastore path of the screenshot file.
        file_url = self.generate_http_access_url(task.info.result)
        if self.params.get('local_path'):
            if file_url:
                download_file_size = self.download_screenshot_file(
                    file_url=file_url,
                    local_file_path=self.params['local_path'],
                    file_name=task.info.result.split('/')[-1])
        screenshot_facts = self.get_screenshot_facts(
            task.info, file_url, download_file_size)
        return {
            'changed': self.change_detected,
            'failed': False,
            'screenshot_info': screenshot_facts
        }
def state_update_evc(self):
    """Update the cluster's EVC mode (honours check mode)."""
    changed, result = False, None
    try:
        mode_differs = self.current_evc_mode != self.evc_mode
        if not self.module.check_mode and mode_differs:
            changed, result = wait_for_task(
                self.evcm.ConfigureEvcMode_Task(self.evc_mode))
        if self.module.check_mode and mode_differs:
            changed, result = True, None
        if self.current_evc_mode == self.evc_mode:
            self.module.exit_json(
                changed=changed,
                msg="EVC Mode is already set to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
        self.module.exit_json(
            changed=changed,
            msg="EVC Mode has been updated to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
    except TaskError as invalid_argument:
        self.module.fail_json(
            msg="Failed to update EVC mode: %s" % to_native(invalid_argument))
def restore_original_state(self):
    """In case of failure restore, the changes we made.

    Re-applies the saved promiscuous-mode state to every port that was
    modified and, if a mirroring session was deleted during this run,
    re-adds it to the DVS.
    """
    for port, state in self.modified_ports.items():
        self.set_port_security_promiscuous([port], state)
    if self.deleted_session is not None:
        session = self.deleted_session
        config_version = self.dv_switch.config.configVersion
        s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(
            vspanSession=session, operation="add")
        c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(
            vspanConfigSpec=[s_spec], configVersion=config_version)
        # Revert the delete
        task = self.dv_switch.ReconfigureDvs_Task(c_spec)
        try:
            wait_for_task(task)
        except Exception:
            # BUGFIX: previously this called self.restore_original_state()
            # again before failing, which could recurse indefinitely when
            # the revert keeps failing.  Just report the task error.
            self.module.fail_json(msg=task.info.error.msg)
def reconfigure_vm(self, config_spec, device_type):
    """Apply a device config spec to the virtual machine.

    Args:
        config_spec: Config Spec to apply.
        device_type: Human-readable device type, used in error messages.

    Returns:
        Tuple of (changed, actual task result).
    """
    changed, results = False, ''
    try:
        # Perform actual VM reconfiguration
        reconfig_task = self.vm.ReconfigVM_Task(spec=config_spec)
        changed, results = wait_for_task(reconfig_task)
    except vim.fault.InvalidDeviceSpec as invalid_device_spec:
        self.module.fail_json(
            msg="Failed to manage %s on given virtual machine due to invalid"
                " device spec : %s" % (device_type, to_native(invalid_device_spec.msg)),
            details="Please check ESXi server logs for more details.")
    except vim.fault.RestrictedVersion as e:
        self.module.fail_json(
            msg="Failed to reconfigure virtual machine due to"
                " product versioning restrictions: %s" % to_native(e.msg))
    return changed, results
def create_rule_spec(self):
    """Create a DRS (anti-)affinity rule on the cluster.

    Returns:
        Tuple of (changed, normalized rule spec on success or an error
        message string on failure).
    """
    changed = False
    # Affinity and anti-affinity rules share the same field layout.
    rule_cls = vim.cluster.AffinityRuleSpec if self.affinity_rule else vim.cluster.AntiAffinityRuleSpec
    rule = rule_cls()
    rule.vm = self.vm_obj_list
    rule.enabled = self.enabled
    rule.mandatory = self.mandatory
    rule.name = self.rule_name
    config_spec = vim.cluster.ConfigSpecEx(
        rulesSpec=[vim.cluster.RuleSpec(info=rule, operation='add')])
    try:
        reconfig_task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
        changed, result = wait_for_task(reconfig_task)
    except vmodl.fault.InvalidRequest as e:
        result = to_native(e.msg)
    except Exception as e:
        result = to_native(e)
    if changed:
        # Read the rule back so the result carries server-assigned keys.
        rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
        result = self.normalize_rule_spec(rule_obj)
    return changed, result
def state_update_cluster(self):
    """ Update cluster configuration of existing cluster

    Builds a combined HA/DRS/vSAN config spec from the sub-configs whose
    current state differs from the requested one, applies it (unless in
    check mode) and exits the module with 'changed'/'result'.
    """
    changed, result = False, None
    cluster_config_spec = vim.cluster.ConfigSpecEx()
    # Triggers Reconfigure Task only when there is a change
    diff = False
    if self.check_ha_config_diff() and not self.ignore_ha:
        cluster_config_spec.dasConfig = self.configure_ha()
        diff = True
    if self.check_drs_config_diff() and not self.ignore_drs:
        cluster_config_spec.drsConfig = self.configure_drs()
        diff = True
    if self.check_vsan_config_diff() and not self.ignore_vsan:
        cluster_config_spec.vsanConfig = self.configure_vsan()
        diff = True
    try:
        if not self.module.check_mode and diff:
            # Second argument True = modify the existing configuration
            # rather than replace it wholesale.
            task = self.cluster.ReconfigureComputeResource_Task(
                cluster_config_spec, True)
            changed, result = wait_for_task(task)
        # NOTE(review): in check mode 'changed' stays False even when a
        # diff was detected -- confirm this is intended.
        self.module.exit_json(changed=changed, result=result)
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        self.module.fail_json(msg=to_native(method_fault.msg))
    except TaskError as task_e:
        self.module.fail_json(msg=to_native(task_e))
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to update cluster"
                                  " due to generic exception %s" % to_native(generic_exc))
def migrate_network_adapter_vds(self):
    """Migrate every virtual NIC of the VM to the distributed portgroup.

    Builds one device spec per VirtualEthernetCard pointing its backing
    at the target DVS portgroup, reconfigures the VM and exits the
    module with the task outcome.
    """
    pg = self.find_dvspg_by_name()
    if pg is None:
        self.module.fail_json(msg="The standard portgroup was not found")
    dvswitch = pg.config.distributedVirtualSwitch

    vm_configspec = vim.vm.ConfigSpec()
    for device in self.vm.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualEthernetCard):
            # BUGFIX: previously a single VirtualDeviceSpec/backing object
            # was created once and reused for every NIC, so deviceChange
            # held N references to the same spec and only the last adapter
            # was effectively migrated.  Build fresh objects per device.
            port = vim.dvs.PortConnection()
            port.switchUuid = dvswitch.uuid
            port.portgroupKey = pg.key
            backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
            backing.port = port
            devicespec = vim.vm.device.VirtualDeviceSpec()
            devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            devicespec.device = device
            devicespec.device.backing = backing
            vm_configspec.deviceChange.append(devicespec)
    task = self.vm.ReconfigVM_Task(vm_configspec)
    changed, result = wait_for_task(task)
    self.module.exit_json(changed=changed, result=result)
def destroy_dvswitch(self):
    """Delete a DVS.

    Honours check mode; on a real run launches Destroy_Task and waits
    for completion, then exits the module with the result.
    """
    changed = True
    results = dict(changed=changed)
    results['dvswitch'] = self.switch_name
    if self.module.check_mode:
        results['result'] = "DVS would be deleted"
    else:
        try:
            task = self.dvs.Destroy_Task()
        except vim.fault.VimFault as vim_fault:
            # BUGFIX: message previously read "Failed to deleted DVS".
            self.module.fail_json(msg="Failed to delete DVS : %s" % to_native(vim_fault))
        wait_for_task(task)
        results['result'] = "DVS deleted"
    self.module.exit_json(**results)
def state_remove_host(self):
    """Remove host from vCenter.

    Puts the host into maintenance mode first when it lives in a
    cluster, then destroys either the parent compute resource (folder
    case) or the host itself, and exits the module with the outcome.
    """
    changed = True
    result = None
    if self.module.check_mode:
        result = "Host would be removed from vCenter '%s'" % self.vcenter
    else:
        # Check parent type
        parent_type = self.get_parent_type(self.host_update)
        if parent_type == 'cluster':
            self.put_host_in_maintenance_mode(self.host_update)
        try:
            if self.folder_name:
                task = self.host_parent_compute_resource.Destroy_Task()
            elif self.cluster_name:
                task = self.host.Destroy_Task()
        except vim.fault.VimFault as vim_fault:
            # BUGFIX: previously passed the raw fault object as msg;
            # fail_json expects a string.
            self.module.fail_json(msg=to_native(vim_fault))
        # NOTE(review): if neither folder_name nor cluster_name is set,
        # 'task' is unbound here -- confirm callers always set one.
        try:
            changed, result = wait_for_task(task)
            result = "Host removed from vCenter '%s'" % self.vcenter
        except TaskError as task_error:
            self.module.fail_json(
                msg="Failed to remove the host from vCenter '%s' : %s" % (self.vcenter, to_native(task_error))
            )
    self.module.exit_json(changed=changed, result=str(result))
def update_rule_spec(self, rule_obj=None):
    """Update an existing DRS rule to match the requested settings.

    Args:
        rule_obj: Existing cluster rule object to modify in place.

    Returns:
        Tuple of (changed, normalized rule spec on success or an error
        message string on failure).
    """
    changed = False
    rule_obj.vm = self.vm_obj_list
    if rule_obj.mandatory != self.mandatory:
        rule_obj.mandatory = self.mandatory
    if rule_obj.enabled != self.enabled:
        rule_obj.enabled = self.enabled
    rule_spec = vim.cluster.RuleSpec(info=rule_obj, operation='edit')
    # CONSISTENCY FIX: use ConfigSpecEx/ReconfigureEx like create_rule_spec
    # does -- ReconfigureCluster_Task is the deprecated variant.
    config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
    try:
        task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
        changed, result = wait_for_task(task)
    except vmodl.fault.InvalidRequest as e:
        result = to_native(e.msg)
    except Exception as e:
        result = to_native(e)
    if changed:
        # Read the rule back so the result carries server-side state.
        rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
        result = self.normalize_rule_spec(rule_obj)
    return changed, result
def ExitMaintenanceMode(self):
    """Take the host out of maintenance mode and exit the module."""
    if not self.host.runtime.inMaintenanceMode:
        self.module.exit_json(
            changed=False,
            hostsystem=str(self.host),
            hostname=self.esxi_hostname,
            status='NO_ACTION',
            msg='Host %s not in maintenance mode' % self.esxi_hostname)
    try:
        exit_task = self.host.ExitMaintenanceMode_Task(self.module.params['timeout'])
        success, result = wait_for_task(exit_task)
        self.module.exit_json(
            changed=success,
            hostsystem=str(self.host),
            hostname=self.esxi_hostname,
            status='EXIT',
            msg='Host %s exited maintenance mode' % self.esxi_hostname)
    except TaskError as e:
        self.module.fail_json(
            msg='Host %s failed to exit maintenance mode due to %s' % (self.esxi_hostname, to_native(e)))
def reconnect_host(self, host_object):
    """Reconnect host to vCenter

    Builds a reconnect spec (with state sync), optionally adds a
    connection spec when ESXi credentials are supplied, then runs
    ReconnectHost_Task and waits for it, mapping every known fault to a
    module failure.
    """
    reconnecthost_args = {}
    reconnecthost_args['reconnectSpec'] = vim.HostSystem.ReconnectSpec()
    reconnecthost_args['reconnectSpec'].syncState = True
    if self.esxi_username and self.esxi_password:
        # Build the connection spec as well and fetch thumbprint if enabled
        # Useful if you reinstalled a host and it uses a new self-signed certificate
        reconnecthost_args['cnxSpec'] = self.get_host_connect_spec()
    try:
        task = host_object.ReconnectHost_Task(**reconnecthost_args)
    except vim.fault.InvalidLogin as invalid_login:
        self.module.fail_json(
            msg="Cannot authenticate with the host : %s" % to_native(invalid_login)
        )
    except vim.fault.InvalidState as invalid_state:
        self.module.fail_json(
            msg="The host is not disconnected : %s" % to_native(invalid_state)
        )
    except vim.fault.InvalidName as invalid_name:
        self.module.fail_json(
            msg="The host name is invalid : %s" % to_native(invalid_name)
        )
    except vim.fault.HostConnectFault as connect_fault:
        self.module.fail_json(
            msg="An error occurred during reconnect : %s" % to_native(connect_fault)
        )
    except vmodl.fault.NotSupported as not_supported:
        self.module.fail_json(
            msg="No host can be added to this group : %s" % to_native(not_supported)
        )
    except vim.fault.AlreadyBeingManaged as already_managed:
        self.module.fail_json(
            msg="The host is already being managed by another vCenter server : %s" % to_native(already_managed)
        )
    except vmodl.fault.NotEnoughLicenses as not_enough_licenses:
        self.module.fail_json(
            msg="There are not enough licenses to add this host : %s" % to_native(not_enough_licenses)
        )
    except vim.fault.NoHost as no_host:
        self.module.fail_json(
            msg="Unable to contact the host : %s" % to_native(no_host)
        )
    except vim.fault.NotSupportedHost as host_not_supported:
        self.module.fail_json(
            msg="The host is running a software version that is not supported : %s" % to_native(host_not_supported)
        )
    except vim.fault.SSLVerifyFault as ssl_fault:
        # NOTE(review): this message string was split across lines by an
        # extraction artifact; reassembled here as a single literal.
        self.module.fail_json(
            msg="The host certificate could not be authenticated : %s" % to_native(ssl_fault)
        )
    try:
        changed, result = wait_for_task(task)
    except TaskError as task_error:
        self.module.fail_json(
            msg="Failed to reconnect host to vCenter '%s' due to %s" % (self.vcenter, to_native(task_error))
        )
def EnterMaintenanceMode(self):
    """Put the host into maintenance mode and exit the module."""
    if self.host.runtime.inMaintenanceMode:
        self.module.exit_json(
            changed=False,
            hostsystem=str(self.host),
            hostname=self.esxi_hostname,
            status='NO_ACTION',
            msg='Host %s already in maintenance mode' % self.esxi_hostname)
    spec = vim.host.MaintenanceSpec()
    if self.vsan:
        # Request the chosen vSAN data decommission behaviour.
        spec.vsanMode = vim.vsan.host.DecommissionMode()
        spec.vsanMode.objectAction = self.vsan
    try:
        enter_task = self.host.EnterMaintenanceMode_Task(
            self.module.params['timeout'], self.module.params['evacuate'], spec)
        success, result = wait_for_task(enter_task)
        self.module.exit_json(
            changed=success,
            hostsystem=str(self.host),
            hostname=self.esxi_hostname,
            status='ENTER',
            msg='Host %s entered maintenance mode' % self.esxi_hostname)
    except TaskError as e:
        self.module.fail_json(
            msg='Host %s failed to enter maintenance mode due to %s' % (self.esxi_hostname, to_native(e)))
def configure_vsan(self):
    """ Manage VSAN configuration

    Reconfigures the cluster's vSAN settings when the current config
    differs from the requested one; in check mode only reports the
    pending change.  Exits the module with 'changed'/'result'.
    """
    changed, result = False, None
    if self.check_vsan_config_diff():
        if not self.module.check_mode:
            cluster_config_spec = vim.cluster.ConfigSpecEx()
            cluster_config_spec.vsanConfig = vim.vsan.cluster.ConfigInfo()
            cluster_config_spec.vsanConfig.enabled = self.enable_vsan
            cluster_config_spec.vsanConfig.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
            cluster_config_spec.vsanConfig.defaultConfig.autoClaimStorage = self.params.get('vsan_auto_claim_storage')
            try:
                # Second argument True = modify the existing configuration
                # rather than replace it wholesale.
                task = self.cluster.ReconfigureComputeResource_Task(
                    cluster_config_spec, True)
                changed, result = wait_for_task(task)
            except vmodl.RuntimeFault as runtime_fault:
                self.module.fail_json(msg=to_native(runtime_fault.msg))
            except vmodl.MethodFault as method_fault:
                self.module.fail_json(msg=to_native(method_fault.msg))
            except TaskError as task_e:
                self.module.fail_json(msg=to_native(task_e))
            except Exception as generic_exc:
                self.module.fail_json(msg="Failed to update cluster"
                                          " due to generic exception %s" % to_native(generic_exc))
        else:
            # Check mode: a diff exists, so report 'changed' without acting.
            changed = True
    self.module.exit_json(changed=changed, result=result)
def enter_maintenance(self):
    """Place the host in maintenance mode with a 15 second timeout."""
    try:
        maintenance_task = self.host.EnterMaintenanceMode_Task(timeout=15)
        success, result = wait_for_task(maintenance_task)
    except Exception as e:
        self.module.fail_json(
            msg="Failed to enter maintenance mode."
                " Ensure that there are no powered on machines on the host. %s" % to_native(e))
def ensure(self):
    """Enter or exit maintenance mode for every targeted datastore.

    state == 'present' enters maintenance mode, 'absent' exits it.
    Datastores already in the desired state are skipped.  Exits the
    module with per-datastore messages and an overall 'changed' flag.
    """
    datastore_results = dict()
    change_datastore_list = []
    for datastore in self.datastore_objs:
        changed = False
        if self.state == 'present' and datastore.summary.maintenanceMode != 'normal':
            datastore_results[datastore.name] = "Datastore '%s' is already in maintenance mode." % datastore.name
            # BUGFIX: this was 'break', which silently skipped every
            # remaining datastore; only this one should be skipped.
            continue
        elif self.state == 'absent' and datastore.summary.maintenanceMode == 'normal':
            datastore_results[datastore.name] = "Datastore '%s' is not in maintenance mode." % datastore.name
            # BUGFIX: same 'break' -> 'continue' fix as above.
            continue
        try:
            if self.state == 'present':
                storage_replacement_result = datastore.DatastoreEnterMaintenanceMode()
                task = storage_replacement_result.task
            else:
                task = datastore.DatastoreExitMaintenanceMode_Task()
            success, result = wait_for_task(task)
            if success:
                changed = True
                if self.state == 'present':
                    datastore_results[datastore.name] = "Datastore '%s' entered in maintenance mode." % datastore.name
                else:
                    datastore_results[datastore.name] = "Datastore '%s' exited from maintenance mode." % datastore.name
        except vim.fault.InvalidState as invalid_state:
            if self.state == 'present':
                msg = "Unable to enter datastore '%s' in" % datastore.name
            else:
                msg = "Unable to exit datastore '%s' from" % datastore.name
            msg += " maintenance mode due to : %s" % to_native(invalid_state.msg)
            self.module.fail_json(msg=msg)
        except Exception as exc:
            if self.state == 'present':
                msg = "Unable to enter datastore '%s' in" % datastore.name
            else:
                msg = "Unable to exit datastore '%s' from" % datastore.name
            msg += " maintenance mode due to generic exception : %s" % to_native(exc)
            self.module.fail_json(msg=msg)
        change_datastore_list.append(changed)
    self.module.exit_json(changed=any(change_datastore_list), datastore_status=datastore_results)
def state_destroy_dvspg(self):
    """Destroy the distributed portgroup and exit the module."""
    changed, result = True, None
    if not self.module.check_mode:
        destroy_task = self.dvs_portgroup.Destroy_Task()
        changed, result = wait_for_task(destroy_task)
    self.module.exit_json(changed=changed, result=str(result))
def update_dvs_config(self, switch_object, spec):
    """Update DVS config, failing the module on TaskError.

    Returns whatever wait_for_task yields for the reconfigure task.
    """
    try:
        reconfig_task = switch_object.ReconfigureDvs_Task(spec)
        outcome = wait_for_task(reconfig_task)
    except TaskError as invalid_argument:
        self.module.fail_json(msg="Failed to update DVS : %s" % to_native(invalid_argument))
    return outcome
def delete_mirroring_session(self, key):
    """Remove the VSPAN (port mirroring) session identified by *key*.

    Parameters
    ----------
    key : str
        Key of the Session
    """
    target_session = vim.dvs.VmwareDistributedVirtualSwitch.VspanSession(key=key)
    session_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(
        vspanSession=target_session, operation="remove")
    config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(
        vspanConfigSpec=[session_spec],
        configVersion=self.dv_switch.config.configVersion)
    reconfig_task = self.dv_switch.ReconfigureDvs_Task(config_spec)
    try:
        wait_for_task(reconfig_task)
    except Exception:
        # Roll back any changes made earlier in the run before failing.
        self.restore_original_state()
        self.module.fail_json(msg=reconfig_task.info.error.msg)
def reconfigure_vm_serial_port(self, vm_obj):
    """ Reconfigure vm with new or modified serial port config spec

    Runs one ReconfigVM task per port to be created (each create needs
    its own spec), then a final task with the accumulated config spec.

    Returns:
        dict with 'changed', 'failed' and either 'msg' (task error)
        or 'serial_port_info'.
    """
    # Populates self.config_spec / self.serial_ports as a side effect.
    self.get_serial_port_config_spec(vm_obj)
    try:
        # configure create tasks first
        if self.serial_ports:
            for serial_port in self.serial_ports:
                # each type of serial port is of config_spec.device = vim.vm.device.VirtualSerialPort() object type
                # because serial ports differ in the backing types and config_spec.device has to be unique,
                # we are creating a new spec for every create port configuration
                spec = vim.vm.ConfigSpec()
                spec.deviceChange.append(serial_port)
                task = vm_obj.ReconfigVM_Task(spec=spec)
                wait_for_task(task)
        # Final task applies the remaining (edit/remove) changes; its
        # state is what the result below reports on.
        task = vm_obj.ReconfigVM_Task(spec=self.config_spec)
        wait_for_task(task)
    except vim.fault.InvalidDatastorePath as e:
        self.module.fail_json(
            msg=
            "Failed to configure serial port on given virtual machine due to invalid path: %s"
            % to_native(e.msg))
    except vim.fault.RestrictedVersion as e:
        self.module.fail_json(
            msg=
            "Failed to reconfigure virtual machine due to product versioning restrictions: %s"
            % to_native(e.msg))
    if task.info.state == 'error':
        results = {
            'changed': self.change_applied,
            'failed': True,
            'msg': task.info.error.msg
        }
    else:
        serial_port_info = get_serial_port_info(vm_obj)
        results = {
            'changed': self.change_applied,
            'failed': False,
            'serial_port_info': serial_port_info
        }
    return results
def update_health_check_config(self, switch_object, health_check_config):
    """Apply a new health check configuration to the switch."""
    try:
        health_task = switch_object.UpdateDVSHealthCheckConfig_Task(
            healthCheckConfig=health_check_config)
    except vim.fault.DvsFault as dvs_fault:
        self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
    except vmodl.fault.NotSupported as not_supported:
        self.module.fail_json(
            msg="Health check not supported on the switch : %s" % to_native(not_supported))
    except TaskError as invalid_argument:
        self.module.fail_json(msg="Failed to configure health check : %s" % to_native(invalid_argument))
    try:
        wait_for_task(health_task)
    except TaskError as invalid_argument:
        self.module.fail_json(
            msg="Failed to update health check config : %s" % to_native(invalid_argument))
def deploy(self):
    """Power on the deployed entity (optionally waiting) and gather facts.

    Returns:
        dict of VM facts; populated from the IP wait when requested,
        otherwise from a full fact gather.
    """
    facts = {}
    if self.params['inject_ovf_env']:
        self.inject_ovf_env()
    if self.params['power_on']:
        power_task = self.entity.PowerOn()
        if self.params['wait']:
            wait_for_task(power_task)
            if self.params['wait_for_ip_address']:
                _facts = wait_for_vm_ip(self.content, self.entity)
                if not _facts:
                    self.module.fail_json(msg='Waiting for IP address timed out')
                facts.update(_facts)
    if not facts:
        facts.update(gather_vm_facts(self.content, self.entity))
    return facts
def state_remove_rp(self):
    """Remove the resource pool and exit the module.

    Fails the module with the underlying error when the Destroy task
    cannot complete.
    """
    changed = True
    result = None
    resource_pool = self.select_resource_pool(self.host_obj)
    try:
        task = self.resource_pool_obj.Destroy()
        success, result = wait_for_task(task)
    except Exception as generic_exc:
        # IMPROVEMENT: surface the underlying failure reason instead of
        # discarding the caught exception.
        self.module.fail_json(
            msg="Failed to remove resource pool '%s' '%s' : %s"
            % (self.resource_pool, resource_pool, to_native(generic_exc)))
    self.module.exit_json(changed=changed, result=str(result))
def put_host_in_maintenance_mode(self, host_object):
    """Put host in maintenance mode, if not already.

    Uses a 300 second timeout and evacuates powered-off VMs; waits for
    the task and fails the module on any known fault.
    """
    if not host_object.runtime.inMaintenanceMode:
        try:
            try:
                maintenance_mode_task = host_object.EnterMaintenanceMode_Task(300, True, None)
            except vim.fault.InvalidState as invalid_state:
                self.module.fail_json(
                    msg="The host is already in maintenance mode : %s" % to_native(invalid_state)
                )
            except vim.fault.Timedout as timed_out:
                self.module.fail_json(
                    msg="The maintenance mode operation timed out : %s" % to_native(timed_out)
                )
            except vim.fault.RequestCanceled as canceled:
                # BUGFIX: this was a second, unreachable
                # 'except vim.fault.Timedout' handler; cancellation is
                # reported via RequestCanceled.
                self.module.fail_json(
                    msg="The maintenance mode operation was canceled : %s" % to_native(canceled)
                )
            wait_for_task(maintenance_mode_task)
        except TaskError as task_err:
            self.module.fail_json(
                msg="Failed to put the host in maintenance mode : %s" % to_native(task_err)
            )
def set_vsan_service_type(self):
    """ Set VSAN service type

    Tags the vmkernel interface for the VSAN service via UpdateVsan_Task
    (skipped in check mode).

    Returns: result of UpdateVsan_Task
    """
    result = None
    vsan_system = self.esxi_host_obj.configManager.vsanSystem
    port_cfg = vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()
    port_cfg.device = self.vnic.device
    vsan_cfg = vim.vsan.host.ConfigInfo()
    vsan_cfg.networkInfo = vim.vsan.host.ConfigInfo.NetworkInfo()
    vsan_cfg.networkInfo.port = [port_cfg]
    if not self.module.check_mode:
        try:
            vsan_task = vsan_system.UpdateVsan_Task(vsan_cfg)
            wait_for_task(vsan_task)
        except TaskError as task_err:
            self.module.fail_json(
                msg="Failed to set service type to vsan for %s : %s"
                % (self.vnic.device, to_native(task_err)))
    return result
def state_disable_evc(self):
    """Disable EVC mode on the cluster (honours check mode)."""
    changed, result = False, None
    try:
        if self.module.check_mode:
            changed, result = True, None
        else:
            changed, result = wait_for_task(self.evcm.DisableEvcMode_Task())
        self.module.exit_json(
            changed=changed,
            msg="EVC Mode has been disabled on cluster '%s'." % self.cluster_name)
    except TaskError as invalid_argument:
        self.module.fail_json(
            msg="Failed to disable EVC mode: %s" % to_native(invalid_argument))
def update_lacp_group_config(self, switch_object, lacp_group_spec):
    """Update LACP group config on the switch.

    Returns whatever wait_for_task yields for the update task.
    """
    try:
        lacp_task = switch_object.UpdateDVSLacpGroupConfig_Task(
            lacpGroupSpec=lacp_group_spec)
        outcome = wait_for_task(lacp_task)
    except vim.fault.DvsFault as dvs_fault:
        self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
    except vmodl.fault.NotSupported as not_supported:
        self.module.fail_json(
            msg=
            "Multiple Link Aggregation Control Protocol groups not supported on the switch : %s"
            % to_native(not_supported))
    except TaskError as invalid_argument:
        self.module.fail_json(
            msg="Failed to update Link Aggregation Group : %s" % to_native(invalid_argument))
    return outcome
def state_enable_evc(self):
    """Enable the requested EVC mode on the cluster (honours check mode)."""
    changed, result = False, None
    try:
        if self.module.check_mode:
            changed, result = True, None
        else:
            changed, result = wait_for_task(
                self.evcm.ConfigureEvcMode_Task(self.evc_mode))
        self.module.exit_json(
            changed=changed,
            msg="EVC Mode for '%(evc_mode)s' has been enabled on '%(cluster_name)s'." % self.params)
    except TaskError as invalid_argument:
        self.module.fail_json(
            msg="Failed to enable EVC mode: %s" % to_native(invalid_argument))