Example #1
0
    def delete(self, rule_name=None):
        """
        Delete a VM-Host DRS rule by name.

        Args:
            rule_name: Name of the rule to delete; defaults to the rule
                name stored on this object.

        Sets self.__changed and self.__msg as side effects.
        """
        # Note: the previous version declared an unused local `changed`;
        # the change flag actually lives on the instance (self.__changed).
        if rule_name is None:
            rule_name = self.__rule_name

        rule_obj = self.__get_rule_key_by_name(rule_name=rule_name)

        if rule_obj is not None:
            # RuleSpec.removeKey must be the numeric key of the rule.
            rule_key = int(rule_obj.key)
            rule_spec = vim.cluster.RuleSpec(removeKey=rule_key,
                                             operation='remove')
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])

            # Honour Ansible check mode: report without reconfiguring.
            if not self.module.check_mode:
                task = self.__cluster_obj.ReconfigureEx(config_spec,
                                                        modify=True)
                wait_for_task(task)

            self.__changed = True

        if self.__changed:
            self.__msg = "Deleted DRS rule `%s` successfully" % (
                self.__rule_name)
        else:
            self.__msg = "DRS Rule `%s` does not exists or already deleted" % (
                self.__rule_name)
Example #2
0
    def _update_version3_resources(self, resources):
        """Reconfigure the DVS with version-3 infrastructure traffic resources."""
        traffic_resources = []

        for entry in resources:
            item = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource()
            item.allocationInfo = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource.ResourceAllocation()
            item.key = entry['name']
            if 'limit' in entry:
                item.allocationInfo.limit = entry['limit']
            if 'reservation' in entry:
                item.allocationInfo.reservation = entry['reservation']
            if 'shares_level' in entry:
                item.allocationInfo.shares = vim.SharesInfo()
                item.allocationInfo.shares.level = entry['shares_level']
                # 'custom' requires an explicit shares value.
                if entry['shares_level'] == 'custom':
                    if 'shares' in entry:
                        item.allocationInfo.shares.shares = entry['shares']
                    else:
                        self.module.fail_json(
                            msg="Resource %s, shares_level set to custom but shares not specified" % entry['name']
                        )

            traffic_resources.append(item)

        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dvs.config.configVersion
        spec.infrastructureTrafficResourceConfig = traffic_resources

        wait_for_task(self.dvs.ReconfigureDvs_Task(spec))
Example #3
0
    def inject_ovf_env(self):
        """
        Build an OVF environment XML document from the configured properties
        and inject it into the VM's extraConfig under 'guestinfo.ovfEnv'.
        """
        attrib = {
            'xmlns': 'http://schemas.dmtf.org/ovf/environment/1',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xmlns:oe': 'http://schemas.dmtf.org/ovf/environment/1',
            'xmlns:ve': 'http://www.vmware.com/schema/ovfenv',
            'oe:id': '',
            've:esxId': self.entity._moId
        }
        env = ET.Element('Environment', **attrib)

        platform = ET.SubElement(env, 'PlatformSection')
        ET.SubElement(platform, 'Kind').text = self.si.about.name
        ET.SubElement(platform, 'Version').text = self.si.about.version
        ET.SubElement(platform, 'Vendor').text = self.si.about.vendor
        ET.SubElement(platform, 'Locale').text = 'US'

        prop_section = ET.SubElement(env, 'PropertySection')
        for key, value in self.params['properties'].items():
            params = {
                'oe:key': key,
                # XML attribute values must be strings; coerce everything so
                # ET.tostring does not raise TypeError for int/float values
                # (bools were already stringified before, so this is
                # backward compatible).
                'oe:value': str(value)
            }
            ET.SubElement(prop_section, 'Property', **params)

        opt = vim.option.OptionValue()
        opt.key = 'guestinfo.ovfEnv'
        opt.value = '<?xml version="1.0" encoding="UTF-8"?>' + to_native(
            ET.tostring(env))

        config_spec = vim.vm.ConfigSpec()
        config_spec.extraConfig = [opt]

        task = self.entity.ReconfigVM_Task(config_spec)
        wait_for_task(task)
    def __create_vm_group(self):
        """Create the VM group, or update it when editing and membership changed."""
        # Lazy second operand: membership is only compared when editing.
        needs_reconfig = (
            self.__operation == 'add'
            or (self.__operation == 'edit' and self.__check_if_vms_hosts_changed())
        )

        if needs_reconfig:
            group = vim.cluster.VmGroup()
            group.name = self.__group_name
            group.vm = self.__vm_obj_list

            group_spec = vim.cluster.GroupSpec(info=group,
                                               operation=self.__operation)
            config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

            # Honour Ansible check mode: skip the reconfigure call.
            if not self.module.check_mode:
                wait_for_task(self.__cluster_obj.ReconfigureEx(config_spec,
                                                               modify=True))

            self.__set_result(group)
            self.__changed = True

        if self.__operation == 'edit':
            self.__msg = "Updated vm group %s successfully" % (
                self.__group_name)
        else:
            self.__msg = "Created vm group %s successfully" % (
                self.__group_name)
    def set_port_security_promiscuous(self, ports, state):
        """Apply the given promiscuous-mode state to a list of ports.

        Parameters
        ----------
        ports : list of str
            Port keys to reconfigure.
        state : bool
            Desired promiscuous mode; True allows it, False forbids it.
        """
        # One shared security policy / settings object for all ports.
        security_policy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy(
            allowPromiscuous=vim.BoolPolicy(value=state))
        settings = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy(
            securityPolicy=security_policy)

        # One 'edit' spec per port key.
        port_spec = [
            vim.dvs.DistributedVirtualPort.ConfigSpec(
                operation="edit", key=port_key, setting=settings)
            for port_key in ports
        ]

        task = self.dv_switch.ReconfigureDVPort_Task(port_spec)
        try:
            wait_for_task(task)
        except Exception:
            # Roll back before surfacing the task's own error message.
            self.restore_original_state()
            self.module.fail_json(msg=task.info.error.msg)
    def delete_drs_group(self):
        """
        Delete the DRS host/vm group if it exists; a missing group is not
        an error, it simply leaves self.__changed False.
        """
        if self.__group_obj is not None:
            self.__changed = True

            # Honour Ansible check mode: skip the reconfigure call.
            if not self.module.check_mode:
                group_spec = vim.cluster.GroupSpec(removeKey=self.__group_name,
                                                   operation=self.__operation)
                config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])
                wait_for_task(self.__cluster_obj.ReconfigureEx(config_spec,
                                                               modify=True))

        if self.__changed:
            self.__msg = "Deleted group `%s` successfully" % (
                self.__group_name)
        else:
            self.__msg = "DRS group `%s` does not exists or already deleted" % (
                self.__group_name)
 def update_dvs_config(self, switch_object, spec):
     """Apply *spec* to *switch_object* and wait for the task to finish."""
     try:
         wait_for_task(switch_object.ReconfigureDvs_Task(spec))
     except TaskError as invalid_argument:
         self.module.fail_json(
             msg="Failed to update DVS : %s" % to_native(invalid_argument))
Example #8
0
    def create(self):
        """
        Create a VM-Host DRS rule if it does not exist, or edit the existing
        rule when its settings differ from the requested ones.

        Sets self.__changed, self.__result and self.__msg as side effects.
        """
        rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)

        # Check if rule exists
        if rule_obj:

            operation = 'edit'
            rule_changed = self.__check_rule_has_changed(rule_obj)

        else:
            operation = 'add'

        # Check if anything has changed when editing.
        # NOTE: rule_changed is only bound when operation == 'edit'; the
        # short-circuit `and` below keeps it from being read otherwise.
        if operation == 'add' or (operation == 'edit'
                                  and rule_changed is True):

            rule = vim.cluster.VmHostRuleInfo()

            # Check if already rule exists
            if rule_obj:
                # This need to be set in order to edit a existing rule
                rule.key = rule_obj.key

            rule.enabled = self.__enabled
            rule.mandatory = self.__mandatory
            rule.name = self.__rule_name

            # Affinity pins the VM group to the host group; anti-affinity
            # keeps them apart.
            if self.__affinity_rule:
                rule.affineHostGroupName = self.__host_group_name
            else:
                rule.antiAffineHostGroupName = self.__host_group_name

            rule.vmGroupName = self.__vm_group_name

            rule_spec = vim.cluster.RuleSpec(info=rule, operation=operation)
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])

            # Honour Ansible check mode: skip the reconfigure call.
            if not self.module.check_mode:

                task = self.__cluster_obj.ReconfigureEx(config_spec,
                                                        modify=True)
                wait_for_task(task)

            self.__changed = True

        # Re-read the rule so the reported result reflects the stored spec.
        rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)
        self.__result = self.__normalize_vm_host_rule_spec(rule_obj)

        if operation == 'edit':
            self.__msg = "Updated DRS rule `%s` successfully" % (
                self.__rule_name)
        else:
            self.__msg = "Created DRS rule `%s` successfully" % (
                self.__rule_name)
Example #9
0
    def set_nioc_version(self):
        """Set the Network I/O Control version on the DVS (defaults to version2)."""
        if not self.version:
            self.version = 'version2'

        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dvs.config.configVersion
        spec.networkResourceControlVersion = self.version

        try:
            wait_for_task(self.dvs.ReconfigureDvs_Task(spec=spec))
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg="RuntimeFault when setting NIOC version: %s " % to_native(runtime_fault.msg))
Example #10
0
def set_vnc_extraconfig(content, vm, enabled, ip, port, password):
    """Apply the requested VNC extraConfig settings to *vm*.

    Returns an Ansible-style result dict with 'changed'/'failed' flags,
    a 'msg' on failure and VM facts on success. No task is issued when
    the current configuration already matches.
    """
    result = dict(
        changed=False,
        failed=False,
    )

    # Desired state: all keys cleared unless VNC is enabled.
    key_prefix = "remotedisplay.vnc."
    new_values = {key_prefix + name: "" for name in ('enabled', 'ip', 'port', 'password')}
    if enabled:
        new_values[key_prefix + "enabled"] = "true"
        new_values[key_prefix + "password"] = str(password).strip()
        new_values[key_prefix + "ip"] = str(ip).strip()
        new_values[key_prefix + "port"] = str(port).strip()

    # Compare against the current VNC config to decide whether to reconfigure.
    current_values = get_vnc_extraconfig(vm)
    reconfig_vm = False
    for full_key, desired in new_values.items():
        short_key = full_key.replace(key_prefix, "")
        current = current_values.get(short_key, "")
        # 'enabled' is compared case-insensitively.
        if short_key == "enabled":
            current = current.lower()
            desired = desired.lower()
        if current != desired:
            reconfig_vm = True
    if not reconfig_vm:
        return result

    # Build one OptionValue per key and reconfigure the VM.
    options = []
    for opt_key, opt_val in new_values.items():
        option = vim.option.OptionValue()
        option.key = opt_key
        option.value = opt_val
        options.append(option)
    spec = vim.vm.ConfigSpec()
    spec.extraConfig = options
    task = vm.ReconfigVM_Task(spec)
    try:
        wait_for_task(task)
    except TaskError as task_err:
        result['failed'] = True
        result['msg'] = to_native(task_err)

    if task.info.state == 'error':
        result['failed'] = True
        result['msg'] = task.info.error.msg
    else:
        result['changed'] = True
        result['instance'] = gather_vm_facts(content, vm)
    return result
    def take_vm_screenshot(self):
        """Take a screenshot of the powered-on VM and return result facts.

        Fails the module when the VM is not powered on or when the
        screenshot task raises a known fault. On success, optionally
        downloads the screenshot file when 'local_path' is set.

        Returns: dict with 'changed', 'failed' and either 'msg' (on task
        error) or 'screenshot_info'.
        """
        # Screenshots are only possible while the guest is powered on.
        if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
            self.module.fail_json(
                msg="VM is %s, valid power state is poweredOn." %
                self.current_vm_obj.runtime.powerState)
        try:
            task = self.current_vm_obj.CreateScreenshot_Task()
            wait_for_task(task)
        except vim.fault.FileFault as e:
            self.module.fail_json(
                msg=
                "Failed to create screenshot due to errors when creating or accessing one or more"
                " files needed for this operation, %s" % to_native(e.msg))
        except vim.fault.InvalidState as e:
            self.module.fail_json(
                msg=
                "Failed to create screenshot due to VM is not ready to respond to such requests,"
                " %s" % to_native(e.msg))
        except vmodl.RuntimeFault as e:
            self.module.fail_json(
                msg="Failed to create screenshot due to runtime fault, %s," %
                to_native(e.msg))
        except vim.fault.TaskInProgress as e:
            self.module.fail_json(
                msg="Failed to create screenshot due to VM is busy, %s" %
                to_native(e.msg))

        if task.info.state == 'error':
            return {
                'changed': self.change_detected,
                'failed': True,
                'msg': task.info.error.msg
            }
        else:
            download_file_size = None
            self.change_detected = True
            # task.info.result holds the screenshot's datastore path; turn it
            # into an HTTP URL for the optional download below.
            file_url = self.generate_http_access_url(task.info.result)
            if self.params.get('local_path'):
                if file_url:
                    download_file_size = self.download_screenshot_file(
                        file_url=file_url,
                        local_file_path=self.params['local_path'],
                        file_name=task.info.result.split('/')[-1])
            screenshot_facts = self.get_screenshot_facts(
                task.info, file_url, download_file_size)
            return {
                'changed': self.change_detected,
                'failed': False,
                'screenshot_info': screenshot_facts
            }
 def update_health_check_config(self, switch_object, health_check_config):
     """Apply the given health check configuration to the switch."""
     try:
         update_task = switch_object.UpdateDVSHealthCheckConfig_Task(healthCheckConfig=health_check_config)
     except vim.fault.DvsFault as dvs_fault:
         self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
     except vmodl.fault.NotSupported as not_supported:
         self.module.fail_json(msg="Health check not supported on the switch : %s" % to_native(not_supported))
     except TaskError as invalid_argument:
         self.module.fail_json(msg="Failed to configure health check : %s" % to_native(invalid_argument))

     # Waiting happens in a second try block so that task-start faults and
     # task-completion faults keep their distinct error messages.
     try:
         wait_for_task(update_task)
     except TaskError as invalid_argument:
         self.module.fail_json(msg="Failed to update health check config : %s" % to_native(invalid_argument))
 def destroy_dvswitch(self):
     """Delete the DVS and exit the module with the result.

     In check mode no task is issued; the module only reports what
     would happen.
     """
     changed = True
     results = dict(changed=changed)
     results['dvswitch'] = self.switch_name
     if self.module.check_mode:
         results['result'] = "DVS would be deleted"
     else:
         try:
             task = self.dvs.Destroy_Task()
         except vim.fault.VimFault as vim_fault:
             self.module.fail_json(msg="Failed to delete DVS : %s" % to_native(vim_fault))
         wait_for_task(task)
         results['result'] = "DVS deleted"
     self.module.exit_json(**results)
    def modify_dvs_host(self, operation):
        """Add, edit or remove this host on the DVS; returns (changed, result)."""
        changed, result = False, None

        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dv_switch.config.configVersion
        host_member = vim.dvs.HostMember.ConfigSpec()
        host_member.operation = operation
        host_member.host = self.host
        spec.host = [host_member]

        if operation in ("edit", "add"):
            # Back every configured vmnic with its uplink portgroup.
            backing = vim.dvs.HostMember.PnicBacking()
            for nic in self.vmnics:
                pnic_spec = vim.dvs.HostMember.PnicSpec()
                pnic_spec.pnicDevice = nic
                pnic_spec.uplinkPortgroupKey = self.uplink_portgroup.key
                backing.pnicSpec.append(pnic_spec)
            host_member.backing = backing

        try:
            task = self.dv_switch.ReconfigureDvs_Task(spec)
            changed, result = wait_for_task(task)
        except vmodl.fault.NotSupported as not_supported:
            self.module.fail_json(
                msg="Failed to configure DVS host %s as it is not"
                " compatible with the VDS version." % self.esxi_hostname,
                details=to_native(not_supported.msg))
        return changed, result
Example #15
0
 def state_remove_host(self):
     """Remove the host from vCenter and exit the module with the result.

     Hosts that live in a cluster are put into maintenance mode first.
     Check mode only reports what would happen.
     """
     changed = True
     result = None
     if self.module.check_mode:
         result = "Host would be removed from vCenter '%s'" % self.vcenter
     else:
         # Hosts inside a cluster must enter maintenance mode before removal.
         parent_type = self.get_parent_type(self.host_update)
         if parent_type == 'cluster':
             self.put_host_in_maintenance_mode(self.host_update)
         task = None
         try:
             if self.folder_name:
                 task = self.host_parent_compute_resource.Destroy_Task()
             elif self.cluster_name:
                 task = self.host.Destroy_Task()
         except vim.fault.VimFault as vim_fault:
             # fail_json expects a printable message, not a raw fault object.
             self.module.fail_json(msg=to_native(vim_fault))
         if task is None:
             # Previously this path crashed with a NameError when neither
             # folder_name nor cluster_name was set; fail cleanly instead.
             self.module.fail_json(
                 msg="Failed to remove the host from vCenter '%s' : neither folder nor cluster specified" % self.vcenter
             )
         try:
             changed, result = wait_for_task(task)
             result = "Host removed from vCenter '%s'" % self.vcenter
         except TaskError as task_error:
             self.module.fail_json(
                 msg="Failed to remove the host from vCenter '%s' : %s" % (self.vcenter, to_native(task_error))
             )
     self.module.exit_json(changed=changed, result=str(result))
Example #16
0
    def reconnect_host(self, host_object):
        """Reconnect a disconnected host to vCenter.

        Builds a ReconnectSpec that always syncs state and, when ESXi
        credentials are configured, a fresh connection spec so a reinstalled
        host with a new self-signed certificate can still reconnect. Fails
        the module with a specific message per known fault type.

        Args:
            host_object: host system object to reconnect.
        """
        reconnecthost_args = {}
        reconnecthost_args['reconnectSpec'] = vim.HostSystem.ReconnectSpec()
        reconnecthost_args['reconnectSpec'].syncState = True

        if self.esxi_username and self.esxi_password:
            # Build the connection spec as well and fetch thumbprint if enabled.
            # Useful if you reinstalled a host and it uses a new self-signed certificate.
            reconnecthost_args['cnxSpec'] = self.get_host_connect_spec()
        try:
            task = host_object.ReconnectHost_Task(**reconnecthost_args)
        except vim.fault.InvalidLogin as invalid_login:
            self.module.fail_json(
                msg="Cannot authenticate with the host : %s" % to_native(invalid_login)
            )
        except vim.fault.InvalidState as invalid_state:
            self.module.fail_json(
                msg="The host is not disconnected : %s" % to_native(invalid_state)
            )
        except vim.fault.InvalidName as invalid_name:
            self.module.fail_json(
                msg="The host name is invalid : %s" % to_native(invalid_name)
            )
        except vim.fault.HostConnectFault as connect_fault:
            self.module.fail_json(
                msg="An error occurred during reconnect : %s" % to_native(connect_fault)
            )
        except vmodl.fault.NotSupported as not_supported:
            self.module.fail_json(
                msg="No host can be added to this group : %s" % to_native(not_supported)
            )
        except vim.fault.AlreadyBeingManaged as already_managed:
            self.module.fail_json(
                msg="The host is already being managed by another vCenter server : %s" % to_native(already_managed)
            )
        except vmodl.fault.NotEnoughLicenses as not_enough_licenses:
            self.module.fail_json(
                msg="There are not enough licenses to add this host : %s" % to_native(not_enough_licenses)
            )
        except vim.fault.NoHost as no_host:
            self.module.fail_json(
                msg="Unable to contact the host : %s" % to_native(no_host)
            )
        except vim.fault.NotSupportedHost as host_not_supported:
            self.module.fail_json(
                msg="The host is running a software version that is not supported : %s" %
                to_native(host_not_supported)
            )
        except vim.fault.SSLVerifyFault as ssl_fault:
            self.module.fail_json(
                msg="The host certificate could not be authenticated : %s" % to_native(ssl_fault)
            )
        # Waiting is separate so task-completion errors get their own message.
        try:
            changed, result = wait_for_task(task)
        except TaskError as task_error:
            self.module.fail_json(
                msg="Failed to reconnect host to vCenter '%s' due to %s" %
                (self.vcenter, to_native(task_error))
            )
    def ExitMaintenanceMode(self):
        """Take the host out of maintenance mode; no-op exit when not in it."""
        if not self.host.runtime.inMaintenanceMode:
            self.module.exit_json(changed=False,
                                  hostsystem=str(self.host),
                                  hostname=self.esxi_hostname,
                                  status='NO_ACTION',
                                  msg='Host %s not in maintenance mode' %
                                  self.esxi_hostname)

        try:
            exit_task = self.host.ExitMaintenanceMode_Task(
                self.module.params['timeout'])
            success, result = wait_for_task(exit_task)
        except TaskError as e:
            self.module.fail_json(
                msg='Host %s failed to exit maintenance mode due to %s' %
                (self.esxi_hostname, to_native(e)))

        self.module.exit_json(changed=success,
                              hostsystem=str(self.host),
                              hostname=self.esxi_hostname,
                              status='EXIT',
                              msg='Host %s exited maintenance mode' %
                              self.esxi_hostname)
    def EnterMaintenanceMode(self):
        """Put the host into maintenance mode; no-op exit when already there."""
        if self.host.runtime.inMaintenanceMode:
            self.module.exit_json(changed=False,
                                  hostsystem=str(self.host),
                                  hostname=self.esxi_hostname,
                                  status='NO_ACTION',
                                  msg='Host %s already in maintenance mode' %
                                  self.esxi_hostname)

        spec = vim.host.MaintenanceSpec()
        if self.vsan:
            # Propagate the requested vSAN decommission object action.
            spec.vsanMode = vim.vsan.host.DecommissionMode()
            spec.vsanMode.objectAction = self.vsan

        try:
            enter_task = self.host.EnterMaintenanceMode_Task(
                self.module.params['timeout'],
                self.module.params['evacuate'],
                spec)
            success, result = wait_for_task(enter_task)
        except TaskError as e:
            self.module.fail_json(
                msg='Host %s failed to enter maintenance mode due to %s' %
                (self.esxi_hostname, to_native(e)))

        self.module.exit_json(changed=success,
                              hostsystem=str(self.host),
                              hostname=self.esxi_hostname,
                              status='ENTER',
                              msg='Host %s entered maintenance mode' %
                              self.esxi_hostname)
Example #19
0
    def create_rule_spec(self):
        """
        Create an (anti-)affinity DRS rule; returns (changed, result).
        """
        changed = False
        rule_cls = (vim.cluster.AffinityRuleSpec if self.affinity_rule
                    else vim.cluster.AntiAffinityRuleSpec)
        rule = rule_cls()
        rule.vm = self.vm_obj_list
        rule.enabled = self.enabled
        rule.mandatory = self.mandatory
        rule.name = self.rule_name

        config_spec = vim.cluster.ConfigSpecEx(
            rulesSpec=[vim.cluster.RuleSpec(info=rule, operation='add')])

        try:
            changed, result = wait_for_task(
                self.cluster_obj.ReconfigureEx(config_spec, modify=True))
        except vmodl.fault.InvalidRequest as e:
            result = to_native(e.msg)
        except Exception as e:
            result = to_native(e)

        if changed:
            # Re-read the rule so the returned facts reflect the stored spec.
            rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
            result = self.normalize_rule_spec(rule_obj)

        return changed, result
Example #20
0
    def update_rule_spec(self, rule_obj=None):
        """
        Update an existing DRS (anti-)affinity rule in place.

        Args:
            rule_obj: Existing rule object to edit. NOTE(review): the
                default of None crashes on the first attribute access
                below — callers must always pass a rule object.

        Returns: tuple of (changed, result)
        """
        changed = False

        rule_obj.vm = self.vm_obj_list

        if (rule_obj.mandatory != self.mandatory):
            rule_obj.mandatory = self.mandatory

        if (rule_obj.enabled != self.enabled):
            rule_obj.enabled = self.enabled

        rule_spec = vim.cluster.RuleSpec(info=rule_obj, operation='edit')
        # NOTE(review): uses vim.cluster.ConfigSpec with
        # ReconfigureCluster_Task here, while create_rule_spec uses
        # ConfigSpecEx with ReconfigureEx — confirm the asymmetry is intended.
        config_spec = vim.cluster.ConfigSpec(rulesSpec=[rule_spec])

        try:
            task = self.cluster_obj.ReconfigureCluster_Task(config_spec,
                                                            modify=True)
            changed, result = wait_for_task(task)
        except vmodl.fault.InvalidRequest as e:
            result = to_native(e.msg)
        except Exception as e:
            result = to_native(e)

        if changed:
            # Re-read the rule so the returned facts reflect the stored spec.
            rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
            result = self.normalize_rule_spec(rule_obj)

        return changed, result
    def state_update_cluster(self):
        """
        Reconfigure an existing cluster, touching only the enabled
        subsystems (HA/DRS/vSAN) whose configuration actually differs.
        """
        changed, result = False, None
        cluster_config_spec = vim.cluster.ConfigSpecEx()
        diff = False  # reconfigure task is only issued when something changed

        if self.check_ha_config_diff() and not self.ignore_ha:
            cluster_config_spec.dasConfig = self.configure_ha()
            diff = True
        if self.check_drs_config_diff() and not self.ignore_drs:
            cluster_config_spec.drsConfig = self.configure_drs()
            diff = True
        if self.check_vsan_config_diff() and not self.ignore_vsan:
            cluster_config_spec.vsanConfig = self.configure_vsan()
            diff = True

        try:
            if diff and not self.module.check_mode:
                reconfig_task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                changed, result = wait_for_task(reconfig_task)
            self.module.exit_json(changed=changed, result=result)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=to_native(method_fault.msg))
        except TaskError as task_e:
            self.module.fail_json(msg=to_native(task_e))
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to update cluster"
                                      " due to generic exception %s" % to_native(generic_exc))
    def reconfigure_vm(self, config_spec, device_type):
        """
        Apply *config_spec* to the virtual machine and wait for the task.

        Args:
            config_spec: Config Spec to apply
            device_type: Type of device being modified (used in error messages)

        Returns: Boolean status 'changed' and actual task result
        """
        changed, results = False, ''
        try:
            # Perform the actual VM reconfiguration.
            reconfig_task = self.vm.ReconfigVM_Task(spec=config_spec)
            changed, results = wait_for_task(reconfig_task)
        except vim.fault.InvalidDeviceSpec as invalid_device_spec:
            self.module.fail_json(
                msg="Failed to manage %s on given virtual machine due to invalid"
                " device spec : %s" %
                (device_type, to_native(invalid_device_spec.msg)),
                details="Please check ESXi server logs for more details.")
        except vim.fault.RestrictedVersion as e:
            self.module.fail_json(
                msg="Failed to reconfigure virtual machine due to"
                " product versioning restrictions: %s" % to_native(e.msg))

        return changed, results
 def state_update_evc(self):
     """
     Update the cluster's EVC mode when it differs from the requested one,
     then exit the module with an appropriate message.
     """
     changed, result = False, None
     try:
         if not self.module.check_mode and self.current_evc_mode != self.evc_mode:
             changed, result = wait_for_task(
                 self.evcm.ConfigureEvcMode_Task(self.evc_mode))
         if self.module.check_mode and self.current_evc_mode != self.evc_mode:
             changed, result = True, None
         # current_evc_mode is compared again after a possible update;
         # presumably it still holds the pre-update value — TODO confirm.
         if self.current_evc_mode == self.evc_mode:
             self.module.exit_json(
                 changed=changed,
                 msg="EVC Mode is already set to '%(evc_mode)s' on '%(cluster_name)s'."
                 % self.params)
         self.module.exit_json(
             changed=changed,
             msg="EVC Mode has been updated to '%(evc_mode)s' on '%(cluster_name)s'."
             % self.params)
     except TaskError as invalid_argument:
         self.module.fail_json(msg="Failed to update EVC mode: %s" %
                               to_native(invalid_argument))
    def migrate_network_adapter_vds(self):
        """
        Move every virtual NIC of the VM onto the distributed portgroup and
        exit the module with the task outcome.
        """
        pg = self.find_dvspg_by_name()
        if pg is None:
            self.module.fail_json(msg="The standard portgroup was not found")

        dvswitch = pg.config.distributedVirtualSwitch
        port = vim.dvs.PortConnection()
        port.switchUuid = dvswitch.uuid
        port.portgroupKey = pg.key

        vm_configspec = vim.vm.ConfigSpec()
        for device in self.vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualEthernetCard):
                # Build a fresh backing and device spec per NIC. The previous
                # version reused one VirtualDeviceSpec object across the loop,
                # so every deviceChange entry aliased the same (last) device.
                nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
                nic.port = port
                devicespec = vim.vm.device.VirtualDeviceSpec()
                devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
                devicespec.device = device
                devicespec.device.backing = nic
                vm_configspec.deviceChange.append(devicespec)

        task = self.vm.ReconfigVM_Task(vm_configspec)
        changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=result)
    def configure_vsan(self):
        """
        Bring the cluster's vSAN configuration in line with the module
        parameters and exit the module with the outcome.
        """
        changed, result = False, None

        if self.check_vsan_config_diff():
            if self.module.check_mode:
                # Dry run: report the pending change without applying it.
                changed = True
            else:
                vsan_config = vim.vsan.cluster.ConfigInfo()
                vsan_config.enabled = self.enable_vsan
                vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
                vsan_config.defaultConfig.autoClaimStorage = self.params.get(
                    'vsan_auto_claim_storage')
                cluster_config_spec = vim.cluster.ConfigSpecEx()
                cluster_config_spec.vsanConfig = vsan_config
                try:
                    task = self.cluster.ReconfigureComputeResource_Task(
                        cluster_config_spec, True)
                    changed, result = wait_for_task(task)
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to update cluster"
                                          " due to generic exception %s" %
                                          to_native(generic_exc))

        self.module.exit_json(changed=changed, result=result)
# Example #26
# 0
    def state_destroy_dvspg(self):
        """Delete the distributed virtual portgroup and exit the module.

        In check mode nothing is destroyed; 'changed' is still reported
        as True because the portgroup exists and would be removed.
        """
        changed, result = True, None

        if not self.module.check_mode:
            # Destroy_Task returns a vSphere task; wait_for_task blocks
            # until it completes and reports the outcome.
            destroy_task = self.dvs_portgroup.Destroy_Task()
            changed, result = wait_for_task(destroy_task)
        self.module.exit_json(changed=changed, result=str(result))
 def exit_maintenance(self):
     """Take the host out of maintenance mode, failing the module on error."""
     try:
         # timeout=15: give vCenter 15 seconds to start the mode change.
         mm_task = self.host.ExitMaintenanceMode_Task(timeout=15)
         success, result = wait_for_task(mm_task)
     except Exception as generic_exc:
         self.module.fail_json(
             msg="Failed to exit maintenance mode due to %s" %
             to_native(generic_exc))
# Example #28
# 0
    def create_port_group(self):
        """Create the distributed virtual portgroup on the DVS.

        Builds a DistributedVirtualPortgroup.ConfigSpec from the module
        parameters (name/ports, VLAN or VLAN trunk, security policy,
        uplink teaming policy, per-portgroup override policies, type)
        and submits it via AddDVPortgroup_Task on self.dv_switch.

        Returns:
            tuple: (changed, result) as reported by wait_for_task.
        """
        config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()

        # Basic config
        config.name = self.module.params['portgroup_name']
        config.numPorts = self.module.params['num_ports']

        # Default port config
        config.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
        if self.module.params['vlan_trunk']:
            # Trunk mode: 'vlan_id' is a comma-separated list of single
            # IDs and/or 'start-end' ranges, e.g. "10,20-30".
            config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
            vlan_id_list = []
            for vlan_id_splitted in self.module.params['vlan_id'].split(','):
                try:
                    vlan_id_start, vlan_id_end = map(int, vlan_id_splitted.split('-'))
                except ValueError:
                    # Not a 'start-end' range: treat as a single VLAN ID
                    # (a malformed token still raises ValueError here,
                    # matching the previous behavior).
                    vlan_id_start = vlan_id_end = int(vlan_id_splitted.strip())
                # Validate both ranges AND single IDs; previously single
                # IDs bypassed this check and out-of-range values were
                # passed straight to the vSphere API.
                if vlan_id_start not in range(0, 4095) or vlan_id_end not in range(0, 4095):
                    self.module.fail_json(msg="vlan_id range %s specified is incorrect. The valid vlan_id range is from 0 to 4094." % vlan_id_splitted)
                vlan_id_list.append(vim.NumericRange(start=vlan_id_start, end=vlan_id_end))
            config.defaultPortConfig.vlan.vlanId = vlan_id_list
        else:
            config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
            config.defaultPortConfig.vlan.vlanId = int(self.module.params['vlan_id'])
        config.defaultPortConfig.vlan.inherited = False
        config.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
        config.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=self.module.params['network_policy']['promiscuous'])
        config.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=self.module.params['network_policy']['forged_transmits'])
        config.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=self.module.params['network_policy']['mac_changes'])

        # Teaming Policy
        teamingPolicy = vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy()
        teamingPolicy.policy = vim.StringPolicy(value=self.module.params['teaming_policy']['load_balance_policy'])
        teamingPolicy.reversePolicy = vim.BoolPolicy(value=self.module.params['teaming_policy']['inbound_policy'])
        teamingPolicy.notifySwitches = vim.BoolPolicy(value=self.module.params['teaming_policy']['notify_switches'])
        teamingPolicy.rollingOrder = vim.BoolPolicy(value=self.module.params['teaming_policy']['rolling_order'])
        config.defaultPortConfig.uplinkTeamingPolicy = teamingPolicy

        # PG policy (advanced_policy): which settings individual ports
        # may override at the portgroup level.
        config.policy = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy()
        config.policy.blockOverrideAllowed = self.module.params['port_policy']['block_override']
        config.policy.ipfixOverrideAllowed = self.module.params['port_policy']['ipfix_override']
        config.policy.livePortMovingAllowed = self.module.params['port_policy']['live_port_move']
        config.policy.networkResourcePoolOverrideAllowed = self.module.params['port_policy']['network_rp_override']
        config.policy.portConfigResetAtDisconnect = self.module.params['port_policy']['port_config_reset_at_disconnect']
        config.policy.securityPolicyOverrideAllowed = self.module.params['port_policy']['security_override']
        config.policy.shapingOverrideAllowed = self.module.params['port_policy']['shaping_override']
        config.policy.trafficFilterOverrideAllowed = self.module.params['port_policy']['traffic_filter_override']
        config.policy.uplinkTeamingOverrideAllowed = self.module.params['port_policy']['uplink_teaming_override']
        config.policy.vendorConfigOverrideAllowed = self.module.params['port_policy']['vendor_config_override']
        config.policy.vlanOverrideAllowed = self.module.params['port_policy']['vlan_override']

        # PG Type
        config.type = self.module.params['portgroup_type']

        task = self.dv_switch.AddDVPortgroup_Task([config])
        changed, result = wait_for_task(task)
        return changed, result
    def ensure(self):
        """Put every datastore in (state=present) or out (state=absent) of
        maintenance mode.

        Iterates self.datastore_objs, records a per-datastore message in
        datastore_results, and exits the module with changed=True if any
        datastore actually transitioned. Datastores already in the
        desired state are skipped with `continue` (previously `break`,
        which silently abandoned all remaining datastores).
        """
        datastore_results = dict()
        change_datastore_list = []
        for datastore in self.datastore_objs:
            changed = False
            # Skip datastores already in the desired state; keep
            # processing the rest of the list.
            if self.state == 'present' and datastore.summary.maintenanceMode != 'normal':
                datastore_results[
                    datastore.
                    name] = "Datastore '%s' is already in maintenance mode." % datastore.name
                continue
            elif self.state == 'absent' and datastore.summary.maintenanceMode == 'normal':
                datastore_results[
                    datastore.
                    name] = "Datastore '%s' is not in maintenance mode." % datastore.name
                continue

            try:
                if self.state == 'present':
                    # Enter returns a StoragePlacementResult wrapping
                    # the actual task.
                    storage_replacement_result = datastore.DatastoreEnterMaintenanceMode(
                    )
                    task = storage_replacement_result.task
                else:
                    task = datastore.DatastoreExitMaintenanceMode_Task()

                success, result = wait_for_task(task)

                if success:
                    changed = True
                    if self.state == 'present':
                        datastore_results[
                            datastore.
                            name] = "Datastore '%s' entered in maintenance mode." % datastore.name
                    else:
                        datastore_results[
                            datastore.
                            name] = "Datastore '%s' exited from maintenance mode." % datastore.name
            except vim.fault.InvalidState as invalid_state:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to : %s" % to_native(
                    invalid_state.msg)
                self.module.fail_json(msg=msg)
            except Exception as exc:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to generic exception : %s" % to_native(
                    exc)
                self.module.fail_json(msg=msg)
            change_datastore_list.append(changed)

        # changed=True if at least one datastore transitioned.
        changed = any(change_datastore_list)
        self.module.exit_json(changed=changed, results=datastore_results)
 def enter_maintenance(self):
     """Put the host into maintenance mode, failing the module on error."""
     try:
         # timeout=15: give vCenter 15 seconds to start the mode change.
         mm_task = self.host.EnterMaintenanceMode_Task(timeout=15)
         success, result = wait_for_task(mm_task)
     except Exception as generic_exc:
         self.module.fail_json(
             msg="Failed to enter maintenance mode."
             " Ensure that there are no powered on machines on the host. %s"
             % to_native(generic_exc))