Example 1
    def put_host_in_maintenance_mode(self, host_object):
        """Put host in maintenance mode, if not already"""
        if not host_object.runtime.inMaintenanceMode:
            try:
                try:
                    maintenance_mode_task = host_object.EnterMaintenanceMode_Task(
                        300, True, None)
                except vim.fault.InvalidState as invalid_state:
                    self.module.fail_json(
                        msg="The host is already in maintenance mode : %s" %
                        to_native(invalid_state))
                except vim.fault.Timedout as timed_out:
                    self.module.fail_json(
                        msg="The maintenance mode operation timed out : %s" %
                        to_native(timed_out))
                except vim.fault.RequestCanceled as request_canceled:
                    self.module.fail_json(
                        msg="The maintenance mode operation was canceled : %s"
                        % to_native(request_canceled))
                wait_for_task(maintenance_mode_task)
            except TaskError as task_err:
                self.module.fail_json(
                    msg="Failed to put the host in maintenance mode : %s" %
                    to_native(task_err))
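
Note: every example in this listing relies on the wait_for_task helper and the TaskError exception from Ansible's VMware module utilities (ansible.module_utils.vmware, later moved to the community.vmware collection). The sketch below is a simplified reconstruction inferred from how the examples use it, namely that it returns a (changed, result) tuple on success and raises TaskError on failure; it is not the verbatim library source.

    import time

    from pyVmomi import vim


    class TaskError(Exception):
        """Raised when a vSphere task finishes in an error state."""


    def wait_for_task(task):
        """Poll a vim.Task until it completes.

        Returns (changed, result) on success; raises TaskError on failure.
        """
        while True:
            if task.info.state == vim.TaskInfo.State.success:
                return True, task.info.result
            if task.info.state == vim.TaskInfo.State.error:
                # Surface the server-side error message if one is available
                error_msg = getattr(task.info.error, 'msg', 'Unknown task error')
                raise TaskError(error_msg)
            time.sleep(1)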
Example 2
    def set_vsan_service_type(self):
        """
        Set VSAN service type
        Returns: result of UpdateVsan_Task

        """
        result = None
        vsan_system = self.esxi_host_obj.configManager.vsanSystem

        vsan_port_config = vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()
        vsan_port_config.device = self.vnic.device

        vsan_config = vim.vsan.host.ConfigInfo()
        vsan_config.networkInfo = vim.vsan.host.ConfigInfo.NetworkInfo()
        vsan_config.networkInfo.port = [vsan_port_config]
        if not self.module.check_mode:
            try:
                vsan_task = vsan_system.UpdateVsan_Task(vsan_config)
                result = wait_for_task(vsan_task)
            except TaskError as task_err:
                self.module.fail_json(
                    msg="Failed to set service type to vsan for %s : %s" % (self.vnic.device, to_native(task_err))
                )
        return result
Example 3
    def create_vspan_session(self):
        """Builds up the session, adds the parameters that we specified, then creates it on the vSwitch"""

        session = vim.dvs.VmwareDistributedVirtualSwitch.VspanSession(
            name=self.name,
            enabled=True
        )
        if self.session_type is not None:
            session.sessionType = self.session_type
            if self.session_type in ('encapsulatedRemoteMirrorSource', 'remoteMirrorSource', 'dvPortMirror'):
                self.check_source_port_received(session)
                self.check_source_port_transmitted(session)
                self.check_destination_port(session)
            elif self.session_type == 'remoteMirrorDest':
                self.check_source_port_received(session)
                self.check_destination_port(session)

        self.check_self_properties(session)

        config_version = self.dv_switch.config.configVersion
        s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(vspanSession=session, operation="add")
        c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(vspanConfigSpec=[s_spec], configVersion=config_version)
        task = self.dv_switch.ReconfigureDvs_Task(c_spec)
        try:
            wait_for_task(task)
        except Exception:
            self.restore_original_state()
            self.module.fail_json(msg=task.info.error.msg)
Example 4
    def configure_vsan(self):
        """
        Manage VSAN configuration

        """
        changed, result = False, None

        if self.check_vsan_config_diff():
            if not self.module.check_mode:
                vSanSpec = vim.vsan.ReconfigSpec(
                    modify=True,
                )
                vSanSpec.vsanClusterConfig = vim.vsan.cluster.ConfigInfo(
                    enabled=self.enable_vsan
                )
                vSanSpec.vsanClusterConfig.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo(
                    autoClaimStorage=self.params.get('vsan_auto_claim_storage')
                )
                if self.advanced_options is not None:
                    vSanSpec.extendedConfig = vim.vsan.VsanExtendedConfig()
                    if self.advanced_options['automatic_rebalance'] is not None:
                        vSanSpec.extendedConfig.proactiveRebalanceInfo = vim.vsan.ProactiveRebalanceInfo(
                            enabled=self.advanced_options['automatic_rebalance']
                        )
                    if self.advanced_options['disable_site_read_locality'] is not None:
                        vSanSpec.extendedConfig.disableSiteReadLocality = self.advanced_options['disable_site_read_locality']
                    if self.advanced_options['large_cluster_support'] is not None:
                        vSanSpec.extendedConfig.largeScaleClusterSupport = self.advanced_options['large_cluster_support']
                    if self.advanced_options['object_repair_timer'] is not None:
                        vSanSpec.extendedConfig.objectRepairTimer = self.advanced_options['object_repair_timer']
                    if self.advanced_options['thin_swap'] is not None:
                        vSanSpec.extendedConfig.enableCustomizedSwapObject = self.advanced_options['thin_swap']
                try:
                    task = self.vsanClusterConfigSystem.VsanClusterReconfig(self.cluster, vSanSpec)
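                    # The vSAN API returns its own task object; wrapping its moId in a
                    # vim.Task bound to the vCenter stub lets wait_for_task poll it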
                    changed, result = wait_for_task(vim.Task(task._moId, self.si._stub))
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to update cluster"
                                              " due to generic exception %s" % to_native(generic_exc))
            else:
                changed = True

        self.module.exit_json(changed=changed, result=result)
Example 5
    def ensure(self):
        datastore_results = dict()
        change_datastore_list = []
        for datastore in self.datastore_objs:
            changed = False
            # Nothing to do for this datastore; skip to the next one
            if self.state == 'present' and datastore.summary.maintenanceMode != 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is already in maintenance mode." % datastore.name
                continue
            if self.state == 'absent' and datastore.summary.maintenanceMode == 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is not in maintenance mode." % datastore.name
                continue

            try:
                if self.state == 'present':
                    storage_replacement_result = datastore.DatastoreEnterMaintenanceMode()
                    task = storage_replacement_result.task
                else:
                    task = datastore.DatastoreExitMaintenanceMode_Task()

                success, result = wait_for_task(task)

                if success:
                    changed = True
                    if self.state == 'present':
                        datastore_results[datastore.name] = "Datastore '%s' entered maintenance mode." % datastore.name
                    else:
                        datastore_results[datastore.name] = "Datastore '%s' exited maintenance mode." % datastore.name
            except vim.fault.InvalidState as invalid_state:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to : %s" % to_native(invalid_state.msg)
                self.module.fail_json(msg=msg)
            except Exception as exc:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to generic exception : %s" % to_native(exc)
                self.module.fail_json(msg=msg)
            change_datastore_list.append(changed)

        changed = any(change_datastore_list)
        self.module.exit_json(changed=changed, datastore_status=datastore_results)
Example 6
    def set_vsan_service_type(self, enable_vsan):
        """
        Set VSAN service type
        Returns: result of UpdateVsan_Task

        """
        result = None
        vsan_system = self.esxi_host_obj.configManager.vsanSystem

        vsan_system_config = vsan_system.config
        vsan_config = vim.vsan.host.ConfigInfo()

        vsan_config.networkInfo = vsan_system_config.networkInfo
        # Guard against a host that has no vSAN network config yet
        if vsan_system_config.networkInfo is None:
            current_vsan_vnics = []
        else:
            current_vsan_vnics = [portConfig.device for portConfig in vsan_system_config.networkInfo.port]
        changed = False
        result = "%s NIC %s (currently enabled NICs: %s) : " % ("Enable" if enable_vsan else "Disable", self.vnic.device, current_vsan_vnics)
        if not enable_vsan:
            if self.vnic.device in current_vsan_vnics:
                vsan_config.networkInfo.port = list(filter(lambda portConfig: portConfig.device != self.vnic.device, vsan_config.networkInfo.port))
                changed = True
        else:
            if self.vnic.device not in current_vsan_vnics:
                vsan_port_config = vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()
                vsan_port_config.device = self.vnic.device

                if vsan_config.networkInfo is None:
                    vsan_config.networkInfo = vim.vsan.host.ConfigInfo.NetworkInfo()
                    vsan_config.networkInfo.port = [vsan_port_config]
                else:
                    vsan_config.networkInfo.port.append(vsan_port_config)
                changed = True

        if not self.module.check_mode and changed:
            try:
                vsan_task = vsan_system.UpdateVsan_Task(vsan_config)
                task_result = wait_for_task(vsan_task)
                if task_result[0]:
                    result += "Success"
                else:
                    result += "Failed"
            except TaskError as task_err:
                self.module.fail_json(
                    msg="Failed to set service type to vsan for %s : %s" % (self.vnic.device, to_native(task_err))
                )
        if self.module.check_mode:
            result += "Dry-run"
        return result
Example 7
    def destroy_datacenter(self):
        results = dict(changed=False)
        try:
            if self.datacenter_obj and not self.module.check_mode:
                task = self.datacenter_obj.Destroy_Task()
                changed, result = wait_for_task(task)
                results['changed'] = changed
                results['result'] = result
            self.module.exit_json(**results)
        except (vim.fault.VimFault, vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
            self.module.fail_json(msg="Failed to delete a datacenter"
                                      " '%s' due to : %s" % (self.datacenter_name,
                                                             to_native(runtime_fault.msg)))
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to delete a datacenter"
                                      " '%s' due to generic error: %s" % (self.datacenter_name,
                                                                          to_native(generic_exc)))
Example 8
    def update_lacp_group_config(self, switch_object, lacp_group_spec):
        """Update LACP group config"""
        try:
            task = switch_object.UpdateDVSLacpGroupConfig_Task(lacpGroupSpec=lacp_group_spec)
            result = wait_for_task(task)
        except vim.fault.DvsFault as dvs_fault:
            self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
        except vmodl.fault.NotSupported as not_supported:
            self.module.fail_json(
                msg="Multiple Link Aggregation Control Protocol groups not supported on the switch : %s" %
                to_native(not_supported)
            )
        except TaskError as invalid_argument:
            self.module.fail_json(
                msg="Failed to update Link Aggregation Group : %s" % to_native(invalid_argument)
            )
        return result
Example 9
    def state_remove_rp(self):
        changed = True
        result = None
        if self.module.check_mode:
            self.module.exit_json(changed=changed)

        resource_pool_config = self.generate_rp_config_return_value(True)
        try:
            task = self.resource_pool_obj.Destroy()
            success, result = wait_for_task(task)

        except Exception as generic_exc:
            self.module.fail_json(
                msg="Failed to remove resource pool '%s' : %s" %
                (self.resource_pool, to_native(generic_exc)))
        self.module.exit_json(changed=changed,
                              resource_pool_config=resource_pool_config)
Example 10
    def state_disable_evc(self):
        """
        Disable EVC Mode
        """
        changed, result = False, None
        try:
            if not self.module.check_mode:
                evc_task = self.evcm.DisableEvcMode_Task()
                changed, result = wait_for_task(evc_task)
            else:
                changed = True
            self.module.exit_json(
                changed=changed,
                msg="EVC Mode has been disabled on cluster '%s'." %
                self.cluster_name)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to disable EVC mode: %s" %
                                  to_native(invalid_argument))
Example 11
    def create_rule_spec(self):
        """
        Create DRS rule
        """
        changed = False
        result = None
        if self.affinity_rule:
            rule = vim.cluster.AffinityRuleSpec()
        else:
            rule = vim.cluster.AntiAffinityRuleSpec()

        rule.vm = self.vm_obj_list
        rule.enabled = self.enabled
        rule.mandatory = self.mandatory
        rule.name = self.rule_name

        rule_spec = vim.cluster.RuleSpec(info=rule, operation='add')
        config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])

        try:
            if not self.module.check_mode:
                task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
                changed, result = wait_for_task(task)
        except vmodl.fault.InvalidRequest as e:
            result = to_native(e.msg)
        except Exception as e:
            result = to_native(e)

        if changed:
            rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
            result = self.normalize_rule_spec(rule_obj)

        if self.module.check_mode:
            changed = True
            result = dict(
                rule_key='',
                rule_enabled=rule.enabled,
                rule_name=self.rule_name,
                rule_mandatory=rule.mandatory,
                rule_uuid='',
                rule_vms=[vm.name for vm in rule.vm],
                rule_affinity=self.affinity_rule,
            )
        return changed, result
Example 12
    def state_enable_evc(self):
        """
        Enable EVC Mode
        """
        changed, result = False, None
        try:
            if not self.module.check_mode:
                evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode)
                changed, result = wait_for_task(evc_task)
            else:
                changed = True
            self.module.exit_json(
                changed=changed,
                msg="EVC Mode for '%(evc_mode)s' has been enabled on '%(cluster_name)s'." % self.params)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to enable EVC mode: %s" %
                                  to_native(invalid_argument))
Example 13
    def state_destroy_cluster(self):
        """
        Destroy cluster
        """
        changed, result = True, None

        try:
            if not self.module.check_mode:
                task = self.cluster.Destroy_Task()
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vim.fault.VimFault as vim_fault:
            self.module.fail_json(msg=to_native(vim_fault.msg))
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=to_native(method_fault.msg))
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to destroy cluster"
                                      " due to generic exception %s" % to_native(generic_exc))
Example 14
def create_vsan_cluster(host_system, new_cluster_uuid):
    host_config_manager = host_system.configManager
    vsan_system = host_config_manager.vsanSystem

    vsan_config = vim.vsan.host.ConfigInfo()
    vsan_config.enabled = True

    if new_cluster_uuid is not None:
        vsan_config.clusterInfo = vim.vsan.host.ConfigInfo.ClusterInfo()
        vsan_config.clusterInfo.uuid = new_cluster_uuid

    vsan_config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
    vsan_config.storageInfo.autoClaimStorage = True

    task = vsan_system.UpdateVsan_Task(vsan_config)
    changed, result = wait_for_task(task)

    host_status = vsan_system.QueryHostStatus()
    cluster_uuid = host_status.uuid

    return changed, result, cluster_uuid
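
A minimal usage sketch for create_vsan_cluster, assuming a live connection via pyVmomi; the hostnames and credentials here are hypothetical:

    from pyVim.connect import SmartConnect

    si = SmartConnect(host='vcenter.example.com',
                      user='administrator@vsphere.local',
                      pwd='secret')
    # Look up the ESXi host that should join (or bootstrap) the vSAN cluster
    host = si.content.searchIndex.FindByDnsName(dnsName='esxi01.example.com',
                                                vmSearch=False)
    # Pass None to let the host generate a fresh cluster UUID
    changed, result, cluster_uuid = create_vsan_cluster(host, None)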
Example 15
    def configure_drs(self):
        """
        Manage DRS configuration

        """
        changed, result = False, None

        if self.check_drs_config_diff():
            if not self.module.check_mode:
                cluster_config_spec = vim.cluster.ConfigSpecEx()
                cluster_config_spec.drsConfig = vim.cluster.DrsConfigInfo()
                cluster_config_spec.drsConfig.enabled = self.enable_drs
                cluster_config_spec.drsConfig.enableVmBehaviorOverrides = self.params.get(
                    'drs_enable_vm_behavior_overrides')
                cluster_config_spec.drsConfig.defaultVmBehavior = self.params.get(
                    'drs_default_vm_behavior')
                cluster_config_spec.drsConfig.vmotionRate = self.params.get(
                    'drs_vmotion_rate')

                if self.changed_advanced_settings:
                    cluster_config_spec.drsConfig.option = self.changed_advanced_settings

                try:
                    task = self.cluster.ReconfigureComputeResource_Task(
                        cluster_config_spec, True)
                    changed, result = wait_for_task(task)
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to update cluster"
                                          " due to generic exception %s" %
                                          to_native(generic_exc))
            else:
                changed = True

        self.module.exit_json(changed=changed, result=result)
Example 16
    def delete(self, rule_name=None):
        """
        Delete DRS rule using name
        """
        changed = False
        if rule_name is None:
            rule_name = self.rule_name

        rule = self.get_rule_key_by_name(rule_name=rule_name)
        if rule is not None:
            rule_key = int(rule.key)
            rule_spec = vim.cluster.RuleSpec(removeKey=rule_key, operation='remove')
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
            try:
                task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
                changed, result = wait_for_task(task)
            except vmodl.fault.InvalidRequest as e:
                result = to_native(e.msg)
            except Exception as e:
                result = to_native(e)
        else:
            result = 'No rule named %s exists' % rule_name
        return changed, result
Example 17
    def EnterMaintenanceMode(self):
        if self.host.runtime.inMaintenanceMode:
            self.module.exit_json(changed=False,
                                  hostsystem=str(self.host),
                                  hostname=self.esxi_hostname,
                                  status='NO_ACTION',
                                  msg='Host %s already in maintenance mode' %
                                  self.esxi_hostname)

        spec = vim.host.MaintenanceSpec()

        if self.vsan:
            spec.vsanMode = vim.vsan.host.DecommissionMode()
            spec.vsanMode.objectAction = self.vsan

        try:
            if not self.module.check_mode:
                task = self.host.EnterMaintenanceMode_Task(
                    self.module.params['timeout'],
                    self.module.params['evacuate'], spec)

                success, result = wait_for_task(task)
            else:
                success = True

            self.module.exit_json(changed=success,
                                  hostsystem=str(self.host),
                                  hostname=self.esxi_hostname,
                                  status='ENTER',
                                  msg='Host %s entered maintenance mode' %
                                  self.esxi_hostname)

        except TaskError as e:
            self.module.fail_json(
                msg='Host %s failed to enter maintenance mode due to %s' %
                (self.esxi_hostname, to_native(e)))
Example 18
    def reconfigure_vm(self, config_spec, device_type):
        """
        Reconfigure virtual machine after modifying device spec
        Args:
            config_spec: Config Spec
            device_type: Type of device being modified

        Returns: Boolean status 'changed' and actual task result

        """
        changed, results = (False, '')
        try:
            # Perform actual VM reconfiguration
            task = self.vm.ReconfigVM_Task(spec=config_spec)
            changed, results = wait_for_task(task)
        except vim.fault.InvalidDeviceSpec as invalid_device_spec:
            self.module.fail_json(msg="Failed to manage %s on given virtual machine due to invalid"
                                      " device spec : %s" % (device_type, to_native(invalid_device_spec.msg)),
                                  details="Please check ESXi server logs for more details.")
        except vim.fault.RestrictedVersion as e:
            self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
                                      " product versioning restrictions: %s" % to_native(e.msg))

        return changed, results
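
For context, the config_spec consumed by reconfigure_vm above is a vim.vm.ConfigSpec. A hypothetical call, where device_manager stands in for an instance of the class this method belongs to:

    # Illustrative only: request a memory resize and let reconfigure_vm apply it
    spec = vim.vm.ConfigSpec(memoryMB=4096)
    changed, results = device_manager.reconfigure_vm(spec, device_type='memory')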
Example 19
    def modify_dvs_host(self, operation):
        changed, result = False, None
        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dv_switch.config.configVersion
        spec.host = [vim.dvs.HostMember.ConfigSpec()]
        spec.host[0].operation = operation
        spec.host[0].host = self.host
        if self.vendor_specific_config:
            config = list()
            for item in self.vendor_specific_config:
                config.append(
                    vim.dvs.KeyedOpaqueBlob(key=item['key'],
                                            opaqueData=item['value']))
            spec.host[0].vendorSpecificConfig = config

        if operation in ("edit", "add"):
            spec.host[0].backing = vim.dvs.HostMember.PnicBacking()
            for nic in self.vmnics:
                pnic_spec = vim.dvs.HostMember.PnicSpec()
                pnic_spec.pnicDevice = nic
                pnic_spec.uplinkPortgroupKey = self.uplink_portgroup.key
                spec.host[0].backing.pnicSpec.append(pnic_spec)

        try:
            task = self.dv_switch.ReconfigureDvs_Task(spec)
            changed, result = wait_for_task(task)
        except vmodl.fault.NotSupported as not_supported:
            self.module.fail_json(
                msg="Failed to configure DVS host %s as it is not"
                " compatible with the VDS version." % self.esxi_hostname,
                details=to_native(not_supported.msg))
        return changed, result
Example 20
    def configure_disk_controllers(self):
        """
        Do disk controller management, add or remove

        Return: Operation result
        """
        if self.params['gather_disk_controller_facts']:
            results = {
                'changed': False,
                'failed': False,
                'disk_controller_data': self.gather_disk_controller_facts()
            }
            return results

        controller_config = self.sanitize_disk_controller_config()
        for disk_ctl_config in controller_config:
            if disk_ctl_config and disk_ctl_config['state'] == 'present':
                # create new USB controller, bus number is 0
                if disk_ctl_config['type'] in self.device_helper.usb_device_type:
                    usb_exists, has_disks_attached = self.check_ctl_disk_exist(
                        disk_ctl_config['type'])
                    if usb_exists:
                        self.module.warn(
                            "'%s' USB controller already exists, can not add more."
                            % disk_ctl_config['type'])
                    else:
                        disk_controller_new = self.create_controller(
                            disk_ctl_config['type'],
                            disk_ctl_config.get('bus_sharing'))
                        self.config_spec.deviceChange.append(
                            disk_controller_new)
                        self.change_detected = True
                # create other disk controller
                else:
                    if disk_ctl_config.get('controller_number') is not None:
                        disk_controller_new = self.create_controller(
                            disk_ctl_config['type'],
                            disk_ctl_config.get('bus_sharing'),
                            disk_ctl_config.get('controller_number'))
                        self.config_spec.deviceChange.append(
                            disk_controller_new)
                        self.change_detected = True
                    else:
                        if disk_ctl_config['type'] in self.device_helper.scsi_device_type:
                            self.module.warn(
                                "Already 4 SCSI controllers, can not add new '%s' controller."
                                % disk_ctl_config['type'])
                        else:
                            self.module.warn(
                                "Already 4 '%s' controllers, can not add new one."
                                % disk_ctl_config['type'])
            elif disk_ctl_config and disk_ctl_config['state'] == 'absent':
                existing_ctl, has_disks_attached = self.check_ctl_disk_exist(
                    disk_ctl_config['type'],
                    disk_ctl_config.get('controller_number'))
                if existing_ctl is not None:
                    if not has_disks_attached:
                        ctl_spec = vim.vm.device.VirtualDeviceSpec()
                        ctl_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
                        ctl_spec.device = existing_ctl
                        self.config_spec.deviceChange.append(ctl_spec)
                        self.change_detected = True
                    else:
                        self.module.warn(
                            "Can not remove specified controller, type '%s', bus number '%s',"
                            " there are disks attaching to it." %
                            (disk_ctl_config['type'],
                             disk_ctl_config.get('controller_number')))
                else:
                    self.module.warn(
                        "Can not find specified controller to remove, type '%s', bus number '%s'."
                        % (disk_ctl_config['type'],
                           disk_ctl_config.get('controller_number')))

        try:
            task = self.current_vm_obj.ReconfigVM_Task(spec=self.config_spec)
            wait_for_task(task)
        except vim.fault.InvalidDeviceSpec as e:
            self.module.fail_json(
                msg="Failed to configure controller on given virtual machine due to invalid"
                    " device spec : %s" % to_native(e.msg),
                details="Please check ESXi server logs for more details.")
        except vim.fault.RestrictedVersion as e:
            self.module.fail_json(
                msg="Failed to reconfigure virtual machine due to"
                " product versioning restrictions: %s" % to_native(e.msg))
        except TaskError as task_e:
            self.module.fail_json(msg=to_native(task_e))

        if task.info.state == 'error':
            results = {
                'changed': self.change_detected,
                'failed': True,
                'msg': task.info.error.msg
            }
        else:
            if self.change_detected:
                time.sleep(self.sleep_time)
            results = {
                'changed': self.change_detected,
                'failed': False,
                'disk_controller_data': self.gather_disk_controller_facts()
            }

        return results
Example 21
    def ensure(self):
        """Manage uplink portgroup"""
        changed = changed_uplink_pg_policy = changed_vlan_trunk_range = changed_lacp = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        changed_list = []

        uplink_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
        # Use the same version in the new spec; The version will be increased by one by the API automatically
        uplink_pg_spec.configVersion = self.dvs.config.uplinkPortgroup[0].config.configVersion
        uplink_pg_config = self.dvs.config.uplinkPortgroup[0].config

        # Check name
        if self.uplink_pg_name:
            results['name'] = self.uplink_pg_name
            if uplink_pg_config.name != self.uplink_pg_name:
                changed = True
                changed_list.append("name")
                results['name_previous'] = uplink_pg_config.name
                uplink_pg_spec.name = self.uplink_pg_name
        else:
            results['name'] = uplink_pg_config.name

        # Check description
        results['description'] = self.uplink_pg_description
        if uplink_pg_config.description != self.uplink_pg_description:
            changed = True
            changed_list.append("description")
            results['description_previous'] = uplink_pg_config.description
            uplink_pg_spec.description = self.uplink_pg_description

        # Check port policies
        results['adv_reset_at_disconnect'] = self.uplink_pg_reset
        results['adv_block_ports'] = self.uplink_pg_block_ports
        results['adv_vendor_conf'] = self.uplink_pg_vendor_conf
        results['adv_vlan'] = self.uplink_pg_vlan
        results['adv_netflow'] = self.uplink_pg_netflow
        results['adv_traffic_filtering'] = self.uplink_pg_tf
        uplink_pg_policy_spec = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy()
        uplink_pg_policy_spec.portConfigResetAtDisconnect = self.uplink_pg_reset
        uplink_pg_policy_spec.blockOverrideAllowed = self.uplink_pg_block_ports
        uplink_pg_policy_spec.vendorConfigOverrideAllowed = self.uplink_pg_vendor_conf
        uplink_pg_policy_spec.vlanOverrideAllowed = self.uplink_pg_vlan
        uplink_pg_policy_spec.ipfixOverrideAllowed = self.uplink_pg_netflow
        uplink_pg_policy_spec.trafficFilterOverrideAllowed = self.uplink_pg_tf
        # There's no information available on whether the following options are deprecated, but
        # they aren't visible in the vSphere Client
        uplink_pg_policy_spec.shapingOverrideAllowed = False
        uplink_pg_policy_spec.livePortMovingAllowed = False
        uplink_pg_policy_spec.uplinkTeamingOverrideAllowed = False
        uplink_pg_policy_spec.securityPolicyOverrideAllowed = False
        uplink_pg_policy_spec.networkResourcePoolOverrideAllowed = False
        # Check policies
        if uplink_pg_config.policy.portConfigResetAtDisconnect != self.uplink_pg_reset:
            changed_uplink_pg_policy = True
            results['adv_reset_at_disconnect_previous'] = uplink_pg_config.policy.portConfigResetAtDisconnect
        if uplink_pg_config.policy.blockOverrideAllowed != self.uplink_pg_block_ports:
            changed_uplink_pg_policy = True
            results['adv_block_ports_previous'] = uplink_pg_config.policy.blockOverrideAllowed
        if uplink_pg_config.policy.vendorConfigOverrideAllowed != self.uplink_pg_vendor_conf:
            changed_uplink_pg_policy = True
            results['adv_vendor_conf_previous'] = uplink_pg_config.policy.vendorConfigOverrideAllowed
        if uplink_pg_config.policy.vlanOverrideAllowed != self.uplink_pg_vlan:
            changed_uplink_pg_policy = True
            results['adv_vlan_previous'] = uplink_pg_config.policy.vlanOverrideAllowed
        if uplink_pg_config.policy.ipfixOverrideAllowed != self.uplink_pg_netflow:
            changed_uplink_pg_policy = True
            results['adv_netflow_previous'] = uplink_pg_config.policy.ipfixOverrideAllowed
        if uplink_pg_config.policy.trafficFilterOverrideAllowed != self.uplink_pg_tf:
            changed_uplink_pg_policy = True
            results['adv_traffic_filtering_previous'] = uplink_pg_config.policy.trafficFilterOverrideAllowed
        if changed_uplink_pg_policy:
            changed = True
            changed_list.append("advanced")
            uplink_pg_spec.policy = uplink_pg_policy_spec

        uplink_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()

        # Check VLAN trunk
        results['vlan_trunk_range'] = self.uplink_pg_vlan_trunk_range
        vlan_id_ranges = self.uplink_pg_vlan_trunk_range
        trunk_vlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
        vlan_id_list = []
        for vlan_id_range in vlan_id_ranges:
            vlan_id_range_found = False
            vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range)
            # Check if range is already configured
            for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
                if current_vlan_id_range.start == int(vlan_id_start) and current_vlan_id_range.end == int(vlan_id_end):
                    vlan_id_range_found = True
                    break
            if vlan_id_range_found is False:
                changed_vlan_trunk_range = True
            vlan_id_list.append(
                vim.NumericRange(start=int(vlan_id_start), end=int(vlan_id_end))
            )
        # Check if range needs to be removed
        for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
            vlan_id_range_found = False
            for vlan_id_range in vlan_id_ranges:
                vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range)
                if (current_vlan_id_range.start == int(vlan_id_start)
                        and current_vlan_id_range.end == int(vlan_id_end)):
                    vlan_id_range_found = True
                    break
            if vlan_id_range_found is False:
                changed_vlan_trunk_range = True
        trunk_vlan_spec.vlanId = vlan_id_list
        if changed_vlan_trunk_range:
            changed = True
            changed_list.append("vlan trunk range")
            current_vlan_id_list = []
            for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
                if current_vlan_id_range.start == current_vlan_id_range.end:
                    current_vlan_id_range_string = str(current_vlan_id_range.start)
                else:
                    current_vlan_id_range_string = '-'.join(
                        [str(current_vlan_id_range.start), str(current_vlan_id_range.end)]
                    )
                current_vlan_id_list.append(current_vlan_id_range_string)
            results['vlan_trunk_range_previous'] = current_vlan_id_list
            uplink_pg_spec.defaultPortConfig.vlan = trunk_vlan_spec

        # Check LACP
        lacp_support_mode = self.get_lacp_support_mode(self.support_mode)
        if lacp_support_mode == 'basic':
            results['lacp_status'] = self.lacp_status
            lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.UplinkLacpPolicy()
            lacp_enabled = False
            if self.lacp_status == 'enabled':
                lacp_enabled = True
            if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value != lacp_enabled:
                changed_lacp = True
                changed_list.append("lacp status")
                if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value:
                    results['lacp_status_previous'] = 'enabled'
                else:
                    results['lacp_status_previous'] = 'disabled'
                lacp_spec.enable = vim.BoolPolicy()
                lacp_spec.enable.inherited = False
                lacp_spec.enable.value = lacp_enabled
            if lacp_enabled and uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value != self.lacp_mode:
                results['lacp_mode'] = self.lacp_mode
                changed_lacp = True
                changed_list.append("lacp mode")
                results['lacp_mode_previous'] = uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value
                lacp_spec.mode = vim.StringPolicy()
                lacp_spec.mode.inherited = False
                lacp_spec.mode.value = self.lacp_mode
            if changed_lacp:
                changed = True
                uplink_pg_spec.defaultPortConfig.lacpPolicy = lacp_spec

        # Check NetFlow
        results['netflow_enabled'] = self.uplink_pg_netflow_enabled
        netflow_enabled_spec = vim.BoolPolicy()
        netflow_enabled_spec.inherited = False
        netflow_enabled_spec.value = self.uplink_pg_netflow_enabled
        if uplink_pg_config.defaultPortConfig.ipfixEnabled.value != self.uplink_pg_netflow_enabled:
            changed = True
            results['netflow_enabled_previous'] = uplink_pg_config.defaultPortConfig.ipfixEnabled.value
            changed_list.append("netflow")
            uplink_pg_spec.defaultPortConfig.ipfixEnabled = netflow_enabled_spec

        # TODO: Check Traffic filtering and marking

        # Check Block all ports
        results['block_all_ports'] = self.uplink_pg_block_all_ports
        block_all_ports_spec = vim.BoolPolicy()
        block_all_ports_spec.inherited = False
        block_all_ports_spec.value = self.uplink_pg_block_all_ports
        if uplink_pg_config.defaultPortConfig.blocked.value != self.uplink_pg_block_all_ports:
            changed = True
            changed_list.append("block all ports")
            results['block_all_ports_previous'] = uplink_pg_config.defaultPortConfig.blocked.value
            uplink_pg_spec.defaultPortConfig.blocked = block_all_ports_spec

        if changed:
            if self.module.check_mode:
                changed_suffix = ' would be changed'
            else:
                changed_suffix = ' changed'
            if len(changed_list) > 2:
                message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
            elif len(changed_list) == 2:
                message = ' and '.join(changed_list)
            elif len(changed_list) == 1:
                message = changed_list[0]
            message += changed_suffix
            if not self.module.check_mode:
                try:
                    task = self.dvs.config.uplinkPortgroup[0].ReconfigureDVPortgroup_Task(uplink_pg_spec)
                    wait_for_task(task)
                except TaskError as invalid_argument:
                    self.module.fail_json(msg="Failed to update uplink portgroup : %s" % to_native(invalid_argument))
        else:
            message = "Uplink portgroup already configured properly"
        results['changed'] = changed
        results['result'] = message

        self.module.exit_json(**results)
Example 22
    def update_dvswitch(self):
        """Check and update DVS settings"""
        changed = changed_settings = changed_ldp = changed_version = changed_health_check = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        changed_list = []

        config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # Use the same version in the new spec; The version will be increased by one by the API automatically
        config_spec.configVersion = self.dvs.config.configVersion

        # Check MTU
        results['mtu'] = self.mtu
        if self.dvs.config.maxMtu != self.mtu:
            changed = changed_settings = True
            changed_list.append("mtu")
            results['mtu_previous'] = self.dvs.config.maxMtu
            config_spec.maxMtu = self.mtu

        # Check Discovery Protocol type and operation
        ldp_protocol = self.dvs.config.linkDiscoveryProtocolConfig.protocol
        ldp_operation = self.dvs.config.linkDiscoveryProtocolConfig.operation
        if self.discovery_protocol == 'disabled':
            results['discovery_protocol'] = self.discovery_protocol
            results['discovery_operation'] = 'n/a'
            if ldp_protocol != 'cdp' or ldp_operation != 'none':
                changed_ldp = True
                results['discovery_protocol_previous'] = ldp_protocol
                results['discovery_operation_previous'] = ldp_operation
        else:
            results['discovery_protocol'] = self.discovery_protocol
            results['discovery_operation'] = self.discovery_operation
            if ldp_protocol != self.discovery_protocol or ldp_operation != self.discovery_operation:
                changed_ldp = True
                if ldp_protocol != self.discovery_protocol:
                    results['discovery_protocol_previous'] = ldp_protocol
                if ldp_operation != self.discovery_operation:
                    results['discovery_operation_previous'] = ldp_operation
        if changed_ldp:
            changed = changed_settings = True
            changed_list.append("discovery protocol")
            config_spec.linkDiscoveryProtocolConfig = self.create_ldp_spec()

        # Check Multicast filtering mode
        results['multicast_filtering_mode'] = self.multicast_filtering_mode
        multicast_filtering_mode = self.get_api_mc_filtering_mode(
            self.multicast_filtering_mode)
        if self.dvs.config.multicastFilteringMode != multicast_filtering_mode:
            changed = changed_settings = True
            changed_list.append("multicast filtering")
            results['multicast_filtering_mode_previous'] = self.dvs.config.multicastFilteringMode
            config_spec.multicastFilteringMode = multicast_filtering_mode

        # Check administrator contact
        results['contact'] = self.contact_name
        results['contact_details'] = self.contact_details
        if self.dvs.config.contact.name != self.contact_name or self.dvs.config.contact.contact != self.contact_details:
            changed = changed_settings = True
            changed_list.append("contact")
            results['contact_previous'] = self.dvs.config.contact.name
            results['contact_details_previous'] = self.dvs.config.contact.contact
            config_spec.contact = self.create_contact_spec()

        # Check description
        results['description'] = self.description
        if self.dvs.config.description != self.description:
            changed = changed_settings = True
            changed_list.append("description")
            results['description_previous'] = self.dvs.config.description
            if self.description is None:
                # need to use empty string; will be set to None by API
                config_spec.description = ''
            else:
                config_spec.description = self.description

        # Check uplinks
        results['uplink_quantity'] = self.uplink_quantity
        current_uplink_names = self.dvs.config.uplinkPortPolicy.uplinkPortName
        if len(current_uplink_names) != self.uplink_quantity:
            changed = changed_settings = True
            changed_list.append("uplink quantity")
            results['uplink_quantity_previous'] = len(current_uplink_names)
            config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
            # The quantity differs, so just replace the whole uplink name array
            for count in range(1, self.uplink_quantity + 1):
                config_spec.uplinkPortPolicy.uplinkPortName.append(
                    "%s%d" % (self.uplink_prefix, count))
            results['uplinks'] = config_spec.uplinkPortPolicy.uplinkPortName
            results['uplinks_previous'] = current_uplink_names
        else:
            # No uplink name check; uplink names can't be changed easily if they are used by a portgroup
            results['uplinks'] = current_uplink_names

        # Check Health Check
        results['health_check_vlan'] = self.health_check_vlan
        results['health_check_teaming'] = self.health_check_teaming
        results['health_check_vlan_interval'] = self.health_check_vlan_interval
        results['health_check_teaming_interval'] = self.health_check_teaming_interval
        (health_check_config, changed_health_check, changed_vlan, vlan_previous,
         changed_vlan_interval, vlan_interval_previous, changed_teaming, teaming_previous,
         changed_teaming_interval, teaming_interval_previous) = \
            self.check_health_check_config(self.dvs.config.healthCheckConfig)
        if changed_health_check:
            changed = True
            changed_list.append("health check")
            if changed_vlan:
                results['health_check_vlan_previous'] = vlan_previous
            if changed_vlan_interval:
                results['health_check_vlan_interval_previous'] = vlan_interval_previous
            if changed_teaming:
                results['health_check_teaming_previous'] = teaming_previous
            if changed_teaming_interval:
                results['health_check_teaming_interval_previous'] = teaming_interval_previous

        # Check switch version
        if self.switch_version:
            results['version'] = self.switch_version
            if self.dvs.config.productInfo.version != self.switch_version:
                changed_version = True
                spec_product = self.create_product_spec(self.switch_version)
        else:
            results['version'] = self.vcenter_switch_version
            if self.dvs.config.productInfo.version != self.vcenter_switch_version:
                changed_version = True
                spec_product = self.create_product_spec(
                    self.vcenter_switch_version)
        if changed_version:
            changed = True
            changed_list.append("switch version")
            results['version_previous'] = self.dvs.config.productInfo.version

        if changed:
            if self.module.check_mode:
                changed_suffix = ' would be changed'
            else:
                changed_suffix = ' changed'
            if len(changed_list) > 2:
                message = ', '.join(changed_list[:-1]) + ', and ' + changed_list[-1]
            elif len(changed_list) == 2:
                message = ' and '.join(changed_list)
            elif len(changed_list) == 1:
                message = changed_list[0]
            message += changed_suffix
            if not self.module.check_mode:
                if changed_settings:
                    self.update_dvs_config(self.dvs, config_spec)
                if changed_health_check:
                    self.update_health_check_config(self.dvs,
                                                    health_check_config)
                if changed_version:
                    task = self.dvs.PerformDvsProductSpecOperation_Task(
                        "upgrade", spec_product)
                    try:
                        wait_for_task(task)
                    except TaskError as invalid_argument:
                        self.module.fail_json(
                            msg="Failed to update DVS version : %s" %
                            to_native(invalid_argument))
        else:
            message = "DVS already configured properly"
        results['changed'] = changed
        results['result'] = message

        self.module.exit_json(**results)
Example 23
    def create_dvswitch(self):
        """Create a DVS"""
        changed = True
        results = dict(changed=changed)

        spec = vim.DistributedVirtualSwitch.CreateSpec()
        spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # Name
        results['dvswitch'] = self.switch_name
        spec.configSpec.name = self.switch_name
        # MTU
        results['mtu'] = self.mtu
        spec.configSpec.maxMtu = self.mtu
        # Discovery Protocol type and operation
        results['discovery_protocol'] = self.discovery_protocol
        results['discovery_operation'] = self.discovery_operation
        spec.configSpec.linkDiscoveryProtocolConfig = self.create_ldp_spec()
        # Administrator contact
        results['contact'] = self.contact_name
        results['contact_details'] = self.contact_details
        if self.contact_name or self.contact_details:
            spec.contact = self.create_contact_spec()
        # Description
        results['description'] = self.description
        if self.description:
            spec.description = self.description
        # Uplinks
        results['uplink_quantity'] = self.uplink_quantity
        spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
        for count in range(1, self.uplink_quantity + 1):
            spec.configSpec.uplinkPortPolicy.uplinkPortName.append(
                "%s%d" % (self.uplink_prefix, count))
        results['uplinks'] = spec.configSpec.uplinkPortPolicy.uplinkPortName
        # Version
        results['version'] = self.switch_version
        if self.switch_version:
            spec.productInfo = self.create_product_spec(self.switch_version)

        if self.module.check_mode:
            result = "DVS would be created"
        else:
            # Create DVS
            network_folder = self.folder_obj
            task = network_folder.CreateDVS_Task(spec)
            try:
                wait_for_task(task)
            except TaskError as invalid_argument:
                self.module.fail_json(msg="Failed to create DVS : %s" %
                                      to_native(invalid_argument))
            # Find new DVS
            self.dvs = find_dvs_by_name(self.content, self.switch_name)
            changed_multicast = False
            spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
            # Use the same version in the new spec; The version will be increased by one by the API automatically
            spec.configVersion = self.dvs.config.configVersion
            # Set multicast filtering mode
            results['multicast_filtering_mode'] = self.multicast_filtering_mode
            multicast_filtering_mode = self.get_api_mc_filtering_mode(
                self.multicast_filtering_mode)
            if self.dvs.config.multicastFilteringMode != multicast_filtering_mode:
                changed_multicast = True
                spec.multicastFilteringMode = multicast_filtering_mode
            if changed_multicast:
                self.update_dvs_config(self.dvs, spec)
            # Set Health Check config
            results['health_check_vlan'] = self.health_check_vlan
            results['health_check_teaming'] = self.health_check_teaming
            result = self.check_health_check_config(
                self.dvs.config.healthCheckConfig)
            changed_health_check = result[1]
            if changed_health_check:
                self.update_health_check_config(self.dvs, result[0])
            result = "DVS created"
        self.module.exit_json(changed=changed, result=to_native(result))
Example 24
    def configure_ha(self):
        """
        Manage HA Configuration

        """
        changed, result = False, None

        if self.check_ha_config_diff():
            if not self.module.check_mode:
                cluster_config_spec = vim.cluster.ConfigSpecEx()
                cluster_config_spec.dasConfig = vim.cluster.DasConfigInfo()
                cluster_config_spec.dasConfig.enabled = self.enable_ha

                if self.enable_ha:
                    vm_tool_spec = vim.cluster.VmToolsMonitoringSettings()
                    vm_tool_spec.enabled = True
                    vm_tool_spec.vmMonitoring = self.params.get('ha_vm_monitoring')
                    vm_tool_spec.failureInterval = self.params.get('ha_vm_failure_interval')
                    vm_tool_spec.minUpTime = self.params.get('ha_vm_min_up_time')
                    vm_tool_spec.maxFailures = self.params.get('ha_vm_max_failures')
                    vm_tool_spec.maxFailureWindow = self.params.get('ha_vm_max_failure_window')

                    das_vm_config = vim.cluster.DasVmSettings()
                    das_vm_config.restartPriority = self.params.get('ha_restart_priority')
                    das_vm_config.isolationResponse = self.host_isolation_response
                    das_vm_config.vmToolsMonitoringSettings = vm_tool_spec
                    cluster_config_spec.dasConfig.defaultVmSettings = das_vm_config

                cluster_config_spec.dasConfig.admissionControlEnabled = self.ha_admission_control

                if self.ha_admission_control:
                    if self.params.get('slot_based_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
                        policy = self.params.get('slot_based_admission_control')
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
                    elif self.params.get('reservation_based_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverResourcesAdmissionControlPolicy()
                        policy = self.params.get('reservation_based_admission_control')
                        auto_compute_percentages = policy.get('auto_compute_percentages')
                        cluster_config_spec.dasConfig.admissionControlPolicy.autoComputePercentages = auto_compute_percentages
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
                        if not auto_compute_percentages:
                            cluster_config_spec.dasConfig.admissionControlPolicy.cpuFailoverResourcesPercent = \
                                policy.get('cpu_failover_resources_percent')
                            cluster_config_spec.dasConfig.admissionControlPolicy.memoryFailoverResourcesPercent = \
                                policy.get('memory_failover_resources_percent')
                    elif self.params.get('failover_host_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverHostAdmissionControlPolicy()
                        policy = self.params.get('failover_host_admission_control')
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverHosts = self.get_failover_hosts()

                cluster_config_spec.dasConfig.hostMonitoring = self.params.get('ha_host_monitoring')
                cluster_config_spec.dasConfig.vmMonitoring = self.params.get('ha_vm_monitoring')

                if self.changed_advanced_settings:
                    cluster_config_spec.dasConfig.option = self.changed_advanced_settings

                try:
                    task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                    changed, result = wait_for_task(task)
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to update cluster"
                                              " due to generic exception %s" % to_native(generic_exc))
            else:
                changed = True

        self.module.exit_json(changed=changed, result=result)
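
Outside Ansible, the same reconfiguration boils down to a ConfigSpecEx carrying a DasConfigInfo, applied with ReconfigureComputeResource_Task. A hedged sketch, assuming 'si' comes from an earlier pyVim.connect.SmartConnect and the cluster name is a placeholder:

from pyVim.task import WaitForTask
from pyVmomi import vim

content = si.RetrieveContent()
view = content.viewManager.CreateContainerView(
    content.rootFolder, [vim.ClusterComputeResource], True)
cluster = next(c for c in view.view if c.name == 'Cluster01')
view.Destroy()

spec = vim.cluster.ConfigSpecEx()
spec.dasConfig = vim.cluster.DasConfigInfo()
spec.dasConfig.enabled = True          # turn vSphere HA on
spec.dasConfig.hostMonitoring = 'enabled'

# modify=True merges the spec into the existing cluster configuration
WaitForTask(cluster.ReconfigureComputeResource_Task(spec, True))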
Example no. 25
    def __init__(self, module):
        super(VmotionManager, self).__init__(module)
        self.vm = None
        self.vm_uuid = self.params.get('vm_uuid', None)
        self.use_instance_uuid = self.params.get('use_instance_uuid', False)
        self.vm_name = self.params.get('vm_name', None)
        self.moid = self.params.get('moid') or None
        result = dict()

        self.get_vm()
        if self.vm is None:
            vm_id = self.vm_uuid or self.vm_name or self.moid
            self.module.fail_json(
                msg="Failed to find the virtual machine with %s" % vm_id)

        # Get Destination Host System if specified by user
        dest_host_name = self.params.get('destination_host', None)
        self.host_object = None
        if dest_host_name is not None:
            self.host_object = find_hostsystem_by_name(content=self.content,
                                                       hostname=dest_host_name)

        # Get Destination Datastore if specified by user
        dest_datastore = self.params.get('destination_datastore', None)
        self.datastore_object = None
        if dest_datastore is not None:
            self.datastore_object = find_datastore_by_name(
                content=self.content, datastore_name=dest_datastore)

        # At least one of datastore or host system is required to migrate
        if self.datastore_object is None and self.host_object is None:
            self.module.fail_json(msg="Unable to find destination datastore"
                                  " and destination host system.")

        # Get Destination resourcepool
        dest_resourcepool = self.params.get('destination_resourcepool', None)
        self.resourcepool_object = None
        if dest_resourcepool:
            self.resourcepool_object = find_resource_pool_by_name(
                content=self.content, resource_pool_name=dest_resourcepool)
        elif not dest_resourcepool and dest_host_name:
            self.resourcepool_object = self.host_object.parent.resourcePool
        # Fail if resourcePool object is not found
        if self.resourcepool_object is None:
            self.module.fail_json(
                msg=
                "Unable to destination resource pool object which is required")

        # Check if a datastore is required; this is the case if the destination
        # and source host systems do not share the same datastore.
        host_datastore_required = []
        for vm_datastore in self.vm.datastore:
            if self.host_object and vm_datastore not in self.host_object.datastore:
                host_datastore_required.append(True)
            else:
                host_datastore_required.append(False)

        if any(host_datastore_required) and dest_datastore is None:
            msg = "Destination host system does not share" \
                  " datastore ['%s'] with source host system ['%s'] on which" \
                  " virtual machine is located.  Please specify destination_datastore" \
                  " to rectify this problem." % ("', '".join([ds.name for ds in self.host_object.datastore]),
                                                 "', '".join([ds.name for ds in self.vm.datastore]))

            self.module.fail_json(msg=msg)

        storage_vmotion_needed = True
        change_required = True

        if self.host_object and self.datastore_object:
            # We have both host system and datastore object
            if not self.datastore_object.summary.accessible:
                # Datastore is not accessible
                self.module.fail_json(msg='Destination datastore %s is'
                                      ' not accessible.' % dest_datastore)

            if self.datastore_object not in self.host_object.datastore:
                # Datastore is not associated with host system
                self.module.fail_json(
                    msg="Destination datastore %s provided"
                    " is not associated with destination"
                    " host system %s. Please specify"
                    " datastore value ['%s'] associated with"
                    " the given host system." %
                    (dest_datastore, dest_host_name, "', '".join(
                        [ds.name for ds in self.host_object.datastore])))

            if self.vm.runtime.host.name == dest_host_name and dest_datastore in [
                    ds.name for ds in self.vm.datastore
            ]:
                change_required = False

        if self.host_object and self.datastore_object is None:
            if self.vm.runtime.host.name == dest_host_name:
                # VM is already located on same host
                change_required = False

            storage_vmotion_needed = False

        elif self.datastore_object and self.host_object is None:
            if self.datastore_object in self.vm.datastore:
                # VM is already located on same datastore
                change_required = False

            if not self.datastore_object.summary.accessible:
                # Datastore is not accessible
                self.module.fail_json(msg='Destination datastore %s is'
                                      ' not accessible.' % dest_datastore)

        if module.check_mode:
            result['running_host'] = module.params['destination_host']
            result['changed'] = change_required
            module.exit_json(**result)

        if change_required:
            # Migrate VM and get Task object back
            task_object = self.migrate_vm()
            # Wait for task to complete
            try:
                wait_for_task(task_object)
            except TaskError as task_error:
                self.module.fail_json(msg=to_native(task_error))
            # If task was a success the VM has moved, update running_host and complete module
            if task_object.info.state == vim.TaskInfo.State.success:
                # The storage layout is not automatically refreshed, so we trigger it to get coherent module return values
                if storage_vmotion_needed:
                    self.vm.RefreshStorageInfo()
                result['running_host'] = module.params['destination_host']
                result['changed'] = True
                module.exit_json(**result)
            else:
                msg = 'Unable to migrate virtual machine due to an error, please check vCenter'
                if task_object.info.error is not None:
                    msg += " : %s" % task_object.info.error
                module.fail_json(msg=msg)
        else:
            try:
                host = self.vm.summary.runtime.host
                result['running_host'] = host.summary.config.name
            except vim.fault.NoPermission:
                result['running_host'] = 'NA'
            result['changed'] = False
            module.exit_json(**result)
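
Both the compute and the storage move go through a single RelocateSpec; whichever of host, pool, or datastore is set decides whether a vMotion, a Storage vMotion, or both take place. A minimal sketch, with all target objects assumed to be looked up already:

from pyVim.task import WaitForTask
from pyVmomi import vim

def migrate(vm, host=None, pool=None, datastore=None):
    """Relocate a VM; any combination of target host/pool/datastore works."""
    spec = vim.vm.RelocateSpec()
    spec.host = host            # set -> vMotion to this host
    spec.pool = pool            # resource pool to land in
    spec.datastore = datastore  # set -> Storage vMotion to this datastore
    WaitForTask(vm.RelocateVM_Task(spec))

# e.g. migrate(vm, host=esxi_host, pool=esxi_host.parent.resourcePool)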
Example no. 26
    def ensure(self):
        """Manage Active Directory Authentication for an ESXi host system"""
        results = dict(changed=False, result=dict())
        desired_state = self.params.get('ad_state')
        domain = self.params.get('ad_domain')
        ad_user = self.params.get('ad_user')
        ad_password = self.params.get('ad_password')
        host_change_list = []
        for host in self.hosts:
            changed = False
            results['result'][host.name] = dict(msg='')

            active_directory_info = self.get_ad_info(host)

            results['result'][host.name]['ad_state'] = desired_state
            results['result'][host.name]['ad_domain'] = domain
            if desired_state == 'present':
                # Don't do anything if already enabled and joined
                if active_directory_info.enabled:
                    # Joined and no problems with the domain membership
                    if active_directory_info.domainMembershipStatus == 'ok':
                        results['result'][host.name]['changed'] = False
                        results['result'][host.name][
                            'membership_state'] = active_directory_info.domainMembershipStatus
                        results['result'][host.name][
                            'joined_domain'] = active_directory_info.joinedDomain
                        results['result'][host.name][
                            'trusted_domains'] = active_directory_info.trustedDomain
                        results['result'][host.name]['msg'] = (
                            "Host is joined to AD domain and there are no problems with the domain membership"
                        )
                    # Joined, but problems with the domain membership
                    else:
                        changed = results['result'][
                            host.name]['changed'] = True
                        results['result'][host.name][
                            'membership_state'] = active_directory_info.domainMembershipStatus
                        results['result'][host.name][
                            'joined_domain'] = active_directory_info.joinedDomain
                        results['result'][host.name][
                            'trusted_domains'] = active_directory_info.trustedDomain
                        msg = host.name + " is joined to AD domain, but "
                        if active_directory_info.domainMembershipStatus == 'clientTrustBroken':
                            msg += "the client side of the trust relationship is broken"
                        elif active_directory_info.domainMembershipStatus == 'inconsistentTrust':
                            msg += "unexpected domain controller responded"
                        elif active_directory_info.domainMembershipStatus == 'noServers':
                            msg += "no domain controllers could be reached to confirm"
                        elif active_directory_info.domainMembershipStatus == 'serverTrustBroken':
                            msg += "the server side of the trust relationship is broken (or bad machine password)"
                        elif active_directory_info.domainMembershipStatus == 'otherProblem':
                            msg += "there are some problems with the domain membership"
                        elif active_directory_info.domainMembershipStatus == 'unknown':
                            msg += "the Active Directory integration provider does not support domain trust checks"
                        results['result'][host.name]['msg'] = msg
                        self.module.fail_json(msg=msg)
                # Enable and join AD domain
                else:
                    if self.module.check_mode:
                        changed = results['result'][
                            host.name]['changed'] = True
                        results['result'][
                            host.name]['ad_state_previous'] = "absent"
                        results['result'][
                            host.name]['ad_state_current'] = "present"
                        results['result'][host.name][
                            'msg'] = "Host would be joined to AD domain"
                    else:
                        ad_authentication = self.get_ad_auth_object(host)
                        try:
                            try:
                                task = ad_authentication.JoinDomain(
                                    domainName=domain,
                                    userName=ad_user,
                                    password=ad_password)
                                wait_for_task(task)
                            except TaskError as task_err:
                                self.module.fail_json(
                                    msg="Failed to join domain : %s" %
                                    to_native(task_err))
                            changed = results['result'][
                                host.name]['changed'] = True
                            results['result'][
                                host.name]['ad_state_previous'] = "absent"
                            results['result'][
                                host.name]['ad_state_current'] = "present"
                            results['result'][
                                host.name]['msg'] = "Host joined to AD domain"
                            active_directory_info = self.get_ad_info(host)
                            results['result'][host.name][
                                'membership_state'] = active_directory_info.domainMembershipStatus
                        except vim.fault.InvalidState as invalid_state:
                            self.module.fail_json(
                                msg="The host has already joined a domain : %s"
                                % to_native(invalid_state.msg))
                        except vim.fault.HostConfigFault as host_fault:
                            self.module.fail_json(
                                msg=
                                "The host configuration prevents the join operation from succeeding : %s"
                                % to_native(host_fault.msg))
                        except vim.fault.InvalidLogin as invalid_login:
                            self.module.fail_json(
                                msg="Credentials aren't valid : %s" %
                                to_native(invalid_login.msg))
                        except vim.fault.TaskInProgress as task_in_progress:
                            self.module.fail_json(
                                msg=
                                "The ActiveDirectoryAuthentication object is busy : %s"
                                % to_native(task_in_progress.msg))
                        except vim.fault.BlockedByFirewall as blocked_by_firewall:
                            self.module.fail_json(
                                msg=
                                "Ports needed by the join operation are blocked by the firewall : %s"
                                % to_native(blocked_by_firewall.msg))
                        except vim.fault.DomainNotFound as not_found:
                            self.module.fail_json(
                                msg=
                                "The domain controller can't be reached : %s" %
                                to_native(not_found.msg))
                        except vim.fault.NoPermissionOnAD as no_permission:
                            self.module.fail_json(
                                msg=
                                "The specified user has no right to add hosts to the domain : %s"
                                % to_native(no_permission.msg))
                        except vim.fault.InvalidHostName as invalid_host:
                            self.module.fail_json(
                                msg=
                                "The domain part of the host's FQDN doesn't match the domain being joined : %s"
                                % to_native(invalid_host.msg))
                        except vim.fault.ClockSkew as clock_skew:
                            self.module.fail_json(
                                msg=
                                "The clocks of the host and the domain controller differ by more "
                                "than the allowed amount of time : %s" %
                                to_native(clock_skew.msg))
                        except vim.fault.ActiveDirectoryFault as ad_fault:
                            self.module.fail_json(
                                msg="An error occurred during AD join : %s" %
                                to_native(ad_fault.msg))
            elif desired_state == 'absent':
                # Don't do anything if not joined to any AD domain
                if not active_directory_info.enabled:
                    results['result'][host.name]['changed'] = False
                    results['result'][host.name]['ad_state_current'] = "absent"
                    results['result'][
                        host.name]['msg'] = "Host isn't joined to an AD domain"
                # Disable and leave AD domain
                else:
                    if self.module.check_mode:
                        changed = results['result'][
                            host.name]['changed'] = True
                        results['result'][
                            host.name]['ad_state_previous'] = "present"
                        results['result'][
                            host.name]['ad_state_current'] = "absent"
                        results['result'][host.name]['msg'] = "Host would leave the AD domain '%s'" % \
                            active_directory_info.joinedDomain
                    else:
                        ad_authentication = self.get_ad_auth_object(host)
                        try:
                            try:
                                task = ad_authentication.LeaveCurrentDomain(
                                    force=True)
                                wait_for_task(task)
                            except TaskError as task_err:
                                self.module.fail_json(
                                    msg="Failed to join domain : %s" %
                                    to_native(task_err))
                            changed = results['result'][
                                host.name]['changed'] = True
                            results['result'][
                                host.name]['ad_state_previous'] = "present"
                            results['result'][
                                host.name]['ad_state_current'] = "absent"
                            results['result'][host.name]['msg'] = "Host left the AD domain '%s'" % \
                                active_directory_info.joinedDomain
                        except vim.fault.InvalidState as invalid_state:
                            self.module.fail_json(
                                msg=
                                "The host is not in a domain or there are active permissions for "
                                "Active Directory users : %s" %
                                to_native(invalid_state.msg))
                        except vim.fault.AuthMinimumAdminPermission as admin_permission:
                            self.module.fail_json(
                                msg=
                                "This change would leave the system with no Administrator permission "
                                "on the root node : %s" %
                                to_native(admin_permission.msg))
                        except vim.fault.TaskInProgress as task_in_progress:
                            self.module.fail_json(
                                msg=
                                "The ActiveDirectoryAuthentication object is busy : %s"
                                % to_native(task_in_progress.msg))
                        except vim.fault.NonADUserRequired as non_ad_user:
                            self.module.fail_json(
                                msg=
                                "Only non Active Directory users can initiate the leave domain operation : %s"
                                % to_native(non_ad_user.msg))
                        except vim.fault.ActiveDirectoryFault as ad_fault:
                            self.module.fail_json(
                                msg="An error occurred during AD leave : %s" %
                                to_native(ad_fault.msg))

            host_change_list.append(changed)

        if any(host_change_list):
            results['changed'] = True
        self.module.exit_json(**results)
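
The get_ad_info() helper used above is not shown; reading the membership status by hand is a matter of walking the host's authentication stores. A sketch, assuming 'host' is a vim.HostSystem:

from pyVmomi import vim

def get_ad_info(host):
    """Return the ActiveDirectoryInfo store of an ESXi host, or None."""
    auth_mgr = host.configManager.authenticationManager
    for store in auth_mgr.info.authConfig:
        if isinstance(store, vim.host.ActiveDirectoryInfo):
            return store
    return None

info = get_ad_info(host)
if info and info.enabled:
    print(info.joinedDomain, info.domainMembershipStatus)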
Example no. 27
    def _nic_present(self, network_params=None):
        changed = False
        diff = {'before': {}, 'after': {}}
        # backwards compatibility, clean up when params['networks']
        # has been removed
        if network_params:
            force = network_params['force']
            label = network_params['label']
            mac_address = network_params['mac_address']
            network_name = network_params['network_name']
            switch = network_params['switch']
            vlan_id = network_params['vlan_id']
        else:
            force = self.params['force']
            label = self.params['label']
            mac_address = self.params['mac_address']
            network_name = self.params['network_name']
            switch = self.params['switch']
            vlan_id = self.params['vlan_id']

        vm_obj = self.get_vm()
        if not vm_obj:
            self.module.fail_json(
                msg='could not find vm: {0}'.format(self.params['name']))

        network_obj = self._get_network_object(vm_obj, network_params)
        nic_info, nic_obj_lst = self._get_nics_from_vm(vm_obj)
        label_lst = [d.get('label') for d in nic_info]
        mac_addr_lst = [d.get('mac_address') for d in nic_info]
        vlan_id_lst = [d.get('vlan_id') for d in nic_info]
        network_name_lst = [d.get('network_name') for d in nic_info]

        # TODO: make the checks below more elegant
        if ((vlan_id in vlan_id_lst or network_name in network_name_lst)
                and not mac_address and not label and not force):
            for nic in nic_info:
                diff['before'].update({nic.get('mac_address'): copy.copy(nic)})
                diff['after'].update({nic.get('mac_address'): copy.copy(nic)})
            return diff, changed, nic_info

        if not network_obj and (network_name or vlan_id):
            self.module.fail_json(
                msg=
                'unable to find specified network_name/vlan_id ({0}), check parameters'
                .format(network_name or vlan_id))

        for nic in nic_info:
            diff['before'].update({nic.get('mac_address'): copy.copy(nic)})

        if (mac_address
                and mac_address in mac_addr_lst) or (label
                                                     and label in label_lst):
            for nic_obj in nic_obj_lst:
                if (mac_address and nic_obj.macAddress == mac_address) or (
                        label and label == nic_obj.deviceInfo.label):
                    device_spec = self._new_nic_spec(vm_obj, nic_obj,
                                                     network_params)

            # fabricate diff for check_mode
            if self.module.check_mode:
                for nic in nic_info:
                    nic_mac = nic.get('mac_address')
                    nic_label = nic.get('label')
                    if nic_mac == mac_address or nic_label == label:
                        diff['after'][nic_mac] = copy.deepcopy(nic)
                        diff['after'][nic_mac].update(
                            {'switch': switch or nic['switch']})
                        if network_obj:
                            diff['after'][nic_mac].update({
                                'vlan_id':
                                self._get_vlanid_from_network(network_obj),
                                'network_name':
                                network_obj.name
                            })
                    else:
                        diff['after'].update({nic_mac: copy.deepcopy(nic)})

        if (not mac_address or mac_address
                not in mac_addr_lst) and (not label or label not in label_lst):
            device_spec = self._new_nic_spec(vm_obj, None, network_params)
            device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
            if self.module.check_mode:
                # fabricate diff/returns for checkmode
                diff['after'] = copy.deepcopy(diff['before'])
                nic_mac = mac_address
                if not nic_mac:
                    nic_mac = 'AA:BB:CC:DD:EE:FF'
                if not label:
                    label = 'check_mode_adapter'
                diff['after'].update({
                    nic_mac: {
                        'vlan_id': self._get_vlanid_from_network(network_obj),
                        'network_name': network_obj.name,
                        'label': label,
                        'mac_address': nic_mac,
                        'unit_number': 40000
                    }
                })

        if self.module.check_mode:
            network_info = [diff['after'][i] for i in diff['after']]
            if diff['after'] != diff['before']:
                changed = True
            return diff, changed, network_info

        try:
            task = vm_obj.ReconfigVM_Task(
                vim.vm.ConfigSpec(deviceChange=[device_spec]))
            wait_for_task(task)
        except (vim.fault.InvalidDeviceSpec,
                vim.fault.RestrictedVersion) as e:
            self.module.fail_json(msg='failed to reconfigure guest',
                                  detail=e.msg)

        if task.info.state == 'error':
            self.module.fail_json(msg='failed to reconfigure guest',
                                  detail=task.info.error.msg)

        vm_obj = self.get_vm()
        network_info, nic_obj_lst = self._get_nics_from_vm(vm_obj)
        for nic in network_info:
            diff['after'].update({nic.get('mac_address'): copy.copy(nic)})

        if diff['after'] != diff['before']:
            changed = True
        return diff, changed, network_info
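
The _new_nic_spec() helper is not shown here; for a standard-switch network, the 'add' device spec it produces looks roughly like this sketch (vmxnet3 and the connectable settings are placeholder choices):

from pyVmomi import vim

def make_nic_spec(network):
    """Build an 'add' device spec for a vmxnet3 NIC on a standard network."""
    nic = vim.vm.device.VirtualVmxnet3()
    nic.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
    nic.backing.network = network
    nic.backing.deviceName = network.name
    nic.connectable = vim.vm.device.VirtualDevice.ConnectInfo(
        startConnected=True, connected=True)
    spec = vim.vm.device.VirtualDeviceSpec()
    spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    spec.device = nic
    return spec

# vm_obj.ReconfigVM_Task(vim.vm.ConfigSpec(deviceChange=[make_nic_spec(net)]))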
Example no. 28
def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'],
                        default='first'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        dest_folder=dict(type='str', required=True),
        datacenter=dict(type='str', required=True),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'uuid', 'moid']],
                           mutually_exclusive=[['name', 'uuid', 'moid']],
                           supports_check_mode=True)

    # FindByInventoryPath() does not require an absolute path
    # so we should leave the input folder path unmodified
    module.params['dest_folder'] = module.params['dest_folder'].rstrip('/')
    pyv = PyVmomiHelper(module)
    search_index = pyv.content.searchIndex

    # Check if the VM exists before continuing
    vm = pyv.get_vm()

    # VM exists
    if vm:
        try:
            vm_path = pyv.get_vm_path(pyv.content, vm).lstrip('/')
            if module.params['name']:
                vm_name = module.params['name']
            else:
                vm_name = vm.name

            vm_full = vm_path + '/' + vm_name
            folder = search_index.FindByInventoryPath(
                module.params['dest_folder'])
            if folder is None:
                module.fail_json(msg="Folder name and/or path does not exist")
            vm_to_move = search_index.FindByInventoryPath(vm_full)
            if module.check_mode:
                module.exit_json(changed=True, instance=pyv.gather_facts(vm))
            if vm_path != module.params['dest_folder'].lstrip('/'):
                move_task = folder.MoveInto([vm_to_move])
                changed, err = wait_for_task(move_task)
                if changed:
                    module.exit_json(changed=True,
                                     instance=pyv.gather_facts(vm))
            else:
                module.exit_json(instance=pyv.gather_facts(vm))
        except Exception as exc:
            module.fail_json(msg="Failed to move VM with exception %s" %
                             to_native(exc))
    else:
        if module.check_mode:
            module.exit_json(changed=False)
        vm_id = (module.params.get('uuid') or module.params.get('name')
                 or module.params.get('moid'))
        module.fail_json(msg="Unable to find VM %s to move to %s" %
                         (vm_id, module.params.get('dest_folder')))
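
FindByInventoryPath() resolves both folders and VMs from a slash-separated inventory path, which is what makes the lookup above work. For example (datacenter, folder, and VM names are placeholders; 'content' comes from an earlier connection):

from pyVim.task import WaitForTask

search_index = content.searchIndex

# Folders live under the datacenter's 'vm' root in the inventory tree
folder = search_index.FindByInventoryPath('DC01/vm/Prod/Web')
vm = search_index.FindByInventoryPath('DC01/vm/Prod/Web/web01')
if folder and vm:
    WaitForTask(folder.MoveInto([vm]))  # MoveIntoFolder_Task under the hood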
Example no. 29
    def ensure(self):
        """
        Manage internal state management
        """
        state = self.module.params.get('state')
        datacenter_name = self.module.params.get('datacenter')
        folder_type = self.module.params.get('folder_type')
        folder_name = self.module.params.get('folder_name')
        parent_folder = self.module.params.get('parent_folder', None)
        results = {'changed': False, 'result': {}}
        if state == 'present':
            # Check if the folder already exists
            p_folder_obj = None
            if parent_folder:
                if "/" in parent_folder:
                    parent_folder_parts = parent_folder.strip('/').split('/')
                    p_folder_obj = None
                    for part in parent_folder_parts:
                        part_folder_obj = self.get_folder(
                            datacenter_name=datacenter_name,
                            folder_name=part,
                            folder_type=folder_type,
                            parent_folder=p_folder_obj)
                        if not part_folder_obj:
                            self.module.fail_json(
                                msg="Could not find folder %s" % part)
                        p_folder_obj = part_folder_obj
                    child_folder_obj = self.get_folder(
                        datacenter_name=datacenter_name,
                        folder_name=folder_name,
                        folder_type=folder_type,
                        parent_folder=p_folder_obj)
                    if child_folder_obj:
                        results['result'] = "Folder %s already exists under" \
                                            " parent folder %s" % (folder_name, parent_folder)
                        self.module.exit_json(**results)
                else:
                    p_folder_obj = self.get_folder(
                        datacenter_name=datacenter_name,
                        folder_name=parent_folder,
                        folder_type=folder_type)

                    if not p_folder_obj:
                        self.module.fail_json(
                            msg="Parent folder %s does not exist" %
                            parent_folder)

                    # Check if folder exists under parent folder
                    child_folder_obj = self.get_folder(
                        datacenter_name=datacenter_name,
                        folder_name=folder_name,
                        folder_type=folder_type,
                        parent_folder=p_folder_obj)
                    if child_folder_obj:
                        results['result']['path'] = self.get_folder_path(
                            child_folder_obj)
                        results['result'] = "Folder %s already exists under" \
                                            " parent folder %s" % (folder_name, parent_folder)
                        self.module.exit_json(**results)
            else:
                folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                             folder_name=folder_name,
                                             folder_type=folder_type)

                if folder_obj:
                    results['result']['path'] = self.get_folder_path(
                        folder_obj)
                    results['result'][
                        'msg'] = "Folder %s already exists" % folder_name
                    self.module.exit_json(**results)

            # Create a new folder
            try:
                if parent_folder and p_folder_obj:
                    if self.module.check_mode:
                        results['msg'] = "Folder '%s' of type '%s' under '%s' will be created." % \
                                         (folder_name, folder_type, parent_folder)
                    else:
                        new_folder = p_folder_obj.CreateFolder(folder_name)
                        results['result']['path'] = self.get_folder_path(
                            new_folder)
                        results['result']['msg'] = "Folder '%s' of type '%s' under '%s' created" \
                            " successfully." % (folder_name, folder_type, parent_folder)
                    results['changed'] = True
                elif not parent_folder and not p_folder_obj:
                    if self.module.check_mode:
                        results[
                            'msg'] = "Folder '%s' of type '%s' will be created." % (
                                folder_name, folder_type)
                    else:
                        new_folder = self.datacenter_folder_type[
                            folder_type].CreateFolder(folder_name)
                        results['result'][
                            'msg'] = "Folder '%s' of type '%s' created successfully." % (
                                folder_name, folder_type)
                        results['result']['path'] = self.get_folder_path(
                            new_folder)
                    results['changed'] = True
            except vim.fault.DuplicateName as duplicate_name:
                # To be consistent with the other vmware modules, We decided to accept this error
                # and the playbook should simply carry on with other tasks.
                # User will have to take care of this exception
                # https://github.com/ansible/ansible/issues/35388#issuecomment-362283078
                results['changed'] = False
                results['msg'] = "Failed to create folder as another object has same name" \
                                 " in the same target folder : %s" % to_native(duplicate_name.msg)
            except vim.fault.InvalidName as invalid_name:
                self.module.fail_json(
                    msg="Failed to create folder as folder name is not a valid "
                    "entity name : %s" % to_native(invalid_name.msg))
            except Exception as general_exc:
                self.module.fail_json(
                    msg="Failed to create folder due to generic"
                    " exception : %s " % to_native(general_exc))
            self.module.exit_json(**results)
        elif state == 'absent':
            # Check if the folder already exists
            p_folder_obj = None
            if parent_folder:
                if "/" in parent_folder:
                    parent_folder_parts = parent_folder.strip('/').split('/')
                    p_folder_obj = None
                    for part in parent_folder_parts:
                        part_folder_obj = self.get_folder(
                            datacenter_name=datacenter_name,
                            folder_name=part,
                            folder_type=folder_type,
                            parent_folder=p_folder_obj)
                        if not part_folder_obj:
                            self.module.fail_json(
                                msg="Could not find folder %s" % part)
                        p_folder_obj = part_folder_obj
                    folder_obj = self.get_folder(
                        datacenter_name=datacenter_name,
                        folder_name=folder_name,
                        folder_type=folder_type,
                        parent_folder=p_folder_obj)
                else:
                    p_folder_obj = self.get_folder(
                        datacenter_name=datacenter_name,
                        folder_name=parent_folder,
                        folder_type=folder_type)

                    if not p_folder_obj:
                        self.module.fail_json(
                            msg="Parent folder %s does not exist" %
                            parent_folder)

                    # Check if folder exists under parent folder
                    folder_obj = self.get_folder(
                        datacenter_name=datacenter_name,
                        folder_name=folder_name,
                        folder_type=folder_type,
                        parent_folder=p_folder_obj)
            else:
                folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                             folder_name=folder_name,
                                             folder_type=folder_type)
            if folder_obj:
                try:
                    if parent_folder:
                        if self.module.check_mode:
                            results['changed'] = True
                            results['msg'] = "Folder '%s' of type '%s' under '%s' will be removed." % \
                                             (folder_name, folder_type, parent_folder)
                        else:
                            if folder_type == 'vm':
                                task = folder_obj.UnregisterAndDestroy()
                            else:
                                task = folder_obj.Destroy()
                            results['changed'], results['msg'] = wait_for_task(
                                task=task)
                    else:
                        if self.module.check_mode:
                            results['changed'] = True
                            results[
                                'msg'] = "Folder '%s' of type '%s' will be removed." % (
                                    folder_name, folder_type)
                        else:
                            if folder_type == 'vm':
                                task = folder_obj.UnregisterAndDestroy()
                            else:
                                task = folder_obj.Destroy()
                            results['changed'], results['msg'] = wait_for_task(
                                task=task)
                except vim.fault.ConcurrentAccess as concurrent_access:
                    self.module.fail_json(
                        msg="Failed to remove folder as another client"
                        " modified folder before this operation : %s" %
                        to_native(concurrent_access.msg))
                except vim.fault.InvalidState as invalid_state:
                    self.module.fail_json(
                        msg="Failed to remove folder as folder is in"
                        " invalid state : %s" % to_native(invalid_state.msg))
                except Exception as gen_exec:
                    self.module.fail_json(
                        msg="Failed to remove folder due to generic"
                        " exception %s " % to_native(gen_exec))
            self.module.exit_json(**results)
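
Folder creation itself is a one-liner on the parent Folder object; the bulk of the module above is path resolution and idempotence checks. A sketch, assuming 'datacenter' is already looked up and the folder name is a placeholder:

# Each folder type hangs off a different datacenter root
roots = {
    'vm': datacenter.vmFolder,
    'host': datacenter.hostFolder,
    'datastore': datacenter.datastoreFolder,
    'network': datacenter.networkFolder,
}

new_folder = roots['vm'].CreateFolder('my-new-folder')
# Removal is a task: new_folder.UnregisterAndDestroy() for vm folders,
# new_folder.Destroy() otherwise.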
Example no. 30
    def _nic_absent(self, network_params=None):
        changed = False
        diff = {'before': {}, 'after': {}}
        if network_params:
            mac_address = network_params['mac_address']
        else:
            mac_address = self.params['mac_address']

        device_spec = None
        vm_obj = self.get_vm()
        if not vm_obj:
            self.module.fail_json(
                msg='could not find vm: {0}'.format(self.params['name']))
        nic_info, nic_obj_lst = self._get_nics_from_vm(vm_obj)

        for nic in nic_info:
            diff['before'].update({nic['mac_address']: copy.copy(nic)})

        network_info = copy.deepcopy(nic_info)

        for nic_obj in nic_obj_lst:
            if nic_obj.macAddress == mac_address:
                if self.module.check_mode:
                    changed = True
                    for nic in nic_info:
                        if nic.get('mac_address') != nic_obj.macAddress:
                            diff['after'].update(
                                {nic['mac_address']: copy.copy(nic)})
                    network_info = [
                        nic for nic in nic_info
                        if nic.get('mac_address') != nic_obj.macAddress
                    ]
                    return diff, changed, network_info
                device_spec = vim.vm.device.VirtualDeviceSpec(
                    device=nic_obj,
                    operation=vim.vm.device.VirtualDeviceSpec.Operation.remove)
                break

        if not device_spec:
            diff['after'] = diff['before']
            return diff, changed, network_info

        try:
            task = vm_obj.ReconfigVM_Task(
                vim.vm.ConfigSpec(deviceChange=[device_spec]))
            wait_for_task(task)
        except (vim.fault.InvalidDeviceSpec, vim.fault.RestrictedVersion) as e:
            self.module.fail_json(msg='failed to reconfigure guest',
                                  detail=e.msg)

        if task.info.state == 'error':
            self.module.fail_json(msg='failed to reconfigure guest',
                                  detail=task.info.error.msg)

        vm_obj = self.get_vm()
        nic_info, nic_obj_lst = self._get_nics_from_vm(vm_obj)

        for nic in nic_info:
            diff['after'].update({nic.get('mac_address'): copy.copy(nic)})

        network_info = nic_info
        if diff['after'] != diff['before']:
            changed = True

        return diff, changed, network_info
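
The same removal, reduced to its pyVmomi core: find the ethernet card by MAC address on the VM's hardware list and submit a 'remove' device spec. A sketch with a placeholder MAC address:

from pyVim.task import WaitForTask
from pyVmomi import vim

def remove_nic(vm, mac):
    """Remove the NIC whose MAC address matches, if present."""
    for dev in vm.config.hardware.device:
        if isinstance(dev, vim.vm.device.VirtualEthernetCard) \
                and dev.macAddress == mac:
            spec = vim.vm.device.VirtualDeviceSpec(
                device=dev,
                operation=vim.vm.device.VirtualDeviceSpec.Operation.remove)
            WaitForTask(vm.ReconfigVM_Task(
                vim.vm.ConfigSpec(deviceChange=[spec])))
            return True
    return False

# remove_nic(vm_obj, '00:50:56:aa:bb:cc')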