def reconfigure_vm_serial_port(self, vm_obj):
        """
        Reconfigure vm with new or modified serial port config spec.

        Each new serial port in self.serial_ports is applied in its own
        ReconfigVM_Task (backing types differ and config_spec.device must be
        unique per spec); afterwards self.config_spec — built by
        get_serial_port_config_spec() — is applied in one final task.

        :param vm_obj: virtual machine managed object to reconfigure
        :return: dict with 'changed'/'failed' and either 'msg' (on task error)
                 or 'serial_port_info'. Calls self.module.fail_json() (which
                 exits) on InvalidDatastorePath / RestrictedVersion faults.
        """
        self.get_serial_port_config_spec(vm_obj)
        try:
            # configure create tasks first
            if self.serial_ports:
                for serial_port in self.serial_ports:
                    # each type of serial port is of config_spec.device = vim.vm.device.VirtualSerialPort() object type
                    # because serial ports differ in the backing types and config_spec.device has to be unique,
                    # we are creating a new spec for every create port configuration
                    spec = vim.vm.ConfigSpec()
                    spec.deviceChange.append(serial_port)
                    task = vm_obj.ReconfigVM_Task(spec=spec)
                    wait_for_task(task)
            task = vm_obj.ReconfigVM_Task(spec=self.config_spec)
            wait_for_task(task)
        except vim.fault.InvalidDatastorePath as e:
            self.module.fail_json(msg="Failed to configure serial port on given virtual machine due to invalid path: %s" % to_native(e.msg))
        except vim.fault.RestrictedVersion as e:
            self.module.fail_json(msg="Failed to reconfigure virtual machine due to product versioning restrictions: %s" % to_native(e.msg))
        # 'task' here is the last reconfigure task started above
        if task.info.state == 'error':
            results = {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg}
        else:
            serial_port_info = get_serial_port_info(vm_obj)
            results = {'changed': self.change_applied, 'failed': False, 'serial_port_info': serial_port_info}

        return results
# Example #2
    def inject_ovf_env(self):
        """
        Build an OVF environment XML document from self.params['properties']
        and write it into the VM's extraConfig under 'guestinfo.ovfEnv',
        blocking until the ReconfigVM_Task completes.
        """
        # Namespace declarations for the OVF environment document.
        attrib = {
            'xmlns': 'http://schemas.dmtf.org/ovf/environment/1',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xmlns:oe': 'http://schemas.dmtf.org/ovf/environment/1',
            'xmlns:ve': 'http://www.vmware.com/schema/ovfenv',
            'oe:id': '',
            've:esxId': self.entity._moId
        }
        env = ET.Element('Environment', **attrib)

        platform = ET.SubElement(env, 'PlatformSection')
        ET.SubElement(platform, 'Kind').text = self.content.about.name
        ET.SubElement(platform, 'Version').text = self.content.about.version
        ET.SubElement(platform, 'Vendor').text = self.content.about.vendor
        ET.SubElement(platform, 'Locale').text = 'US'

        prop_section = ET.SubElement(env, 'PropertySection')
        for key, value in self.params['properties'].items():
            # Only booleans are explicitly stringified here; other values are
            # assumed to already be strings — TODO confirm against callers.
            params = {
                'oe:key': key,
                'oe:value': str(value) if isinstance(value, bool) else value
            }
            ET.SubElement(prop_section, 'Property', **params)

        opt = vim.option.OptionValue()
        opt.key = 'guestinfo.ovfEnv'
        opt.value = '<?xml version="1.0" encoding="UTF-8"?>' + to_native(
            ET.tostring(env))

        config_spec = vim.vm.ConfigSpec()
        config_spec.extraConfig = [opt]

        task = self.entity.ReconfigVM_Task(config_spec)
        wait_for_task(task)
# Example #3
    def reconnect_host_to_vcenter(self):
        """
        Reconnect self.host to vCenter and return (success, result) from
        wait_for_task.

        If ESXi credentials were supplied, a connect spec is attached and the
        first attempt's TaskError (expected to carry an SSL verify fault) is
        used to learn the host's SSL thumbprint before the final attempt.
        """
        reconnecthost_args = {}
        reconnecthost_args['reconnectSpec'] = vim.HostSystem.ReconnectSpec()
        reconnecthost_args['reconnectSpec'].syncState = True

        if self.esxi_username is not None or self.esxi_password is not None:
            reconnecthost_args['cnxSpec'] = vim.host.ConnectSpec()
            reconnecthost_args['cnxSpec'].hostName = self.esxi_hostname
            reconnecthost_args['cnxSpec'].userName = self.esxi_username
            reconnecthost_args['cnxSpec'].password = self.esxi_password
            reconnecthost_args['cnxSpec'].force = True
            # Empty thumbprint provokes the SSL verify fault from which the
            # real thumbprint is harvested below.
            reconnecthost_args['cnxSpec'].sslThumbprint = ""

            try:
                task = self.host.ReconnectHost_Task(**reconnecthost_args)
                success, result = wait_for_task(task)
                return success, result
            except TaskError as add_task_error:
                # See add_host_to_vcenter
                ssl_verify_fault = add_task_error.args[0]
                reconnecthost_args['cnxSpec'].sslThumbprint = ssl_verify_fault.thumbprint

        # Final attempt; any TaskError here propagates to the caller.
        task = self.host.ReconnectHost_Task(**reconnecthost_args)
        success, result = wait_for_task(task)
        return success, result
# Example #4
    def reconnect_host_to_vcenter(self):
        """
        Reconnect self.host to vCenter and return (success, result) from
        wait_for_task; calls self.module.fail_json() (which exits) on failure.

        With ESXi credentials supplied, up to two attempts are made: when the
        first fails with an SSLVerifyFault and no thumbprint was configured,
        the fault's thumbprint is adopted for the retry.
        """
        reconnecthost_args = {}
        reconnecthost_args['reconnectSpec'] = vim.HostSystem.ReconnectSpec()
        reconnecthost_args['reconnectSpec'].syncState = True

        if self.esxi_username is not None or self.esxi_password is not None:
            reconnecthost_args['cnxSpec'] = self.get_host_connect_spec()

            for count in range(0, 2):
                try:
                    task = self.host.ReconnectHost_Task(**reconnecthost_args)
                    success, result = wait_for_task(task)
                    return success, result
                except TaskError as task_error_exception:
                    task_error = task_error_exception.args[0]
                    if self.esxi_ssl_thumbprint == '' and isinstance(task_error, vim.fault.SSLVerifyFault):
                        # User has not specified SSL Thumbprint for ESXi host,
                        # try to grab it using SSLVerifyFault exception
                        reconnecthost_args['cnxSpec'].sslThumbprint = task_error.thumbprint
                    else:
                        self.module.fail_json(msg="Failed to reconnect host %s to vCenter: %s" % (self.esxi_hostname,
                                                                                                  to_native(task_error.msg)))
            self.module.fail_json(msg="Failed to reconnect host %s to vCenter" % self.esxi_hostname)
        else:
            try:
                task = self.host.ReconnectHost_Task(**reconnecthost_args)
                success, result = wait_for_task(task)
                return success, result
            except TaskError as task_error_exception:
                task_error = task_error_exception.args[0]
                self.module.fail_json(msg="Failed to reconnect host %s to vCenter due to %s" % (self.esxi_hostname,
                                                                                                to_native(task_error.msg)))
# Example #5
    def __create_vm_group(self):
        """
        Create or edit the cluster VM group self.__group_name containing
        self.__vm_obj_list; honours check mode, records the result via
        self.__set_result and sets self.__changed / self.__msg.
        """

        # Check if anything has changed when editing
        if self.__operation == 'add' or (self.__operation == 'edit' and
                                         self.__check_if_vms_hosts_changed()):

            group = vim.cluster.VmGroup()

            group.name = self.__group_name
            group.vm = self.__vm_obj_list

            group_spec = vim.cluster.GroupSpec(info=group,
                                               operation=self.__operation)
            config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

            # Check if dry run
            if not self.module.check_mode:
                task = self.__cluster_obj.ReconfigureEx(config_spec,
                                                        modify=True)
                wait_for_task(task)

            self.__set_result(group)
            self.__changed = True

        # NOTE(review): the message reflects the operation even when nothing
        # changed during an edit — confirm this is intentional.
        if self.__operation == 'edit':
            self.__msg = "Updated vm group %s successfully" % (
                self.__group_name)
        else:
            self.__msg = "Created vm group %s successfully" % (
                self.__group_name)
# Example #6
 def clone(self):
     """Clone self.vm_obj into the destination folder/name given in
     self.params and return a result dict ('changed', 'failed', plus
     'msg' on task error or 'vm_info' on success); fail_json() is called
     when the destination folder cannot be found."""
     # clone the vm/template on destination VC
     vm_folder = find_folder_by_name(
         content=self.destination_content,
         folder_name=self.params['destination_vm_folder'])
     if not vm_folder:
         self.module.fail_json(
             msg=
             "Destination folder does not exist. Please refer to the documentation to correctly specify the folder."
         )
     vm_name = self.params['destination_vm_name']
     task = self.vm_obj.Clone(folder=vm_folder,
                              name=vm_name,
                              spec=self.clone_spec)
     wait_for_task(task)
     if task.info.state == 'error':
         result = {
             'changed': False,
             'failed': True,
             'msg': task.info.error.msg
         }
     else:
         vm_info = self.get_new_vm_info(vm_name)
         result = {'changed': True, 'failed': False, 'vm_info': vm_info}
     return result
# Example #7
    def delete_drs_group(self):
        """
        Function to delete a DRS host/vm group.

        Honours check mode; sets self.__changed and self.__msg. A missing
        group is not treated as an error.
        """

        if self.__group_obj is not None:

            self.__changed = True

            # Check if dry run
            if not self.module.check_mode:

                group_spec = vim.cluster.GroupSpec(removeKey=self.__group_name,
                                                   operation=self.__operation)
                config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

                task = self.__cluster_obj.ReconfigureEx(config_spec,
                                                        modify=True)
                wait_for_task(task)

        # Don't throw an error if the group does not exist. Simply set changed = False
        if self.__changed:
            self.__msg = "Deleted group `%s` successfully" % (
                self.__group_name)
        else:
            self.__msg = "DRS group `%s` does not exists or already deleted" % (
                self.__group_name)
# Example #8
    def set_port_security_promiscuous(self, ports, state):
        """Set the given ports to the given promiscuous state.
        Parameters
        ----------
        ports : str[]
            List of port keys to reconfigure
        state: bool
            State of the promiscuous mode, if true its allowed, else not.
        """
        # Creating the new port policy
        port_spec = []
        vim_bool = vim.BoolPolicy(value=state)
        port_policy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy(allowPromiscuous=vim_bool)
        port_settings = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy(securityPolicy=port_policy)
        # One edit spec per port, all applied in a single reconfigure task.
        for port in ports:
            temp_port_spec = vim.dvs.DistributedVirtualPort.ConfigSpec(
                operation="edit",
                key=port,
                setting=port_settings
            )
            port_spec.append(temp_port_spec)

        task = self.dv_switch.ReconfigureDVPort_Task(port_spec)
        try:
            wait_for_task(task)
        except Exception:
            # Roll back before failing the module with the task's error.
            self.restore_original_state()
            self.module.fail_json(msg=task.info.error.msg)
# Example #9
    def delete(self, rule_name=None):
        """
        Function to delete VM-Host DRS rule using name.

        :param rule_name: name of the rule to delete; defaults to
            self.__rule_name when omitted.

        Honours check mode; sets self.__changed and self.__msg. A missing
        rule is not treated as an error.
        """
        # Fixes vs. original: removed the unused local `changed = False`
        # and report the rule actually deleted (rule_name) in the messages
        # instead of always self.__rule_name.
        if rule_name is None:
            rule_name = self.__rule_name

        rule_obj = self.__get_rule_key_by_name(rule_name=rule_name)

        if rule_obj is not None:

            rule_key = int(rule_obj.key)
            rule_spec = vim.cluster.RuleSpec(removeKey=rule_key,
                                             operation='remove')
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])

            if not self.module.check_mode:

                task = self.__cluster_obj.ReconfigureEx(config_spec,
                                                        modify=True)
                wait_for_task(task)

            self.__changed = True

        if self.__changed:
            self.__msg = "Deleted DRS rule `%s` successfully" % rule_name
        else:
            self.__msg = "DRS Rule `%s` does not exists or already deleted" % rule_name
# Example #10
    def add_host_to_vcenter(self):
        """
        Add the ESXi host to the cluster and return (success, result) from
        wait_for_task.

        The first attempt uses an empty SSL thumbprint and is expected to
        fail; the thumbprint from the resulting fault is used for the second
        attempt, whose TaskError (if any) propagates to the caller.
        """
        host_connect_spec = vim.host.ConnectSpec()
        host_connect_spec.hostName = self.esxi_hostname
        host_connect_spec.userName = self.esxi_username
        host_connect_spec.password = self.esxi_password
        host_connect_spec.force = True
        host_connect_spec.sslThumbprint = ""
        as_connected = True
        esxi_license = None
        resource_pool = None

        try:
            task = self.cluster.AddHost_Task(host_connect_spec, as_connected,
                                             resource_pool, esxi_license)
            success, result = wait_for_task(task)
            return success, result
        except TaskError as add_task_error:
            # This is almost certain to fail the first time.
            # In order to get the sslThumbprint we first connect
            # get the vim.fault.SSLVerifyFault then grab the sslThumbprint
            # from that object.
            #
            # args is a tuple, selecting the first tuple
            ssl_verify_fault = add_task_error.args[0]
            host_connect_spec.sslThumbprint = ssl_verify_fault.thumbprint

        task = self.cluster.AddHost_Task(host_connect_spec, as_connected,
                                         resource_pool, esxi_license)
        success, result = wait_for_task(task)
        return success, result
    def _update_version3_resources(self, resources):
        """
        Reconfigure the DVS's version-3 infrastructure traffic resource
        allocations and wait for the reconfigure task.

        :param resources: list of dicts, each with 'name' and any of
            'limit', 'reservation', 'shares_level', 'shares'. fail_json()
            is called when shares_level is 'custom' but 'shares' is absent.
        """
        allocations = list()

        for resource in resources:
            allocation = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource(
            )
            allocation.allocationInfo = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource.ResourceAllocation(
            )
            allocation.key = resource['name']
            if 'limit' in resource:
                allocation.allocationInfo.limit = resource['limit']
            if 'reservation' in resource:
                allocation.allocationInfo.reservation = resource['reservation']
            if 'shares_level' in resource:
                allocation.allocationInfo.shares = vim.SharesInfo()
                allocation.allocationInfo.shares.level = resource[
                    'shares_level']
                if 'shares' in resource and resource[
                        'shares_level'] == 'custom':
                    allocation.allocationInfo.shares.shares = resource[
                        'shares']
                elif resource['shares_level'] == 'custom':
                    # custom level requires an explicit share count
                    self.module.fail_json(
                        msg=
                        "Resource %s, shares_level set to custom but shares not specified"
                        % resource['name'])

            allocations.append(allocation)

        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dvs.config.configVersion
        spec.infrastructureTrafficResourceConfig = allocations

        task = self.dvs.ReconfigureDvs_Task(spec)
        wait_for_task(task)
# Example #12
    def add_host_to_vcenter(self):
        """
        Add the ESXi host to the cluster and return (success, result) from
        wait_for_task.

        The first attempt deliberately uses an empty SSL thumbprint, which
        is expected to fail with an SSL verify fault; the thumbprint carried
        by that fault is adopted for the second attempt. A TaskError on the
        second attempt propagates to the caller.
        """
        connect_spec = vim.host.ConnectSpec()
        connect_spec.hostName = self.esxi_hostname
        connect_spec.userName = self.esxi_username
        connect_spec.password = self.esxi_password
        connect_spec.force = True
        connect_spec.sslThumbprint = ""

        for attempt in range(2):
            try:
                # positional args: spec, asConnected, resourcePool, license
                task = self.cluster.AddHost_Task(connect_spec, True, None, None)
                return wait_for_task(task)
            except TaskError as task_error:
                if attempt:
                    # second failure is final
                    raise
                # args is a tuple; the first element is the SSL verify fault
                fault = task_error.args[0]
                connect_spec.sslThumbprint = fault.thumbprint
# Example #13
    def reconfigure_vm_network(self, vm_obj):
        """
        Gather and/or reconfigure the VM's network adapters.

        When gather_network_info is requested, or no network changes were
        supplied, only current adapter info is returned; otherwise the
        prepared config spec is applied first.

        :param vm_obj: virtual machine managed object
        :return: dict with 'changed'/'failed' and either 'network_data' or
                 'msg'. fail_json() (which exits) is called on
                 InvalidDeviceSpec / RestrictedVersion faults.
        """
        network_list = self.sanitize_network_params()
        # gather network adapter info only
        if (self.params['gather_network_info'] is not None and self.params['gather_network_info']) or len(network_list) == 0:
            results = {'changed': False, 'failed': False, 'network_data': self.get_network_info(vm_obj)}
        # do reconfigure then gather info
        else:
            self.get_network_config_spec(vm_obj, network_list)
            try:
                task = vm_obj.ReconfigVM_Task(spec=self.config_spec)
                wait_for_task(task)
            except vim.fault.InvalidDeviceSpec as e:
                self.module.fail_json(msg="Failed to configure network adapter on given virtual machine due to invalid"
                                          " device spec : %s" % to_native(e.msg),
                                      details="Please check ESXi server logs for more details.")
            except vim.fault.RestrictedVersion as e:
                self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
                                          " product versioning restrictions: %s" % to_native(e.msg))
            if task.info.state == 'error':
                results = {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}
            else:
                network_info = self.get_network_info(vm_obj)
                results = {'changed': self.change_detected, 'failed': False, 'network_data': network_info}

        return results
# Example #14
    def reconnect_host_to_vcenter(self):
        """
        Reconnect self.host to vCenter and return (success, result) from
        wait_for_task; calls self.module.fail_json() (which exits) on failure.

        With ESXi credentials supplied, up to two attempts are made: when the
        first fails with an SSLVerifyFault and no thumbprint was configured,
        the fault's thumbprint is adopted for the retry.
        """
        reconnecthost_args = {}
        reconnecthost_args['reconnectSpec'] = vim.HostSystem.ReconnectSpec()
        reconnecthost_args['reconnectSpec'].syncState = True

        if self.esxi_username is not None or self.esxi_password is not None:
            reconnecthost_args['cnxSpec'] = self.get_host_connect_spec()

            for count in range(0, 2):
                try:
                    task = self.host.ReconnectHost_Task(**reconnecthost_args)
                    success, result = wait_for_task(task)
                    return success, result
                except TaskError as task_error_exception:
                    task_error = task_error_exception.args[0]
                    if self.esxi_ssl_thumbprint == '' and isinstance(task_error, vim.fault.SSLVerifyFault):
                        # User has not specified SSL Thumbprint for ESXi host,
                        # try to grab it using SSLVerifyFault exception
                        reconnecthost_args['cnxSpec'].sslThumbprint = task_error.thumbprint
                    else:
                        self.module.fail_json(msg="Failed to reconnect host %s to vCenter: %s" % (self.esxi_hostname,
                                                                                                  to_native(task_error.msg)))
            self.module.fail_json(msg="Failed to reconnect host %s to vCenter" % self.esxi_hostname)
        else:
            try:
                task = self.host.ReconnectHost_Task(**reconnecthost_args)
                success, result = wait_for_task(task)
                return success, result
            except TaskError as task_error_exception:
                task_error = task_error_exception.args[0]
                self.module.fail_json(msg="Failed to reconnect host %s to vCenter due to %s" % (self.esxi_hostname,
                                                                                                to_native(task_error.msg)))
# Example #15
 def update_dvs_config(self, switch_object, spec):
     """Apply *spec* to the given distributed virtual switch and block until
     the reconfigure task finishes; fail the module on a TaskError."""
     try:
         reconfig_task = switch_object.ReconfigureDvs_Task(spec)
         wait_for_task(reconfig_task)
     except TaskError as task_error:
         self.module.fail_json(msg="Failed to update DVS : %s" % to_native(task_error))
# Example #16
    def create(self):
        """
        Function to create a host VM-Host DRS rule if rule does not exist.

        An existing rule is edited in place when its settings differ.
        Honours check mode; sets self.__changed, self.__result (normalized
        rule spec) and self.__msg.
        """
        rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)

        # Check if rule exists
        if rule_obj:

            operation = 'edit'
            rule_changed = self.__check_rule_has_changed(rule_obj)

        else:
            operation = 'add'

        # Check if anything has changed when editing
        # (short-circuit guarantees rule_changed is only read when editing)
        if operation == 'add' or (operation == 'edit'
                                  and rule_changed is True):

            rule = vim.cluster.VmHostRuleInfo()

            # Check if already rule exists
            if rule_obj:
                # This need to be set in order to edit a existing rule
                rule.key = rule_obj.key

            rule.enabled = self.__enabled
            rule.mandatory = self.__mandatory
            rule.name = self.__rule_name

            if self.__affinity_rule:
                rule.affineHostGroupName = self.__host_group_name
            else:
                rule.antiAffineHostGroupName = self.__host_group_name

            rule.vmGroupName = self.__vm_group_name

            rule_spec = vim.cluster.RuleSpec(info=rule, operation=operation)
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])

            if not self.module.check_mode:

                task = self.__cluster_obj.ReconfigureEx(config_spec,
                                                        modify=True)
                wait_for_task(task)

            self.__changed = True

        # Re-read the rule so the reported result reflects the final state
        rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)
        self.__result = self.__normalize_vm_host_rule_spec(rule_obj)

        if operation == 'edit':
            self.__msg = "Updated DRS rule `%s` successfully" % (
                self.__rule_name)
        else:
            self.__msg = "Created DRS rule `%s` successfully" % (
                self.__rule_name)
# Example #17
def main():
    """Ansible module entry point: migrate a VM (vm_name) to the given
    destination host and exit the module with the outcome; supports
    check mode."""

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            vm_name=dict(required=True, aliases=['vm'], type='str'),
            destination_host=dict(required=True, aliases=['destination'], type='str'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyVmomi is required for this module')

    content = connect_to_api(module=module)

    vm_object = find_vm_by_name(content=content, vm_name=module.params['vm_name'])
    host_object = find_hostsystem_by_name(content=content, hostname=module.params['destination_host'])

    # Setup result
    result = {
        'changed': False
    }

    # Check if we could find the VM or Host
    if not vm_object:
        module.fail_json(msg='Cannot find virtual machine')
    if not host_object:
        module.fail_json(msg='Cannot find host')

    # Make sure VM isn't already at the destination
    if vm_object.runtime.host.name == module.params['destination_host']:
        module.exit_json(**result)

    if not module.check_mode:
        # Migrate VM and get Task object back
        task_object = migrate_vm(vm_object=vm_object, host_object=host_object)

        # Wait for task to complete
        wait_for_task(task_object)

        # If task was a success the VM has moved, update running_host and complete module
        if task_object.info.state == vim.TaskInfo.State.success:
            # re-read the VM to report its actual post-migration host
            vm_object = find_vm_by_name(content=content, vm_name=module.params['vm_name'])
            result['running_host'] = vm_object.runtime.host.name
            result['changed'] = True
            module.exit_json(**result)
        else:
            if task_object.info.error is None:
                module.fail_json(msg='Unable to migrate VM due to an error, please check vCenter')
            else:
                module.fail_json(msg='Unable to migrate VM due to an error: %s' % task_object.info.error)
    else:
        # If we are in check mode return a result as if move was performed
        result['running_host'] = module.params['destination_host']
        result['changed'] = True
        module.exit_json(**result)
# Example #18
def main():
    """Ansible module entry point: migrate a VM (vm_name) to the given
    destination host and exit the module with the outcome; supports
    check mode."""

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            vm_name=dict(required=True, aliases=['vm'], type='str'),
            destination_host=dict(required=True, aliases=['destination'], type='str'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyVmomi is required for this module')

    content = connect_to_api(module=module)

    vm_object = find_vm_by_name(content=content, vm_name=module.params['vm_name'])
    host_object = find_hostsystem_by_name(content=content, hostname=module.params['destination_host'])

    # Setup result
    result = {
        'changed': False
    }

    # Check if we could find the VM or Host
    if not vm_object:
        module.fail_json(msg='Cannot find virtual machine')
    if not host_object:
        module.fail_json(msg='Cannot find host')

    # Make sure VM isn't already at the destination
    if vm_object.runtime.host.name == module.params['destination_host']:
        module.exit_json(**result)

    if not module.check_mode:
        # Migrate VM and get Task object back
        task_object = migrate_vm(vm_object=vm_object, host_object=host_object)

        # Wait for task to complete
        wait_for_task(task_object)

        # If task was a success the VM has moved, update running_host and complete module
        if task_object.info.state == vim.TaskInfo.State.success:
            # re-read the VM to report its actual post-migration host
            vm_object = find_vm_by_name(content=content, vm_name=module.params['vm_name'])
            result['running_host'] = vm_object.runtime.host.name
            result['changed'] = True
            module.exit_json(**result)
        else:
            if task_object.info.error is None:
                module.fail_json(msg='Unable to migrate VM due to an error, please check vCenter')
            else:
                module.fail_json(msg='Unable to migrate VM due to an error: %s' % task_object.info.error)
    else:
        # If we are in check mode return a result as if move was performed
        result['running_host'] = module.params['destination_host']
        result['changed'] = True
        module.exit_json(**result)
# Example #19
def set_vnc_extraconfig(content, vm, enabled, ip, port, password):
    """
    Enable or disable the remotedisplay.vnc.* extraConfig options on *vm*.

    All four keys are first blanked, then filled in when *enabled* is truthy.
    The VM is only reconfigured when the effective values differ from the
    current ones ('enabled' is compared case-insensitively).

    :return: result dict with 'changed'/'failed' and, on success, 'instance'
             facts; on task failure, 'msg'.
    """
    result = dict(
        changed=False,
        failed=False,
    )
    # set new values
    key_prefix = "remotedisplay.vnc."
    new_values = dict()
    for key in ['enabled', 'ip', 'port', 'password']:
        new_values[key_prefix + key] = ""
    if enabled:
        new_values[key_prefix + "enabled"] = "true"
        new_values[key_prefix + "password"] = str(password).strip()
        new_values[key_prefix + "ip"] = str(ip).strip()
        new_values[key_prefix + "port"] = str(port).strip()

    # get current vnc config
    current_values = get_vnc_extraconfig(vm)
    # check if any value is changed
    reconfig_vm = False
    for key, val in new_values.items():
        key = key.replace(key_prefix, "")
        current_value = current_values.get(key, "")
        # enabled is not case-sensitive
        if key == "enabled":
            current_value = current_value.lower()
            val = val.lower()
        if current_value != val:
            reconfig_vm = True
    if not reconfig_vm:
        return result
    # reconfigure vm
    spec = vim.vm.ConfigSpec()
    spec.extraConfig = []
    for key, val in new_values.items():
        opt = vim.option.OptionValue()
        opt.key = key
        opt.value = val
        spec.extraConfig.append(opt)
    task = vm.ReconfigVM_Task(spec)
    try:
        wait_for_task(task)
    except TaskError as task_err:
        result['failed'] = True
        result['msg'] = to_native(task_err)

    if task.info.state == 'error':
        result['failed'] = True
        result['msg'] = task.info.error.msg
    else:
        result['changed'] = True
        result['instance'] = gather_vm_facts(content, vm)
    return result
 def clone(self):
     """Clone self.vm_obj onto the destination vCenter using the folder and
     name from self.params, returning a result dict ('changed', 'failed',
     plus 'msg' on task error or 'vm_info' on success)."""
     destination_folder = find_folder_by_name(
         content=self.destination_content,
         folder_name=self.params['destination_vm_folder'])
     new_vm_name = self.params['destination_vm_name']
     clone_task = self.vm_obj.Clone(
         folder=destination_folder, name=new_vm_name, spec=self.clone_spec)
     wait_for_task(clone_task)
     if clone_task.info.state == 'error':
         return {'changed': False, 'failed': True, 'msg': clone_task.info.error.msg}
     vm_info = self.get_new_vm_info(new_vm_name)
     return {'changed': True, 'failed': False, 'vm_info': vm_info}
# Example #21
    def take_vm_screenshot(self):
        """
        Take a screenshot of the powered-on VM, optionally download it to
        params['local_path'], and return a result dict ('changed'/'failed'
        plus 'msg' or 'screenshot_info').

        fail_json() (which exits) is called when the VM is not powered on or
        on the known screenshot faults.
        """
        if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
            self.module.fail_json(
                msg="VM is %s, valid power state is poweredOn." %
                self.current_vm_obj.runtime.powerState)
        try:
            task = self.current_vm_obj.CreateScreenshot_Task()
            wait_for_task(task)
        except vim.fault.FileFault as e:
            self.module.fail_json(
                msg=
                "Failed to create screenshot due to errors when creating or accessing one or more"
                " files needed for this operation, %s" % to_native(e.msg))
        except vim.fault.InvalidState as e:
            self.module.fail_json(
                msg=
                "Failed to create screenshot due to VM is not ready to respond to such requests,"
                " %s" % to_native(e.msg))
        except vmodl.RuntimeFault as e:
            self.module.fail_json(
                msg="Failed to create screenshot due to runtime fault, %s," %
                to_native(e.msg))
        except vim.fault.TaskInProgress as e:
            self.module.fail_json(
                msg="Failed to create screenshot due to VM is busy, %s" %
                to_native(e.msg))

        if task.info.state == 'error':
            return {
                'changed': self.change_detected,
                'failed': True,
                'msg': task.info.error.msg
            }
        else:
            download_file_size = None
            self.change_detected = True
            # task.info.result holds the datastore path of the screenshot file
            file_url = self.generate_http_access_url(task.info.result)
            if self.params.get('local_path'):
                if file_url:
                    download_file_size = self.download_screenshot_file(
                        file_url=file_url,
                        local_file_path=self.params['local_path'],
                        file_name=task.info.result.split('/')[-1])
            screenshot_facts = self.get_screenshot_facts(
                task.info, file_url, download_file_size)
            return {
                'changed': self.change_detected,
                'failed': False,
                'screenshot_info': screenshot_facts
            }
# Example #22
    def state_remove_host(self):
        """Remove self.host from vCenter: enter maintenance mode first when
        not already in it, then destroy the host object; honours check mode
        and exits the module with the result."""
        changed = True
        result = None
        if not self.module.check_mode:
            if not self.host.runtime.inMaintenanceMode:
                mm_task = self.host.EnterMaintenanceMode_Task(300, True, None)
                changed, result = wait_for_task(mm_task)
            if not changed:
                # entering maintenance mode failed — abort the removal
                raise Exception(result)
            destroy_task = self.host.Destroy_Task()
            changed, result = wait_for_task(destroy_task)
        self.module.exit_json(changed=changed, result=str(result))
    def set_nioc_version(self):
        """
        Set the DVS network resource control (NIOC) version to self.version
        (defaulting to 'version2') and wait for the reconfigure task;
        fail_json() is called on a vmodl.RuntimeFault.
        """
        upgrade_spec = vim.DistributedVirtualSwitch.ConfigSpec()
        upgrade_spec.configVersion = self.dvs.config.configVersion
        if not self.version:
            self.version = 'version2'
        upgrade_spec.networkResourceControlVersion = self.version

        try:
            task = self.dvs.ReconfigureDvs_Task(spec=upgrade_spec)
            wait_for_task(task)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(
                msg="RuntimeFault when setting NIOC version: %s " %
                to_native(runtime_fault.msg))
Example #24
0
    def state_remove_host(self):
        """
        Remove the host from vCenter.

        The host must be in maintenance mode before it can be destroyed,
        so maintenance mode is entered first when necessary.
        """
        changed = True
        result = None
        if not self.module.check_mode:
            if not self.host.runtime.inMaintenanceMode:
                # 300s timeout; second positional arg presumably
                # evacuatePoweredOffVms — confirm against pyVmomi API.
                maintenance_mode_task = self.host.EnterMaintenanceMode_Task(300, True, None)
                changed, result = wait_for_task(maintenance_mode_task)

            if changed:
                task = self.host.Destroy_Task()
                changed, result = wait_for_task(task)
            else:
                # Entering maintenance mode failed; propagate the task result.
                raise Exception(result)
        self.module.exit_json(changed=changed, result=str(result))
Example #25
0
 def update_health_check_config(self, switch_object, health_check_config):
     """Update Health Check config

     Starts the UpdateDVSHealthCheckConfig task on *switch_object* and
     waits for it, translating API faults and task errors into module
     failures.
     """
     # Kick off the reconfiguration; each fault type gets its own message.
     try:
         task = switch_object.UpdateDVSHealthCheckConfig_Task(healthCheckConfig=health_check_config)
     except vim.fault.DvsFault as dvs_fault:
         self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
     except vmodl.fault.NotSupported as not_supported:
         self.module.fail_json(msg="Health check not supported on the switch : %s" % to_native(not_supported))
     except TaskError as invalid_argument:
         self.module.fail_json(msg="Failed to configure health check : %s" % to_native(invalid_argument))
     # Block until the task completes (or fails).
     try:
         wait_for_task(task)
     except TaskError as invalid_argument:
         self.module.fail_json(msg="Failed to update health check config : %s" % to_native(invalid_argument))
Example #26
0
    def register_vm(self, template=False):
        """
        Register an existing VM/template file from a datastore into the
        inventory folder given by the module parameters.

        :param template: when True, register as a template (host only,
                         no resource pool); otherwise register as a VM,
                         which additionally requires a resource pool.
        :return: result dict with 'changed'/'failed' keys ('msg' on task error).
        """

        result = dict(
            changed=False,
            failed=False,
        )

        f_obj = self.fobj_from_folder_path(dc=self.params['datacenter'], folder=self.params['folder'])
        # abort if no strategy was successful
        if f_obj is None:
            self.module.fail_json(msg='No folder matched the path: %(folder)s' % self.params)
        destfolder = f_obj

        # Pick the target host: the one given explicitly, or one selected
        # from the datastore.
        if self.params['esxi_hostname'] is None:
            esxhost = self.select_host(self.params['datastore'])
        else:
            esxhost = self.cache.get_esx_host(self.params['esxi_hostname'])

        if template:
            task = destfolder.RegisterVM_Task("[%s] %s" % (self.params['datastore'], self.params['path']), self.params['name'], asTemplate=True, host=esxhost)
        else:
            # Now we need a resource pool
            if self.params['esxi_hostname']:
                resource_pool = self.select_resource_pool_by_host(esxhost)
            elif self.params['resource_pool_cluster_root']:
                if self.params['cluster'] is None:
                    self.module.fail_json(msg='resource_pool_cluster_root requires a cluster name')
                else:
                    rp_cluster = self.cache.get_cluster(self.params['cluster'])
                    if not rp_cluster:
                        self.module.fail_json(msg="Failed to find a cluster named %(cluster)s" % self.params)
                    resource_pool = rp_cluster.resourcePool
            else:
                resource_pool = self.select_resource_pool_by_name(self.params['resource_pool'])

            if resource_pool is None:
                self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster and resource_pool_cluster_root')
            # Now finally register the VM
            task = destfolder.RegisterVM_Task("[%s] %s" % (self.params['datastore'], self.params['path']), self.params['name'], asTemplate=False, host=esxhost, pool=resource_pool)

        # Wait for the registration task and translate its state into the
        # module result.
        if task:
            wait_for_task(task)
            if task.info.state == 'error':
                result['failed'] = True
                result['msg'] = str(task.info.error.msg)
            else:
                result['changed'] = True

        return result
Example #27
0
 def destroy_dvswitch(self):
     """Delete the distributed vSwitch and exit the module.

     In check mode no API call is made; the result only reports what
     would happen.
     """
     results = dict(changed=True)
     results['dvswitch'] = self.switch_name
     if self.module.check_mode:
         results['result'] = "DVS would be deleted"
     else:
         try:
             task = self.dvs.Destroy_Task()
         except vim.fault.VimFault as vim_fault:
             # Fixed grammar of the error message ("deleted" -> "delete").
             self.module.fail_json(msg="Failed to delete DVS : %s" % to_native(vim_fault))
         wait_for_task(task)
         results['result'] = "DVS deleted"
     self.module.exit_json(**results)
Example #28
0
def EnterMaintenanceMode(module, host):
    """Put *host* into maintenance mode, honouring the module's
    vsan/timeout/evacuate parameters; no-op exit if already in it."""
    if host.runtime.inMaintenanceMode:
        module.exit_json(
            changed=False,
            hostsystem=str(host),
            hostname=module.params['esxi_hostname'],
            status='NO_ACTION',
            msg='Host already in maintenance mode')

    spec = vim.host.MaintenanceSpec()
    vsan_action = module.params['vsan']
    if vsan_action:
        spec.vsanMode = vim.vsan.host.DecommissionMode()
        spec.vsanMode.objectAction = vsan_action

    try:
        task = host.EnterMaintenanceMode_Task(
            module.params['timeout'],
            module.params['evacuate'],
            spec)
        success, result = wait_for_task(task)
    except TaskError:
        module.fail_json(
            msg='Host failed to enter maintenance mode')

    return dict(changed=success,
                hostsystem=str(host),
                hostname=module.params['esxi_hostname'],
                status='ENTER',
                msg='Host entered maintenance mode')
 def state_update_evc(self):
     """
     Update EVC Mode
     """
     changed, result = False, None
     try:
         if self.current_evc_mode != self.evc_mode:
             # Apply (or, in check mode, simulate) the new EVC mode.
             if self.module.check_mode:
                 changed, result = True, None
             else:
                 evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode)
                 changed, result = wait_for_task(evc_task)
             self.module.exit_json(
                 changed=changed,
                 msg=
                 "EVC Mode has been updated to '%(evc_mode)s' on '%(cluster_name)s'."
                 % self.params)
         self.module.exit_json(
             changed=changed,
             msg=
             "EVC Mode is already set to '%(evc_mode)s' on '%(cluster_name)s'."
             % self.params)
     except TaskError as invalid_argument:
         self.module.fail_json(msg="Failed to update EVC mode: %s" %
                               to_native(invalid_argument))
Example #30
0
    def state_update_cluster(self):
        """
        Reconfigure an existing cluster when HA/DRS/vSAN settings differ.
        """
        changed, result = False, None
        cluster_config_spec = vim.cluster.ConfigSpecEx()

        # Collect only the sub-configs that actually changed; the
        # reconfigure task runs only when at least one differs.
        pending = []
        if self.check_ha_config_diff():
            cluster_config_spec.dasConfig = self.configure_ha()
            pending.append('ha')
        if self.check_drs_config_diff():
            cluster_config_spec.drsConfig = self.configure_drs()
            pending.append('drs')
        if self.check_vsan_config_diff():
            cluster_config_spec.vsanConfig = self.configure_vsan()
            pending.append('vsan')

        try:
            if pending and not self.module.check_mode:
                task = self.cluster.ReconfigureComputeResource_Task(
                    cluster_config_spec, True)
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=to_native(method_fault.msg))
        except TaskError as task_e:
            self.module.fail_json(msg=to_native(task_e))
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to update cluster"
                                  " due to generic exception %s" %
                                  to_native(generic_exc))
Example #31
0
 def enter_maintenance(self):
     """Put the host into maintenance mode, failing the module on any error."""
     try:
         success, result = wait_for_task(
             self.host.EnterMaintenanceMode_Task(timeout=15))
     except Exception as e:
         self.module.fail_json(msg="Failed to enter maintenance mode."
                                   " Ensure that there are no powered on machines on the host. %s" % to_native(e))
Example #32
0
    def add_host_to_vcenter(self):
        """
        Add the ESXi host to vCenter, either under a folder (standalone)
        or into a cluster.

        Retries once: if the user supplied no SSL thumbprint and the add
        fails with an SSLVerifyFault, the thumbprint reported by the
        fault is adopted and the add is attempted a second time.
        """
        host_connect_spec = self.get_host_connect_spec()
        as_connected = self.params.get('add_connected')
        # License and resource pool are left to vCenter defaults.
        esxi_license = None
        resource_pool = None

        # At most two attempts (see SSL thumbprint handling below).
        for count in range(0, 2):
            try:
                task = None
                if self.folder:
                    task = self.folder.AddStandaloneHost(spec=host_connect_spec, addConnected=as_connected)
                elif self.cluster:
                    task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
                success, result = wait_for_task(task)
                return success, result
            except TaskError as task_error_exception:
                task_error = task_error_exception.args[0]
                if self.esxi_ssl_thumbprint == '' and isinstance(task_error, vim.fault.SSLVerifyFault):
                    # User has not specified SSL Thumbprint for ESXi host,
                    # try to grab it using SSLVerifyFault exception
                    host_connect_spec.sslThumbprint = task_error.thumbprint
                else:
                    self.module.fail_json(msg="Failed to add host %s to vCenter: %s" % (self.esxi_hostname,
                                                                                        to_native(task_error.msg)))
            except vmodl.fault.NotSupported:
                self.module.fail_json(msg="Failed to add host %s to vCenter as host is"
                                          " being added to a folder %s whose childType"
                                          " property does not contain"
                                          " \"ComputeResource\"." % (self.esxi_hostname, self.folder_name))
            except Exception as generic_exc:
                self.module.fail_json(msg="Failed to add host %s to vCenter: %s" % (self.esxi_hostname,
                                                                                    to_native(generic_exc)))

        # Both attempts failed without raising a terminal error.
        self.module.fail_json(msg="Failed to add host %s to vCenter" % self.esxi_hostname)
    def migrate_network_adapter_vds(self):
        """
        Reconfigure every virtual NIC of the VM to connect to the
        distributed portgroup found by find_dvspg_by_name().

        Exits the module with the result of the reconfigure task.
        """
        pg = self.find_dvspg_by_name()
        if pg is None:
            self.module.fail_json(msg="The standard portgroup was not found")

        dvswitch = pg.config.distributedVirtualSwitch

        vm_configspec = vim.vm.ConfigSpec()
        for device in self.vm.config.hardware.device:
            if not isinstance(device, vim.vm.device.VirtualEthernetCard):
                continue
            # Build a fresh backing/spec per NIC: the original reused one
            # VirtualDeviceSpec object for every device, so all
            # deviceChange entries aliased the same spec and only the
            # last NIC was effectively edited.
            port = vim.dvs.PortConnection()
            port.switchUuid = dvswitch.uuid
            port.portgroupKey = pg.key
            nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
            nic.port = port
            devicespec = vim.vm.device.VirtualDeviceSpec()
            devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            devicespec.device = device
            devicespec.device.backing = nic
            vm_configspec.deviceChange.append(devicespec)

        task = self.vm.ReconfigVM_Task(vm_configspec)
        changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=result)
Example #34
0
    def power_on(self):
        """Power on the entity if requested, optionally waiting for the
        task and the guest IP address; return gathered VM facts."""
        facts = {}
        if self.params['power_on']:
            power_task = self.entity.PowerOn()
            if self.params['wait']:
                wait_for_task(power_task)
                if self.params['wait_for_ip_address']:
                    ip_facts = wait_for_vm_ip(self.si, self.entity)
                    if not ip_facts:
                        self.module.fail_json(msg='Waiting for IP address timed out')
                    facts.update(ip_facts)

        # Fall back to a full facts gather when nothing was collected above.
        return facts or gather_vm_facts(self.si, self.entity)
Example #35
0
 def restore_original_state(self):
     """In case of failure restore, the changes we made."""
     # Revert the promiscuous-mode changes recorded in modified_ports.
     for port, state in self.modified_ports.items():
         self.set_port_security_promiscuous([port], state)
     if self.deleted_session is not None:
         # Re-add the VSPAN session that was deleted earlier.
         session = self.deleted_session
         config_version = self.dv_switch.config.configVersion
         s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(vspanSession=session, operation="add")
         c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(vspanConfigSpec=[s_spec], configVersion=config_version)
         # Revert the delete
         task = self.dv_switch.ReconfigureDvs_Task(c_spec)
         try:
             wait_for_task(task)
         except Exception:
             # NOTE(review): retrying by calling restore_original_state()
             # recursively can loop forever if the reconfigure keeps
             # failing (deleted_session is never cleared) — confirm intent.
             self.restore_original_state()
             self.module.fail_json(msg=task.info.error.msg)
 def state_remove_host(self):
     """Remove host from vCenter.

     Hosts living in a cluster are put into maintenance mode first.
     In check mode only a message is produced.
     """
     changed = True
     result = None
     if self.module.check_mode:
         result = "Host would be removed from vCenter '%s'" % self.vcenter
     else:
         # Check parent type
         parent_type = self.get_parent_type(self.host_update)
         if parent_type == 'cluster':
             self.put_host_in_maintenance_mode(self.host_update)
         task = None
         try:
             if self.folder_name:
                 task = self.host_parent_compute_resource.Destroy_Task()
             elif self.cluster_name:
                 task = self.host.Destroy_Task()
         except vim.fault.VimFault as vim_fault:
             # Pass a serializable message instead of the raw fault object.
             self.module.fail_json(msg=to_native(vim_fault))
         if task is None:
             # Neither folder_name nor cluster_name was set: previously
             # this crashed with a NameError on 'task' below.
             self.module.fail_json(
                 msg="Failed to remove the host from vCenter '%s' :"
                     " neither folder nor cluster was specified" % self.vcenter)
         try:
             changed, result = wait_for_task(task)
             result = "Host removed from vCenter '%s'" % self.vcenter
         except TaskError as task_error:
             self.module.fail_json(
                 msg="Failed to remove the host from vCenter '%s' : %s" %
                 (self.vcenter, to_native(task_error)))
     self.module.exit_json(changed=changed, result=str(result))
def main():
    """Move an existing VM into dest_folder within the given datacenter."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'],
                        default='first'),
        uuid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        dest_folder=dict(type='str', required=True),
        datacenter=dict(type='str', required=True),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'uuid']],
                           mutually_exclusive=[['name', 'uuid']],
                           supports_check_mode=True)

    # FindByInventoryPath() does not require an absolute path
    # so we should leave the input folder path unmodified
    module.params['dest_folder'] = module.params['dest_folder'].rstrip('/')
    pyv = PyVmomiHelper(module)
    search_index = pyv.content.searchIndex

    # Check if the VM exists before continuing
    vm = pyv.get_vm()

    # VM exists
    if vm:
        try:
            vm_path = pyv.get_vm_path(pyv.content, vm).lstrip('/')
            if module.params['name']:
                vm_name = module.params['name']
            else:
                vm_name = vm.name

            # Full inventory path of the VM, used to resolve it below.
            vm_full = vm_path + '/' + vm_name
            folder = search_index.FindByInventoryPath(
                module.params['dest_folder'])
            if folder is None:
                module.fail_json(msg="Folder name and/or path does not exist")
            vm_to_move = search_index.FindByInventoryPath(vm_full)
            if module.check_mode:
                # NOTE(review): check mode reports changed=True even when
                # the VM is already in dest_folder — confirm this is intended.
                module.exit_json(changed=True, instance=pyv.gather_facts(vm))
            if vm_path != module.params['dest_folder'].lstrip('/'):
                move_task = folder.MoveInto([vm_to_move])
                changed, err = wait_for_task(move_task)
                # NOTE(review): if wait_for_task reports changed=False, no
                # exit_json is reached on this path — confirm intent.
                if changed:
                    module.exit_json(changed=True,
                                     instance=pyv.gather_facts(vm))
            else:
                # Already in the destination folder; nothing to do.
                module.exit_json(instance=pyv.gather_facts(vm))
        except Exception as exc:
            module.fail_json(msg="Failed to move VM with exception %s" %
                             to_native(exc))
    else:
        if module.check_mode:
            module.exit_json(changed=False)
        module.fail_json(
            msg="Unable to find VM %s to move to %s" %
            ((module.params.get('uuid') or module.params.get('name')),
             module.params.get('dest_folder')))
Example #38
0
 def vm_power_on(self, vm_obj):
     """Power on *vm_obj* if requested via module params.

     Optionally waits for the power-on task and for the guest IP
     address; returns the collected facts (empty dict when nothing
     was waited on).
     """
     facts = {}
     try:
         if self.params['power_on']:
             task = vm_obj.PowerOn()
             if self.params['wait']:
                 wait_for_task(task)
                 if self.params['wait_for_ip_address']:
                     _facts = wait_for_vm_ip(self.si, vm_obj)
                     if not _facts:
                         self.module.fail_json(
                             msg='Waiting for IP address timed out')
                     facts.update(_facts)
     except Exception as e:
         # Was 'except Exception, e' (Python 2-only syntax), and
         # 'e.message.msg' does not exist on Python 3 exceptions.
         self.module.fail_json(msg="Error received from vCenter:%s" %
                               to_native(e))
     # Return the gathered facts (the original built them but dropped them).
     return facts
Example #39
0
    def state_destroy_dvspg(self):
        """Delete the distributed portgroup unless running in check mode."""
        changed, result = True, None

        if not self.module.check_mode:
            destroy_task = self.dvs_portgroup.Destroy_Task()
            changed, result = wait_for_task(destroy_task)
        self.module.exit_json(changed=changed, result=str(result))
    def state_remove_rp(self):
        """Destroy the resource pool and exit the module with the result."""
        changed = True
        result = None
        resource_pool = self.select_resource_pool(self.host_obj)
        try:
            task = self.resource_pool_obj.Destroy()
            success, result = wait_for_task(task)
        except Exception:
            # Narrowed from a bare 'except:' which would also swallow
            # SystemExit/KeyboardInterrupt.
            self.module.fail_json(msg="Failed to remove resource pool '%s' '%s'" % (self.resource_pool, resource_pool))
        self.module.exit_json(changed=changed, result=str(result))
    def state_remove_folder(self):
        """Destroy the last folder component of self.folder and exit the module."""
        changed = True
        result = None
        self.folder_expanded = self.folder.split("/")
        f = self.folder_expanded.pop()
        folder_obj = self.get_obj([vim.Folder], f)
        if folder_obj is None:
            # Previously a missing folder raised AttributeError
            # ("'NoneType' has no attribute 'Destroy'") outside the try
            # block; fail cleanly with the same message instead.
            self.module.fail_json(
                msg="Failed to remove folder '%s'" % self.folder)
        task = folder_obj.Destroy()

        try:
            success, result = wait_for_task(task)
        except Exception:
            self.module.fail_json(
                msg="Failed to remove folder '%s'" % self.folder)

        self.module.exit_json(changed=changed, result=str(result))
Example #42
0
    def state_destroy_cluster(self):
        """Destroy the cluster (skipped in check mode) and exit the module."""
        changed, result = True, None

        try:
            if not self.module.check_mode:
                destroy_task = self.cluster.Destroy_Task()
                changed, result = wait_for_task(destroy_task)
            self.module.exit_json(changed=changed, result=result)
        except vim.fault.VimFault as vim_fault:
            self.module.fail_json(msg=vim_fault.msg)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
    def ensure(self):
        """
        Enter (state=present) or exit (state=absent) maintenance mode on
        each datastore in self.datastore_objs; exits the module with a
        per-datastore result map.
        """
        datastore_results = dict()
        change_datastore_list = []
        for datastore in self.datastore_objs:
            changed = False
            if self.state == 'present' and datastore.summary.maintenanceMode != 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is already in maintenance mode." % datastore.name
                # NOTE(review): 'break' stops processing the remaining
                # datastores entirely — 'continue' may have been intended.
                break
            elif self.state == 'absent' and datastore.summary.maintenanceMode == 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is not in maintenance mode." % datastore.name
                # NOTE(review): same 'break' vs 'continue' question as above.
                break

            try:
                if self.state == 'present':
                    storage_replacement_result = datastore.DatastoreEnterMaintenanceMode()
                    task = storage_replacement_result.task
                else:
                    task = datastore.DatastoreExitMaintenanceMode_Task()

                success, result = wait_for_task(task)

                if success:
                    changed = True
                    if self.state == 'present':
                        datastore_results[datastore.name] = "Datastore '%s' entered in maintenance mode." % datastore.name
                    else:
                        datastore_results[datastore.name] = "Datastore '%s' exited from maintenance mode." % datastore.name
            except vim.fault.InvalidState as invalid_state:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to : %s" % to_native(invalid_state.msg)
                self.module.fail_json(msg=msg)
            except Exception as exc:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to generic exception : %s" % to_native(exc)
                self.module.fail_json(msg=msg)
            change_datastore_list.append(changed)

        # Module is 'changed' when any datastore changed state.
        changed = False
        if any(change_datastore_list):
            changed = True
        self.module.exit_json(changed=changed, results=datastore_results)
Example #44
0
 def destroy_datacenter(self):
     """Delete the datacenter (honouring check mode) and exit the module."""
     results = dict(changed=False)
     try:
         if self.datacenter_obj and not self.module.check_mode:
             destroy_task = self.datacenter_obj.Destroy_Task()
             results['changed'], results['result'] = wait_for_task(destroy_task)
         self.module.exit_json(**results)
     except (vim.fault.VimFault, vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
         self.module.fail_json(msg="Failed to delete a datacenter"
                                   " '%s' due to : %s" % (self.datacenter_name,
                                                          to_native(runtime_fault.msg)))
     except Exception as generic_exc:
         self.module.fail_json(msg="Failed to delete a datacenter"
                                   " '%s' due to generic error: %s" % (self.datacenter_name,
                                                                       to_native(generic_exc)))
Example #45
0
def destroy_datacenter(context, module):
    """Destroy the datacenter resolved via *context*; exit the module with the result."""
    result = None
    changed = False

    try:
        datacenter = get_datacenter(context, module)
        if datacenter:
            changed = True
            if not module.check_mode:
                changed, result = wait_for_task(datacenter.Destroy_Task())
        module.exit_json(changed=changed, result=result)
    except vim.fault.VimFault as vim_fault:
        module.fail_json(msg=vim_fault.msg)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
Example #46
0
    def modify_dvs_host(self, operation):
        """Add/edit/remove this host on the DVS via a host-member config
        spec; return (changed, result) from the reconfigure task."""
        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dv_switch.config.configVersion

        host_member = vim.dvs.HostMember.ConfigSpec()
        host_member.operation = operation
        host_member.host = self.host
        spec.host = [host_member]

        if operation in ("edit", "add"):
            # Attach each vmnic to its uplink portgroup.
            backing = vim.dvs.HostMember.PnicBacking()
            for nic in self.vmnics:
                pnic_spec = vim.dvs.HostMember.PnicSpec()
                pnic_spec.pnicDevice = nic
                pnic_spec.uplinkPortgroupKey = self.uplink_portgroup.key
                backing.pnicSpec.append(pnic_spec)
            host_member.backing = backing

        task = self.dv_switch.ReconfigureDvs_Task(spec)
        changed, result = wait_for_task(task)
        return changed, result
Example #47
0
def create_vsan_cluster(host_system, new_cluster_uuid):
    """Enable VSAN (with storage auto-claim) on *host_system*, optionally
    joining the given cluster UUID; return (changed, result, cluster_uuid)."""
    vsan_system = host_system.configManager.vsanSystem

    vsan_config = vim.vsan.host.ConfigInfo()
    vsan_config.enabled = True
    vsan_config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
    vsan_config.storageInfo.autoClaimStorage = True

    if new_cluster_uuid is not None:
        cluster_info = vim.vsan.host.ConfigInfo.ClusterInfo()
        cluster_info.uuid = new_cluster_uuid
        vsan_config.clusterInfo = cluster_info

    changed, result = wait_for_task(vsan_system.UpdateVsan_Task(vsan_config))

    # Re-query the host to learn the cluster UUID actually in effect.
    cluster_uuid = vsan_system.QueryHostStatus().uuid

    return changed, result, cluster_uuid
Example #48
0
    def create_port_group(self):
        """Create a distributed portgroup on the DVS; return (changed, result)."""
        # vim.VMwareDVSPortSetting and
        # vim.VmwareDistributedVirtualSwitchVlanIdSpec are absent from the
        # pyvmomi documentation but are the correct managed object types.
        vlan_spec = vim.VmwareDistributedVirtualSwitchVlanIdSpec()
        vlan_spec.inherited = False
        vlan_spec.vlanId = self.vlan_id

        port_setting = vim.VMwareDVSPortSetting()
        port_setting.vlan = vlan_spec

        config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
        config.name = self.portgroup_name
        config.numPorts = self.num_ports
        config.defaultPortConfig = port_setting
        config.type = self.portgroup_type

        task = self.dv_switch.AddDVPortgroup_Task([config])
        changed, result = wait_for_task(task)
        return changed, result
Example #49
0
    def set_vsan_service_type(self):
        """
        Enable the VSAN service type on this vnic.
        Returns: True and result for success, False and result for failure

        """
        changed, result = False, ''
        vsan_system = self.esxi_host_obj.configManager.vsanSystem

        port_config = vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()
        port_config.device = self.vnic.device

        vsan_config = vim.vsan.host.ConfigInfo()
        vsan_config.networkInfo = vim.vsan.host.ConfigInfo.NetworkInfo()
        vsan_config.networkInfo.port = [port_config]
        try:
            changed, result = wait_for_task(vsan_system.UpdateVsan_Task(vsan_config))
        except Exception as e:
            self.module.fail_json(msg="Failed to set service type to vsan for"
                                      " %s : %s" % (self.vnic.device, to_native(e)))
        return changed, result
    def ensure(self):
        """
        Manage internal state of the datastore cluster: create it for
        state=present, delete it for state=absent. Exits the module.
        """
        results = dict(changed=False, result='')
        state = self.module.params.get('state')

        if self.datastore_cluster_obj:
            if state == 'present':
                results['result'] = "Datastore cluster '%s' already available." % self.datastore_cluster_name
            elif state == 'absent':
                # Delete datastore cluster
                if not self.module.check_mode:
                    task = self.datastore_cluster_obj.Destroy_Task()
                    changed, result = wait_for_task(task)
                else:
                    changed = True
                if changed:
                    results['result'] = "Datastore cluster '%s' deleted successfully." % self.datastore_cluster_name
                    results['changed'] = changed
                else:
                    self.module.fail_json(msg="Failed to delete datastore cluster '%s'." % self.datastore_cluster_name)
        else:
            if state == 'present':
                # Create datastore cluster
                if not self.module.check_mode:
                    try:
                        self.datacenter_obj.datastoreFolder.CreateStoragePod(name=self.datastore_cluster_name)
                    except Exception as generic_exc:
                        # Fixed typo in the error message ("datstore" -> "datastore").
                        self.module.fail_json(msg="Failed to create datastore cluster"
                                                  " '%s' due to %s" % (self.datastore_cluster_name,
                                                                       to_native(generic_exc)))
                results['changed'] = True
                results['result'] = "Datastore cluster '%s' created successfully." % self.datastore_cluster_name
            elif state == 'absent':
                results['result'] = "Datastore cluster '%s' not available or already deleted." % self.datastore_cluster_name
        self.module.exit_json(**results)
Example #51
0
    def create_dvswitch(self, network_folder):
        """Create the distributed vSwitch in *network_folder*; return (changed, result)."""
        config = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        config.name = self.switch_name
        config.maxMtu = self.mtu

        config.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig()
        config.linkDiscoveryProtocolConfig.protocol = self.discovery_proto
        config.linkDiscoveryProtocolConfig.operation = self.discovery_operation

        # Name the uplink ports uplink1..uplinkN.
        config.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
        for n in range(1, self.uplink_quantity + 1):
            config.uplinkPortPolicy.uplinkPortName.append("uplink%d" % n)

        spec = vim.DistributedVirtualSwitch.CreateSpec()
        spec.configSpec = config
        spec.productInfo = vim.dvs.ProductSpec()
        spec.productInfo.name = "DVS"
        spec.productInfo.vendor = "VMware"

        task = network_folder.CreateDVS_Task(spec)
        changed, result = wait_for_task(task)
        return changed, result
Example #52
0
    def state_update_cluster(self):
        """Reconfigure the cluster when HA/DRS/vSAN enablement differs from the requested state."""
        cluster_config_spec = vim.cluster.ConfigSpecEx()
        changed = True
        result = None

        current = self.cluster.configurationEx
        if current.dasConfig.enabled != self.enable_ha:
            cluster_config_spec.dasConfig = self.configure_ha()
        if current.drsConfig.enabled != self.enable_drs:
            cluster_config_spec.drsConfig = self.configure_drs()
        if current.vsanConfigInfo.enabled != self.enable_vsan:
            cluster_config_spec.vsanConfig = self.configure_vsan()

        try:
            if not self.module.check_mode:
                reconfig_task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                changed, result = wait_for_task(reconfig_task)
            self.module.exit_json(changed=changed, result=result)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except TaskError as task_e:
            self.module.fail_json(msg=str(task_e))
Example #53
0
def ExitMaintenanceMode(module, host):
    """Take *host* out of maintenance mode.

    Exits the module immediately (changed=False) when the host is not in
    maintenance mode; otherwise waits on the exit task and returns a result
    dict, or fails the module when the task errors.
    """
    if not host.runtime.inMaintenanceMode:
        # Nothing to do — report and stop here.
        module.exit_json(
            changed=False,
            hostsystem=str(host),
            hostname=module.params['esxi_hostname'],
            status='NO_ACTION',
            msg='Host not in maintenance mode')

    try:
        exit_task = host.ExitMaintenanceMode_Task(module.params['timeout'])
        task_changed, _task_result = wait_for_task(exit_task)
    except TaskError:
        module.fail_json(
            msg='Host failed to exit maintenance mode')
    else:
        return dict(
            changed=task_changed,
            hostsystem=str(host),
            hostname=module.params['esxi_hostname'],
            status='EXIT',
            msg='Host exited maintenance mode')
Example #54
0
    def ensure(self):
        """
        Manage folder state: create the folder when state is 'present',
        remove it when state is 'absent'.

        Always terminates the module via exit_json (with 'changed' and
        'result' keys) on success, or via fail_json on unrecoverable errors.
        """
        state = self.module.params.get('state')
        folder_type = self.module.params.get('folder_type')
        folder_name = self.module.params.get('folder_name')
        parent_folder = self.module.params.get('parent_folder', None)
        results = dict(changed=False, result=dict())
        if state == 'present':
            # Create a new folder
            try:
                if parent_folder:
                    folder = self.get_folder_by_name(folder_name=parent_folder)
                    if folder:
                        folder.CreateFolder(folder_name)
                        results['changed'] = True
                        results['result'] = "Folder '%s' of type '%s' created under %s" \
                                            " successfully." % (folder_name, folder_type, parent_folder)
                    else:
                        self.module.fail_json(msg="Failed to find the parent folder %s"
                                                  " for folder %s" % (parent_folder, folder_name))
                else:
                    # Each folder type hangs off a different root folder of the datacenter.
                    datacenter_folder_type = {
                        'vm': self.datacenter_obj.vmFolder,
                        'host': self.datacenter_obj.hostFolder,
                        'datastore': self.datacenter_obj.datastoreFolder,
                        'network': self.datacenter_obj.networkFolder,
                    }
                    datacenter_folder_type[folder_type].CreateFolder(folder_name)
                    results['changed'] = True
                    results['result'] = "Folder '%s' of type '%s' created successfully" % (folder_name, folder_type)
            except vim.fault.DuplicateName as duplicate_name:
                # To be consistent with the other vmware modules, We decided to accept this error
                # and the playbook should simply carry on with other tasks.
                # User will have to take care of this exception
                # https://github.com/ansible/ansible/issues/35388#issuecomment-362283078
                results['changed'] = False
                results['result'] = "Failed to create folder as another object has same name" \
                                    " in the same target folder : %s" % to_native(duplicate_name.msg)
            except vim.fault.InvalidName as invalid_name:
                self.module.fail_json(msg="Failed to create folder as folder name is not a valid "
                                          "entity name : %s" % to_native(invalid_name.msg))
            except Exception as general_exc:
                self.module.fail_json(msg="Failed to create folder due to generic"
                                          " exception : %s " % to_native(general_exc))
            self.module.exit_json(**results)
        elif state == 'absent':
            folder_obj = self.get_folder_by_name(folder_name=folder_name)
            if folder_obj:
                try:
                    task = folder_obj.UnregisterAndDestroy()
                    results['changed'], results['result'] = wait_for_task(task=task)
                except vim.fault.ConcurrentAccess as concurrent_access:
                    self.module.fail_json(msg="Failed to remove folder as another client"
                                              " modified folder before this operation : %s" % to_native(concurrent_access.msg))
                except vim.fault.InvalidState as invalid_state:
                    # BUGFIX: the original message lacked a '%s' placeholder, so the '%'
                    # formatting raised TypeError instead of reporting the fault detail.
                    self.module.fail_json(msg="Failed to remove folder as folder is in"
                                              " invalid state : %s" % to_native(invalid_state.msg))
                except Exception as e:
                    self.module.fail_json(msg="Failed to remove folder due to generic"
                                              " exception %s " % to_native(e))
            self.module.exit_json(**results)
Example #55
0
 def state_destroy_dvs(self):
     """Delete the distributed virtual switch and exit the module with the task outcome."""
     destroy_task = self.dvs.Destroy_Task()
     has_changed, task_result = wait_for_task(destroy_task)
     self.module.exit_json(changed=has_changed, result=str(task_result))
Example #56
0
 def exit_maintenance(self):
     try:
         task = self.host.ExitMaintenanceMode_Task(timeout=15)
         success, result = wait_for_task(task)
     except Exception as generic_exc:
         self.module.fail_json(msg="Failed to exit maintenance mode due to %s" % to_native(generic_exc))