def check_dvs_host_state(self):
    """Determine the host's state with respect to the distributed vswitch.

    Returns:
        'absent'  -- the host is not attached to the DVS yet,
        'present' -- attached and the uplink configuration matches,
        'update'  -- attached but the uplinks need reconfiguration.

    Fails the module when the switch, its uplink portgroup, or the ESXi
    host itself cannot be found.
    """
    self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
    if self.dv_switch is None:
        self.module.fail_json(
            msg="A distributed virtual switch %s "
                "does not exist" % self.switch_name)

    self.uplink_portgroup = self.find_dvs_uplink_pg()
    if self.uplink_portgroup is None:
        self.module.fail_json(
            msg="An uplink portgroup does not exist on"
                " the distributed virtual switch %s" % self.switch_name)

    self.host = self.find_host_attached_dvs()
    if self.host is not None:
        # Host already on the switch; decide whether uplinks match.
        return 'present' if self.check_uplinks() else 'update'

    # Not attached yet. We still need the HostSystem object so the host
    # can be added to the distributed vswitch later on.
    self.host = find_hostsystem_by_name(self.content, self.esxi_hostname)
    if self.host is None:
        self.module.fail_json(
            msg="The esxi_hostname %s does not exist "
                "in vCenter" % self.esxi_hostname)
    return 'absent'
def check_host_state(self):
    """Check current state"""
    # Is the host already registered with vCenter at all?
    self.host_update = find_hostsystem_by_name(self.content, self.esxi_hostname)
    if self.host_update:
        # Host names are unique in vCenter; a host of the same name cannot
        # exist in another datacenter. The module will still fail later if
        # the target folder/cluster lives in a different datacenter than
        # the host.
        if self.cluster_name:
            # Is the host attached to the requested cluster?
            self.host, self.cluster = self.search_cluster(
                self.datacenter_name, self.cluster_name, self.esxi_hostname)
            state = 'present' if self.host else 'update'
        elif self.folder_name:
            # Is the host attached under the requested folder?
            self.folder = self.search_folder(self.folder_name)
            for entity in self.folder.childEntity:
                if not entity or not isinstance(entity, vim.ComputeResource):
                    continue
                try:
                    # A standalone ComputeResource wraps exactly one host;
                    # an empty host list raises IndexError and is skipped.
                    if isinstance(entity.host[0], vim.HostSystem) and entity.name == self.esxi_hostname:
                        self.host_parent_compute_resource = entity
                        self.host = entity.host[0]
                        break
                except IndexError:
                    continue
            state = 'present' if self.host else 'update'
    else:
        state = 'absent'
    return state
def sanitize_params(self):
    """Validate user-supplied parameters against both vCenters.

    Resolves the source VM, connects to the destination vCenter, and
    resolves destination VM name (must not exist), datastore (or
    datastore cluster recommendation), host, and resource pool.
    Fails or exits the module as soon as any check does not pass.
    """
    self.vm_obj = self.get_vm()
    if self.vm_obj is None:
        vm_id = self.vm_uuid or self.vm_name or self.moid
        self.module.fail_json(
            msg="Failed to find the VM/template with %s" % vm_id)

    # connect to destination VC
    self.destination_content = connect_to_api(
        self.module,
        hostname=self.destination_vcenter,
        username=self.destination_vcenter_username,
        password=self.destination_vcenter_password,
        port=self.destination_vcenter_port,
        validate_certs=self.destination_vcenter_validate_certs,
    )

    # The target name must be free on the destination VC.
    existing_vm = find_vm_by_name(
        content=self.destination_content,
        vm_name=self.params["destination_vm_name"],
    )
    if existing_vm:
        self.module.exit_json(
            changed=False, msg="A VM with the given name already exists")

    datastore_name = self.params["destination_datastore"]
    datastore_cluster = find_obj(
        self.destination_content, [vim.StoragePod], datastore_name)
    if datastore_cluster:
        # The user named a datastore cluster: let DRS recommend a member.
        datastore_name = self.get_recommended_datastore(
            datastore_cluster_obj=datastore_cluster)

    # Verify the (possibly recommended) datastore actually exists.
    self.destination_datastore = find_datastore_by_name(
        content=self.destination_content, datastore_name=datastore_name)
    if self.destination_datastore is None:
        self.module.fail_json(msg="Destination datastore not found.")

    self.destination_host = find_hostsystem_by_name(
        content=self.destination_content,
        hostname=self.params["destination_host"],
    )
    if self.destination_host is None:
        self.module.fail_json(msg="Destination host not found.")

    if self.params["destination_resource_pool"]:
        self.destination_resource_pool = find_resource_pool_by_name(
            content=self.destination_content,
            resource_pool_name=self.params["destination_resource_pool"],
        )
    else:
        # Default to the destination host's parent resource pool.
        self.destination_resource_pool = (
            self.destination_host.parent.resourcePool)
def check_vmk_current_state(self):
    """Classify the pending migration for the requested VMkernel adapter.

    Returns 'migrate_vss_vds' when the vmk sits on the expected standard
    vswitch portgroup, 'migrate_vds_vss' when it sits on the expected
    distributed vswitch, 'migrated' when the source DVS no longer exists,
    and (implicitly) None when the device is not found or matches neither
    case.
    """
    self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)
    for nic in self.host_system.configManager.networkSystem.networkInfo.vnic:
        if nic.device != self.device:
            continue
        if nic.spec.distributedVirtualPort is None:
            # vmk currently lives on a standard vswitch portgroup.
            if nic.portgroup == self.current_portgroup_name:
                return "migrate_vss_vds"
        else:
            # vmk currently lives on a distributed vswitch.
            dvs = find_dvs_by_name(self.content, self.current_switch_name)
            if dvs is None:
                return "migrated"
            if nic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
                return "migrate_vds_vss"
def main():
    """Module entry point for managing vCenter license keys.

    With state=present the key is added (when missing) and assigned to the
    target entity: a cluster, the vCenter server itself, or an ESXi host,
    depending on which of cluster_name/esxi_hostname is supplied.
    With state=absent the key is removed, refusing when it is still in use.
    Supports check mode and emits --diff output of the license key list.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            labels=dict(type="dict", default=dict(source="ansible")),
            license=dict(type="str", required=True),
            state=dict(
                type="str", default="present", choices=["absent", "present"]
            ),
            esxi_hostname=dict(type="str"),
            datacenter=dict(type="str"),
            cluster_name=dict(type="str"),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec, supports_check_mode=True
    )

    license = module.params["license"]
    state = module.params["state"]

    # Convert the 'labels' dict parameter into vim.KeyValue objects.
    # FIXME: This does not seem to work on vCenter v6.0
    labels = []
    for k in module.params["labels"]:
        kv = vim.KeyValue()
        kv.key = k
        kv.value = module.params["labels"][k]
        labels.append(kv)

    result = dict(changed=False, diff=dict())

    pyv = VcenterLicenseMgr(module)
    if not pyv.is_vcenter():
        module.fail_json(
            msg="vcenter_license is meant for vCenter, hostname %s "
            "is not vCenter server." % module.params.get("hostname")
        )

    lm = pyv.content.licenseManager

    # Snapshot the current key list; doubles as the diff 'before' state.
    result["licenses"] = pyv.list_keys(lm.licenses)
    if module._diff:
        result["diff"]["before"] = "\n".join(result["licenses"]) + "\n"

    if state == "present":
        if license not in result["licenses"]:
            result["changed"] = True
            if module.check_mode:
                result["licenses"].append(license)
            else:
                lm.AddLicense(license, labels)

        key = pyv.find_key(lm.licenses, license)
        if key is not None:
            lam = lm.licenseAssignmentManager
            assigned_license = None
            datacenter = module.params["datacenter"]
            datacenter_obj = None
            if datacenter:
                datacenter_obj = pyv.find_datacenter_by_name(datacenter)
                if not datacenter_obj:
                    module.fail_json(
                        msg="Unable to find the datacenter %(datacenter)s"
                        % module.params
                    )

            cluster = module.params["cluster_name"]
            if cluster:
                # assign to the named cluster
                cluster_obj = pyv.find_cluster_by_name(
                    cluster_name=cluster, datacenter_name=datacenter_obj
                )
                if not cluster_obj:
                    msg = "Unable to find the cluster %(cluster_name)s"
                    if datacenter:
                        msg += " in datacenter %(datacenter)s"
                    module.fail_json(msg=msg % module.params)
                entityId = cluster_obj._moId
            # assign to current vCenter, if esxi_hostname is not specified
            elif module.params["esxi_hostname"] is None:
                entityId = pyv.content.about.instanceUuid
                # if key name not contain "VMware vCenter Server"
                if pyv.content.about.name not in key.name:
                    module.warn(
                        'License key "%s" (%s) is not suitable for "%s"'
                        % (license, key.name, pyv.content.about.name)
                    )
            # assign to ESXi server
            else:
                esxi_host = find_hostsystem_by_name(
                    pyv.content, module.params["esxi_hostname"]
                )
                if esxi_host is None:
                    module.fail_json(
                        msg='Cannot find the specified ESXi host "%s".'
                        % module.params["esxi_hostname"]
                    )
                entityId = esxi_host._moId
                # e.g., key.editionKey is "esx.enterprisePlus.cpuPackage", not sure all keys are in this format
                if "esx" not in key.editionKey:
                    module.warn(
                        'License key "%s" edition "%s" is not suitable for ESXi server'
                        % (license, key.editionKey)
                    )

            try:
                assigned_license = lam.QueryAssignedLicenses(entityId=entityId)
            except Exception as e:
                module.fail_json(
                    msg='Could not query vCenter "%s" assigned license info due to %s.'
                    % (entityId, to_native(e))
                )

            # Reassign only when nothing is assigned yet or a different key
            # is currently assigned to the entity.
            if not assigned_license or (
                len(assigned_license) != 0
                and assigned_license[0].assignedLicense.licenseKey != license
            ):
                try:
                    lam.UpdateAssignedLicense(
                        entity=entityId, licenseKey=license
                    )
                except Exception:
                    module.fail_json(
                        msg='Could not assign "%s" (%s) to vCenter.'
                        % (license, key.name)
                    )
                result["changed"] = True
            result["licenses"] = pyv.list_keys(lm.licenses)
        else:
            module.fail_json(
                msg='License "%s" is not existing or can not be added'
                % license
            )
        if module._diff:
            result["diff"]["after"] = "\n".join(result["licenses"]) + "\n"

    elif state == "absent" and license in result["licenses"]:
        # Check if key is in use
        key = pyv.find_key(lm.licenses, license)
        if key.used > 0:
            module.fail_json(
                msg='Cannot remove key "%s", still in use %s time(s).'
                % (license, key.used)
            )

        result["changed"] = True
        if module.check_mode:
            result["licenses"].remove(license)
        else:
            lm.RemoveLicense(license)
            result["licenses"] = pyv.list_keys(lm.licenses)
        if module._diff:
            result["diff"]["after"] = "\n".join(result["licenses"]) + "\n"

    module.exit_json(**result)
def __init__(self, module):
    """Resolve every object needed for the vMotion and run it.

    Fails the module early when the VM, the destination host/datastore,
    or the destination resource pool cannot be resolved.  Computes
    whether any migration is required at all (change_required) and
    whether storage moves (storage_vmotion_needed), then either performs
    the migration and waits for the task, or reports the current host.

    :param module: the AnsibleModule instance driving this run.
    """
    super(VmotionManager, self).__init__(module)
    self.vm = None
    self.vm_uuid = self.params.get("vm_uuid", None)
    self.use_instance_uuid = self.params.get("use_instance_uuid", False)
    self.vm_name = self.params.get("vm_name", None)
    self.moid = self.params.get("moid") or None
    result = dict()

    self.get_vm()
    if self.vm is None:
        vm_id = self.vm_uuid or self.vm_name or self.moid
        self.module.fail_json(
            msg="Failed to find the virtual machine with %s" % vm_id
        )

    # Get Destination Host System if specified by user
    dest_host_name = self.params.get("destination_host", None)
    self.host_object = None
    if dest_host_name is not None:
        self.host_object = find_hostsystem_by_name(
            content=self.content, hostname=dest_host_name
        )

    # Get Destination Datastore if specified by user
    dest_datastore = self.params.get("destination_datastore", None)
    self.datastore_object = None
    if dest_datastore is not None:
        self.datastore_object = find_datastore_by_name(
            content=self.content, datastore_name=dest_datastore
        )

    # At-least one of datastore, host system is required to migrate
    if self.datastore_object is None and self.host_object is None:
        self.module.fail_json(
            msg="Unable to find destination datastore"
            " and destination host system."
        )

    # Get Destination resourcepool; when only a host was given, default
    # to that host's parent resource pool.
    dest_resourcepool = self.params.get("destination_resourcepool", None)
    self.resourcepool_object = None
    if dest_resourcepool:
        self.resourcepool_object = find_resource_pool_by_name(
            content=self.content, resource_pool_name=dest_resourcepool
        )
    elif not dest_resourcepool and dest_host_name:
        self.resourcepool_object = self.host_object.parent.resourcePool
    # Fail if resourcePool object is not found
    if self.resourcepool_object is None:
        # Fixed message: previously read "Unable to destination resource
        # pool object ..." (missing verb).
        self.module.fail_json(
            msg="Unable to find destination resource pool object which is required"
        )

    # Check if datastore is required, this check is required if destination
    # and source host system does not share same datastore.
    host_datastore_required = []
    for vm_datastore in self.vm.datastore:
        if (
            self.host_object
            and vm_datastore not in self.host_object.datastore
        ):
            host_datastore_required.append(True)
        else:
            host_datastore_required.append(False)

    if any(host_datastore_required) and dest_datastore is None:
        msg = (
            "Destination host system does not share"
            " datastore ['%s'] with source host system ['%s'] on which"
            " virtual machine is located. Please specify destination_datastore"
            " to rectify this problem."
            % (
                "', '".join(
                    [ds.name for ds in self.host_object.datastore]
                ),
                "', '".join([ds.name for ds in self.vm.datastore]),
            )
        )
        self.module.fail_json(msg=msg)

    storage_vmotion_needed = True
    change_required = True

    if self.host_object and self.datastore_object:
        # We have both host system and datastore object
        if not self.datastore_object.summary.accessible:
            # Datastore is not accessible
            self.module.fail_json(
                msg="Destination datastore %s is"
                " not accessible." % dest_datastore
            )

        if self.datastore_object not in self.host_object.datastore:
            # Datastore is not associated with host system
            self.module.fail_json(
                msg="Destination datastore %s provided"
                " is not associated with destination"
                " host system %s. Please specify"
                " datastore value ['%s'] associated with"
                " the given host system."
                % (
                    dest_datastore,
                    dest_host_name,
                    "', '".join(
                        [ds.name for ds in self.host_object.datastore]
                    ),
                )
            )

        # Nothing to do when VM already sits on the requested host and
        # datastore.
        if (
            self.vm.runtime.host.name == dest_host_name
            and dest_datastore in [ds.name for ds in self.vm.datastore]
        ):
            change_required = False

    if self.host_object and self.datastore_object is None:
        if self.vm.runtime.host.name == dest_host_name:
            # VM is already located on same host
            change_required = False

        # Host-only move never relocates storage.
        storage_vmotion_needed = False

    elif self.datastore_object and self.host_object is None:
        if self.datastore_object in self.vm.datastore:
            # VM is already located on same datastore
            change_required = False

        if not self.datastore_object.summary.accessible:
            # Datastore is not accessible
            self.module.fail_json(
                msg="Destination datastore %s is"
                " not accessible." % dest_datastore
            )

    if module.check_mode:
        # Report the would-be result without migrating.
        result["running_host"] = module.params["destination_host"]
        result["changed"] = True
        module.exit_json(**result)

    if change_required:
        # Migrate VM and get Task object back
        task_object = self.migrate_vm()

        # Wait for task to complete
        try:
            wait_for_task(task_object)
        except TaskError as task_error:
            self.module.fail_json(msg=to_native(task_error))
        # If task was a success the VM has moved, update running_host and complete module
        if task_object.info.state == vim.TaskInfo.State.success:
            # The storage layout is not automatically refreshed, so we trigger it to get coherent module return values
            if storage_vmotion_needed:
                self.vm.RefreshStorageInfo()
            result["running_host"] = module.params["destination_host"]
            result["changed"] = True
            module.exit_json(**result)
        else:
            msg = "Unable to migrate virtual machine due to an error, please check vCenter"
            if task_object.info.error is not None:
                msg += " : %s" % task_object.info.error
            module.fail_json(msg=msg)
    else:
        # No migration needed: report where the VM currently runs.
        try:
            host = self.vm.summary.runtime.host
            result["running_host"] = host.summary.config.name
        except vim.fault.NoPermission:
            # Caller may lack permission on the host object.
            result["running_host"] = "NA"
        result["changed"] = False
        module.exit_json(**result)