def main(): """Sets up the module parameters, validates them and perform the change""" module = AnsibleModule( argument_spec=dict( vcenter_hostname=dict(required=True), username=dict(required=True), password=dict(required=True), guest=dict(required=True), resource_pool=dict(required=True), cluster=dict(required=True), sync=dict(required=False, type='bool', default=True) ), supports_check_mode=True ) server = VIServer() server.connect( module.params['vcenter_hostname'], module.params['username'], module.params['password']) virtualmachine = server.get_vm_by_name(module.params['guest']) old_name = virtualmachine.get_resource_pool_name() new_name = module.params['resource_pool'] # find the clusters ManagedObjectReference cluster = None clusters = server.get_clusters() for mor, name in clusters.iteritems(): if name == module.params['cluster']: cluster = mor break if cluster is None: module.fail_json(msg='Cluster %s not found on server %s' % (module.params['cluster'], module.params['vcenter_hostname'])) # find the new resource pools Managed Object Reference and migrate the VM rps = server.get_resource_pools(from_mor=cluster) for mor, path in rps.iteritems(): if re.match('.*%s$' % new_name, path): if not re.match('.*%s$' % old_name, path): if not module.check_mode: virtualmachine.migrate( resource_pool=mor, host=virtualmachine.get_property('hostname'), sync_run=module.params['sync']) module.exit_json(changed=True, changes=module.params) module.exit_json(changed=False, changes=module.params) module.fail_json(msg='Resource pool %s not found' % module.params['resource_pool'])
class VMWareSystem(MgmtSystemAPIBase):
    """Client to Vsphere API

    This class piggy backs off pysphere.

    Benefits of pysphere:
      - Don't need intimate knowledge w/ vsphere api itself.
    Detriments of pysphere:
      - Response often are not detailed enough.
    """

    def __init__(self, hostname, username, password, **kwargs):
        # Connect eagerly; there is no reconnect logic in this version of
        # the class, so a dropped session surfaces as pysphere errors later.
        self.api = VIServer()
        self.api.connect(hostname, username, password)

    def _get_vm(self, vm_name=None):
        """Return the pysphere VM object for ``vm_name``.

        Raises a plain ``Exception`` when ``vm_name`` is missing or lookup
        fails.
        NOTE(review): when ``vm_name`` is None the message reads
        "Could not find a VM named None." — a misleading wording for what is
        really a missing-argument error.
        """
        if vm_name is None:
            raise Exception('Could not find a VM named %s.' % vm_name)
        else:
            try:
                vm = self.api.get_vm_by_name(vm_name)
                return vm
            except VIException as ex:
                # re-wrap so callers don't need to know about pysphere types
                raise Exception(ex)

    def _get_resource_pool(self, resource_pool_name=None):
        """Return the MOR of the first resource pool whose path matches
        ``resource_pool_name`` (unanchored regex match against the pool path).
        """
        rps = self.api.get_resource_pools()
        for mor, path in rps.iteritems():
            if re.match('.*%s' % resource_pool_name, path):
                return mor
        # Just pick the first
        return rps.keys()[0]

    def _find_ip(self, vm):
        """Poll the VM's guest-network property until an IPv4 address shows up.

        Polls every 5 seconds; the counter advances by 5 per try and the loop
        breaks past 600, so the wait is bounded at roughly 10 minutes.
        Returns the first non-loopback IPv4 address, or None on timeout.
        """
        maxwait = 600
        net_info = None
        waitcount = 0
        while net_info is None:
            if waitcount > maxwait:
                break
            net_info = vm.get_property('net', False)
            waitcount += 5
            time.sleep(5)
        if net_info:
            ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
            for ip in net_info[0]['ip_addresses']:
                # skip the loopback device
                if re.match(ipv4_re, ip) and ip != '127.0.0.1':
                    return ip
        return None

    def start_vm(self, vm_name):
        """Power the VM on; True if it is (or ends up) powered on."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_on():
            return True
        else:
            vm.power_on()
            ack = vm.get_status()
            if ack == 'POWERED ON':
                return True
        return False

    def stop_vm(self, vm_name):
        """Power the VM off; True if it is (or ends up) powered off."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_off():
            return True
        else:
            vm.power_off()
            ack = vm.get_status()
            if ack == 'POWERED OFF':
                return True
        return False

    def delete_vm(self, vm_name):
        """Power off (if needed) and destroy the VM.

        Returns True when the destroy task succeeds, False otherwise.
        """
        vm = self._get_vm(vm_name)
        if vm.is_powered_on():
            self.stop_vm(vm_name)
        # When pysphere moves up to 0.1.8, we can just do:
        # vm.destroy()
        # Until then, build the Destroy_Task SOAP request by hand.
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        rtn = self.api._proxy.Destroy_Task(request)._returnval
        task = VITask(rtn, self.api)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            return True
        else:
            return False

    def create_vm(self, vm_name):
        raise NotImplementedError('This function has not yet been implemented.')

    def restart_vm(self, vm_name):
        """Stop then start the VM; False if the stop failed."""
        if not self.stop_vm(vm_name):
            return False
        else:
            return self.start_vm(vm_name)

    def list_vm(self, **kwargs):
        """Return a sorted list of registered VM names."""
        vm_list = self.api.get_registered_vms(**kwargs)

        # The vms come back in an unhelpful format, so run them through a regex
        # Example vm name: '[datastore] vmname/vmname.vmx'
        def vm_name_generator():
            for vm in vm_list:
                match = re.match(r'\[.*\] (.*)/\1\..*', vm)
                if match:
                    yield match.group(1)

        # Unroll the VM name generator, and sort it to be more user-friendly
        return sorted(list(vm_name_generator()))

    def info(self):
        """Return "<server type> <api version>" for the connected server."""
        return '%s %s' % (self.api.get_server_type(), self.api.get_api_version())

    def disconnect(self):
        self.api.disconnect()

    def vm_status(self, vm_name):
        """Return (and print) the pysphere status string of the VM."""
        state = self._get_vm(vm_name).get_status()
        print "vm " + vm_name + " status is " + state
        return state

    def is_vm_running(self, vm_name):
        state = self.vm_status(vm_name)
        return "POWERED ON" == state

    def is_vm_stopped(self, vm_name):
        state = self.vm_status(vm_name)
        return "POWERED OFF" == state

    def is_vm_suspended(self, vm_name):
        state = self.vm_status(vm_name)
        return "SUSPENDED" == state

    def suspend_vm(self, vm_name):
        """Suspend a running VM; raises when the VM is powered off."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_off():
            raise Exception('Could not suspend %s because it\'s not running.' % vm_name)
        else:
            vm.suspend()
        return self.is_vm_suspended(vm_name)

    def clone_vm(self, source_name, vm_name, resourcepool=None):
        """Clone ``source_name`` to ``vm_name`` (synchronously) and return the
        clone's first IP address (see ``_find_ip``); raises on lookup failure.
        """
        vm = self._get_vm(source_name)
        if vm:
            clone = vm.clone(vm_name, sync_run=True,
                             resourcepool=self._get_resource_pool(resourcepool))
            return self._find_ip(clone)
        else:
            raise Exception('Could not clone %s' % source_name)
if __name__ == "__main__": #you can get the resource pools running s.get_resource_pools() RESOURCE_POOL = "/Resources" OVF_FILE = "ovf.ovf" #you can get the host names running s.get_hosts() HOST = "10.16.120.54" DATASTORE = "datastore1" NETWORK_MAPPING = {"bridged": "VM Network"} VAPP_NAME = "import1" s = VIServer() s.connect("10.16.120.178", "administrator", "R3dhat!") try: host = [k for k, v in s.get_hosts().items() if v == HOST][0] resource_pool = [ k for k, v in s.get_resource_pools().items() if v == RESOURCE_POOL ][0] datastore = [ k for k, v in s.get_datastores().items() if v == DATASTORE ][0] ovf = get_descriptor(OVF_FILE) descriptor_info = parse_descriptor(ovf) if hasattr(descriptor_info, "Warning"): print "Warning:", descriptor_info.Warning[0].LocalizedMessage if hasattr(descriptor_info, "Error"): print "Error:", descriptor_info.Error[0].LocalizedMessage exit() support_info = validate_host(host, ovf) import_spec = create_import_spec( resource_pool,
class VMWareSystem(MgmtSystemAPIBase):
    """Client to Vsphere API

    This class piggy backs off pysphere.

    Benefits of pysphere:
      - Don't need intimate knowledge w/ vsphere api itself.
    Detriments of pysphere:
      - Response often are not detailed enough.

    Args:
        hostname: The hostname of the system.
        username: The username to connect with.
        password: The password to connect with.

    Returns: A :py:class:`VMWareSystem` object.
    """

    # backing field for the `api` property; None until first connect
    _api = None

    # name -> callable(self) map of cheap inventory statistics
    _stats_available = {
        'num_vm': lambda self: len(self.list_vm()),
        'num_host': lambda self: len(self.list_host()),
        'num_cluster': lambda self: len(self.list_cluster()),
        'num_template': lambda self: len(self.list_template()),
        'num_datastore': lambda self: len(self.list_datastore()),
    }

    def __init__(self, hostname, username, password, **kwargs):
        # Connection is lazy: the `api` property connects on first use.
        self.hostname = hostname
        self.username = username
        self.password = password
        self.api = VIServer()

    @property
    def api(self):
        # wrap calls to the API with a keepalive check, reconnect if needed
        try:
            keepalive = self._api.keep_session_alive()
            if not keepalive:
                logger.debug('The connection to %s "%s" timed out'
                             % (type(self).__name__, self.hostname))
        except VIException as ex:
            if ex.fault == "Not Connected":
                # set this to trigger a connection below
                keepalive = None
            else:
                raise
        if not keepalive:
            self._connect()
        return self._api

    @api.setter
    def api(self, api):
        # Allow for changing the api object via public setter
        self._api = api

    def _connect(self):
        # Since self.api calls _connect, connect via self._api to prevent implosion
        logger.debug('Connecting to %s "%s"' % (type(self).__name__, self.hostname))
        self._api.connect(self.hostname, self.username, self.password)

    def _get_vm(self, vm_name=None):
        """ Returns a vm from the VI object.

        Args:
            vm_name: The name of the VM.

        Returns: a pysphere object.

        Raises a plain ``Exception`` when the name is missing or lookup fails.
        """
        if vm_name is None:
            raise Exception('Could not find a VM named %s.' % vm_name)
        else:
            try:
                vm = self.api.get_vm_by_name(vm_name)
                return vm
            except VIException as ex:
                # re-wrap so callers don't need pysphere exception types
                raise Exception(ex)

    def does_vm_exist(self, name):
        """ Checks if a vm exists or not.

        Args:
            name: The name of the requested vm.
        Returns: A boolean, ``True`` if the vm exists, ``False`` if not.
        """
        try:
            self._get_vm(name)
            return True
        except Exception:
            return False

    def _get_resource_pool(self, resource_pool_name=None):
        """ Returns a resource pool MOR for a specified name.

        Args:
            resource_pool_name: The name of the resource pool.
        Returns: The MOR of the resource pool (unanchored regex match against
            the pool path; falls back to the first pool when nothing matches).
        """
        rps = self.api.get_resource_pools()
        for mor, path in rps.iteritems():
            if re.match('.*%s' % resource_pool_name, path):
                return mor
        # Just pick the first
        return rps.keys()[0]

    def get_ip_address(self, vm_name):
        """ Returns the first IP address for the selected VM.

        Args:
            vm_name: The name of the vm to obtain the IP for.
        Returns: A string containing the first found IP that isn't the
            loopback device, or None after a 600-second poll timeout.
        """
        vm = self._get_vm(vm_name)
        try:
            # poll the guest 'net' property every 5s for up to 600s
            net_info, tc = wait_for(vm.get_property, ['net', False],
                                    fail_condition=None, delay=5, num_sec=600,
                                    message="get_ip_address from vsphere")
        except TimedOutError:
            net_info = None
        if net_info:
            ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
            for ip in net_info[0]['ip_addresses']:
                if re.match(ipv4_re, ip) and ip != '127.0.0.1':
                    return ip
        return None

    def _get_list_vms(self, get_template=False):
        """ Obtains a list of all VMs on the system.

        Optional flag to obtain template names too.

        Args:
            get_template: A boolean describing if it should return template
                names also.
        Returns: A list of VMs.
        """
        template_or_vm_list = []
        # one traversal fetching both name and the template flag per VM
        props = self.api._retrieve_properties_traversal(
            property_names=['name', 'config.template'],
            from_node=None, obj_type=MORTypes.VirtualMachine)
        for prop in props:
            vm = None
            template = None
            for elem in prop.PropSet:
                if elem.Name == "name":
                    vm = elem.Val
                elif elem.Name == "config.template":
                    template = elem.Val
            if vm is None or template is None:
                continue
            # keep templates when asked for templates, plain VMs otherwise
            if template == bool(get_template):
                template_or_vm_list.append(vm)
        return template_or_vm_list

    def start_vm(self, vm_name):
        """Power the VM on; True if it is (or ends up) powered on."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_on():
            return True
        else:
            vm.power_on()
            ack = vm.get_status()
            if ack == 'POWERED ON':
                return True
        return False

    def stop_vm(self, vm_name):
        """Power the VM off; True if it is (or ends up) powered off."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_off():
            return True
        else:
            vm.power_off()
            ack = vm.get_status()
            if ack == 'POWERED OFF':
                return True
        return False

    def delete_vm(self, vm_name):
        """Power off (if needed) and destroy the VM.

        Returns True when the destroy task succeeds, False otherwise.
        """
        vm = self._get_vm(vm_name)
        if vm.is_powered_on():
            self.stop_vm(vm_name)
        # When pysphere moves up to 0.1.8, we can just do:
        # vm.destroy()
        # Until then, build the Destroy_Task SOAP request by hand.
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        rtn = self.api._proxy.Destroy_Task(request)._returnval
        task = VITask(rtn, self.api)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            return True
        else:
            return False

    def create_vm(self, vm_name):
        raise NotImplementedError('This function has not yet been implemented.')

    def restart_vm(self, vm_name):
        """Stop then start the VM; False if the stop failed."""
        if not self.stop_vm(vm_name):
            return False
        else:
            return self.start_vm(vm_name)

    def list_vm(self):
        return self._get_list_vms()

    def list_template(self):
        return self._get_list_vms(get_template=True)

    def list_flavor(self):
        raise NotImplementedError('This function is not supported on this platform.')

    def list_host(self):
        return self.api.get_hosts()

    def list_datastore(self):
        return self.api.get_datastores()

    def list_cluster(self):
        return self.api.get_clusters()

    def info(self):
        """Return "<server type> <api version>" for the connected server."""
        return '%s %s' % (self.api.get_server_type(), self.api.get_api_version())

    def disconnect(self):
        self.api.disconnect()

    def vm_status(self, vm_name):
        """Return (and print) the pysphere status string of the VM."""
        state = self._get_vm(vm_name).get_status()
        print "vm " + vm_name + " status is " + state
        return state

    def is_vm_running(self, vm_name):
        state = self.vm_status(vm_name)
        return "POWERED ON" == state

    def is_vm_stopped(self, vm_name):
        state = self.vm_status(vm_name)
        return "POWERED OFF" == state

    def is_vm_suspended(self, vm_name):
        state = self.vm_status(vm_name)
        return "SUSPENDED" == state

    def suspend_vm(self, vm_name):
        """Suspend a running VM; raises when the VM is powered off."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_off():
            raise Exception('Could not suspend %s because it\'s not running.' % vm_name)
        else:
            vm.suspend()
        return self.is_vm_suspended(vm_name)

    def clone_vm(self):
        raise NotImplementedError('clone_vm not implemented.')

    def deploy_template(self, template, *args, **kwargs):
        """Clone ``template`` into a new VM named ``kwargs['vm_name']``.

        Optional ``kwargs['resourcepool']`` selects the target pool (see
        ``_get_resource_pool``).  Returns the new VM name; raises when the
        template cannot be found.
        """
        if 'resourcepool' not in kwargs:
            kwargs['resourcepool'] = None
        vm = self._get_vm(template)
        if vm:
            vm.clone(kwargs['vm_name'], sync_run=True,
                     resourcepool=self._get_resource_pool(kwargs['resourcepool']))
            return kwargs['vm_name']
        else:
            raise Exception('Could not clone %s' % template)

    def remove_host_from_cluster(self, hostname):
        """Disconnect the named host, wait for the task, then destroy it."""
        req = VI.DisconnectHost_TaskRequestMsg()
        # resolve hostname -> host MOR (raises StopIteration if absent)
        mor = (key for key, value in self.api.get_hosts().items()
               if value == hostname).next()
        sys = VIMor(mor, 'HostSystem')
        _this = req.new__this(sys)
        _this.set_attribute_type(sys.get_attribute_type())
        req.set_element__this(_this)
        task_mor = self.api._proxy.DisconnectHost_Task(req)._returnval
        t = VITask(task_mor, self.api)
        wait_for(lambda: 'success' in t.get_state())
        self._destroy_host(hostname)

    def _destroy_host(self, hostname):
        """Issue a Destroy_Task on the named host and wait for completion."""
        req = VI.Destroy_TaskRequestMsg()
        mor = (key for key, value in self.api.get_hosts().items()
               if value == hostname).next()
        sys = VIMor(mor, 'HostSystem')
        _this = req.new__this(sys)
        _this.set_attribute_type(sys.get_attribute_type())
        req.set_element__this(_this)
        task_mor = self.api._proxy.Destroy_Task(req)._returnval
        t = VITask(task_mor, self.api)
        wait_for(lambda: 'success' in t.get_state())
class Creator:
    """Manages ESX-side resources (resource pools, VMs, virtual switches,
    port groups) over a pysphere connection that is opened lazily and torn
    down on error or object destruction.
    """

    def __init__(self, manager_address, manager_user, manager_password):
        """
        :param manager_address: address of the ESX/vCenter manager
        :param manager_user: login user name
        :param manager_password: login password
        """
        # connection is created here but only opened by _connect_to_esx()
        self.esx_server = VIServer()
        self.esx_address = manager_address
        self.esx_user = manager_user
        self.esx_password = manager_password

    def __del__(self):
        # best-effort teardown when the object is garbage collected
        self._disconnect_from_esx()

    def _connect_to_esx(self):
        """Open the ESX connection if it is not already open.

        :raise: CreatorException on any connection failure
        """
        if not self.esx_server.is_connected():
            try:
                self.esx_server.connect(self.esx_address, self.esx_user, self.esx_password)
            except Exception as inst:
                raise CreatorException(str(inst))

    def _disconnect_from_esx(self):
        """Close the ESX connection if it is open (no-op otherwise)."""
        if self.esx_server.is_connected():
            self.esx_server.disconnect()

    def create_resource_pool(
        self,
        name,
        parent_rp="/",
        esx_hostname=None,
        cpu_resources=("normal", 4000, 0, True, -1),
        memory_resources=("normal", 163840, 0, True, -1),
    ):
        """
        Creates a resource pool on esx server
        name - name for new resource pool
        parent_pr - parent resource pool
        esx_hostname - host name of esx server when resource pool will be created
        cpu_resources and memory_resources: tuple
            0:share level - 'low' 'normal' 'high' 'custom'
            1:share value - 'custom' share value, int
            2:reservation - reserved CPU/Memory, int
            3:expandable reservation - bool
            4:limit - -1 - unlimited, another value - limit value, int
        :raise: CreatorException
        """
        self._connect_to_esx()
        if parent_rp == "/":
            # root case: resolve the implicit "/Resources" pool
            parent_rpmor = None
            try:
                rp_mor_temp = [k for k, v in self.esx_server.get_resource_pools().items()
                               if v == "/Resources"]
            except IndexError:
                raise CreatorException("Couldn't find parent resource pool")
            if len(rp_mor_temp) == 0:
                raise CreatorException("Couldn't find parent resource pool")
            if esx_hostname:
                # several hosts each expose a "/Resources"; pick the one
                # whose parent is the requested host
                for rp in rp_mor_temp:
                    prop = VIProperty(self.esx_server, rp)
                    if prop.parent.name == esx_hostname:
                        parent_rpmor = rp
                        break
                if not parent_rpmor:
                    raise CreatorException("Couldn't find host")
            else:
                parent_rpmor = rp_mor_temp[0]
        else:
            # nested case: paths reported by pysphere are rooted at "/Resources"
            parent_rp = "/Resources" + parent_rp
            parent_rpmor = None
            try:
                parent_rp_temp = [k for k, v in self.esx_server.get_resource_pools().items()
                                  if v == parent_rp]
            except IndexError:
                raise CreatorException("Couldn't find parent a resource pool")
            if len(parent_rp_temp) == 0:
                raise CreatorException("Couldn't find parent a resource pool")
            if len(parent_rp_temp) == 1:
                parent_rpmor = parent_rp_temp[0]
            elif esx_hostname:
                # disambiguate identical paths by walking up to the host node
                for rp in parent_rp_temp:
                    prop = VIProperty(self.esx_server, rp)
                    while prop.parent.name != "host":
                        prop = prop.parent
                    if prop.name == esx_hostname:
                        parent_rpmor = rp
                        break
            else:
                raise CreatorException("ESX Hostname must be specified")
        # build the CreateResourcePool SOAP request
        req = VI.CreateResourcePoolRequestMsg()
        _this = req.new__this(parent_rpmor)
        _this.set_attribute_type(parent_rpmor.get_attribute_type())
        req.set_element__this(_this)
        req.Name = name
        spec = req.new_spec()
        cpu_allocation = spec.new_cpuAllocation()
        memory_allocation = spec.new_memoryAllocation()
        # cpu allocation settings
        shares = cpu_allocation.new_shares()
        shares.Level = cpu_resources[0]
        shares.Shares = cpu_resources[1]
        cpu_allocation.Shares = shares
        cpu_allocation.Reservation = cpu_resources[2]
        cpu_allocation.ExpandableReservation = cpu_resources[3]
        cpu_allocation.Limit = cpu_resources[4]
        spec.CpuAllocation = cpu_allocation
        # memory allocation settings
        shares = memory_allocation.new_shares()
        shares.Level = memory_resources[0]
        shares.Shares = memory_resources[1]
        memory_allocation.Shares = shares
        memory_allocation.Reservation = memory_resources[2]
        memory_allocation.ExpandableReservation = memory_resources[3]
        memory_allocation.Limit = memory_resources[4]
        spec.MemoryAllocation = memory_allocation
        req.Spec = spec
        try:
            self.esx_server._proxy.CreateResourcePool(req)
        except Exception as inst:
            self._disconnect_from_esx()
            # classify by server message text: duplicates vs. everything else
            inst = str(inst)
            if "already exist" in inst:
                raise ExistenceException("Couldn't create resource pool '%s', because it already exist" % name)
            else:
                raise CreatorException("Couldn't create the resource pool with name '%s'" % name)

    def destroy_resource_pool(self, name, esx_hostname=None):
        """
        Destroy named resource pool; vm included in this pool will be thrown
        on upper resource pool

        :param esx_hostname: host name of esx server, which contains resource pool
        :param name: name of resource pool
        :raise: CreatorException, ExistenceException
        """
        self._connect_to_esx()
        # normalize to the full "/Resources/..." path
        if name[0] != "/":
            rp_name = "/Resources/" + name
        else:
            rp_name = "/Resources" + name
        try:
            rp_mor_temp = [k for k, v in self.esx_server.get_resource_pools().items()
                           if v == rp_name]
        except IndexError:
            raise ExistenceException("Couldn't find resource pool '%s'" % name)
        rpmor = ""
        if esx_hostname:
            # disambiguate identical paths by walking up to the host node
            for rp in rp_mor_temp:
                prop = VIProperty(self.esx_server, rp)
                while prop.parent.name != "host":
                    prop = prop.parent
                if prop.name == esx_hostname:
                    rpmor = rp
                    break
        elif len(rp_mor_temp) == 1:
            rpmor = rp_mor_temp[0]
        else:
            raise CreatorException("ESX Hostname must be specified")
        req = VI.Destroy_TaskRequestMsg()
        _this = req.new__this(rpmor)
        _this.set_attribute_type(rpmor.get_attribute_type())
        req.set_element__this(_this)
        try:
            self.esx_server._proxy.Destroy_Task(req)
            # self._disconnect_from_esx()
        except Exception:
            self._disconnect_from_esx()
            raise

    def destroy_resource_pool_with_vms(self, name, esx_hostname=None):
        """
        Destroy named resource pool; vm included in this pool also will be destroyed

        :param name: name of resource pool
        :param esx_hostname: host name of esx server, which contains resource pool
        :raise: CreatorException, ExistenceException
        """
        self._connect_to_esx()
        if not name:
            raise CreatorException("Couldn't specify resource pool name")
        # normalize to the full "/Resources/..." path
        if name[0] != "/":
            rp_name = "/Resources/" + name
        else:
            rp_name = "/Resources" + name
        try:
            rp_mor_temp = [k for k, v in self.esx_server.get_resource_pools().items()
                           if v == rp_name]
        except IndexError:
            raise CreatorException("Couldn't find resource pool '%s'" % name)
        rpmor = ""
        if esx_hostname:
            for rp in rp_mor_temp:
                prop = VIProperty(self.esx_server, rp)
                while prop.parent.name != "host":
                    prop = prop.parent
                if prop.name == esx_hostname:
                    rpmor = rp
                    break
        elif len(rp_mor_temp) == 1:
            rpmor = rp_mor_temp[0]
        else:
            raise ExistenceException("Couldn't find resource pool '%s'" % name)
        # destroy every VM in the pool first, then the pool itself
        prop = VIProperty(self.esx_server, rpmor)
        vms = [str(k.name) for k in prop.vm]
        for k in vms:
            self.destroy_vm(k)
        # rp_name[10:] strips the "/Resources" prefix added above
        self.destroy_resource_pool(rp_name[10:], esx_hostname)
        # self._disconnect_from_esx()

    def destroy_vm(self, vmname):
        """
        Destroys virtual machine by name

        :param vmname: virtual machine name
        :raise: ExistenceException, CreatorException
        """
        self._connect_to_esx()
        try:
            vm = self.esx_server.get_vm_by_name(vmname)
        except Exception as error:
            self._disconnect_from_esx()
            raise ExistenceException("Couldn't find VM '%s' - %s" % (vmname, error.message))
        try:
            # power the VM down before destroying it
            if vm.is_powered_on() or vm.is_powering_off() or vm.is_reverting():
                vm.power_off()
            request = VI.Destroy_TaskRequestMsg()
            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = self.esx_server._proxy.Destroy_Task(request)._returnval
            # Wait for the task to finish
            task = VITask(ret, self.esx_server)
            status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
            if status != task.STATE_SUCCESS:
                raise CreatorException("Couldn't destroy vm - " + task.get_error_message())
        except Exception:
            self._disconnect_from_esx()
            raise CreatorException("Couldn't destroy the virtual machine %s" % vmname)

    def create_vm_old(
        self,
        vmname,
        esx_hostname=None,
        iso=None,
        datacenter=None,
        resource_pool="/",
        networks=None,
        datastore=None,
        description=None,
        guestosid="debian4Guest",
        memorysize=512,
        cpucount=1,
        disk_space=1048576,
    ):
        """
        Creates virtual machine (legacy keyword interface; packs the options
        into a dict and delegates to :meth:`create_vm`)

        :param vmname: name of virtual machine
        :param esx_hostname: host's name, when vm will be created;
            if not specified and ESX contains more than 1 hosts - raise CreatorException
        :param iso: path to .ISO image; must be stored in the same datastore
            as the virtual machine is created
        :param datacenter: name of datacenter, which contain hosts and datastores
        :param resource_pool: name of resource pool, when VM will be created
            (e.g. win-servers/win2003) if resource_pool_path is not defined,
            VM will created in root of inventory
        :param networks: list of existing port groups. NIC are based on this list
        :param datastore: name of datastore, which will be contain VM files;
            if not specified, VM will be placed in first datastore, which is
            avaliable for chosen host
        :param description: description for VM
        :param guestosid: ID for guest OS; full list
            http://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html
        :param memorysize: size of RAM, which will be avaliable on VM, in Mb
        :param cpucount: count of CPU, which will be avaliable on VM
        :param disk_space: hard drive's maximal size, in Kb
        """
        params = {}
        if vmname:
            params["vm_name"] = vmname
        else:
            raise AttributeError("Couldn't specify a virtual machine name")
        if iso:
            params["iso"] = iso
        if datacenter:
            params["datacenter_name"] = datacenter
        if datastore:
            params["datastore_name"] = datastore
        if resource_pool:
            params["resource_pool_name"] = resource_pool
        if not networks:
            networks = []
        if networks:
            params["networks"] = networks
        if not description:
            params["description"] = "Description of %s" % vmname
        else:
            params["description"] = description
        params["esx_hostname"] = esx_hostname
        params["guestosid"] = guestosid
        params["memory_size"] = memorysize
        params["cpu_count"] = cpucount
        params["disk_size"] = disk_space
        try:
            self.create_vm(params)
        except ExistenceException as error:
            raise
        except CreatorException as error:
            raise
        except Exception as error:
            raise

    def create_vm(self, vm_options):
        """
        Creates a virtual machine on ESXi server

        :param vm_options: dict, which contain parameters for VM
            'vm_name'
            'iso'
            'datacenter_name'
            'datastore_name'
            'resource_pool_name'
            'networks'
            'description'
            'esx_hostname'
            'hard_drive'
            'guestosid'
            'memory_size'
            'cpu_count'
            'disk_size'
        See create_vm_old for details
        :raise: CreatorException, ExistenceException
        """
        self._connect_to_esx()
        # VM NAME: a successful lookup means the name is already taken
        vm_name = None
        try:
            vm_name = str(vm_options["vm_name"])
            vm_temp = self.esx_server.get_vm_by_name(vm_name)
            if vm_temp:
                raise ExistenceException('VM "%s" already exists' % vm_name)
        except KeyError:
            raise CreatorException("Must specify VM name")
        except pysphere.VIException as inst:
            # "[Object Not Found]" is the expected outcome for a new name
            if "[Object Not Found]" in str(inst):
                pass
        # HOSTNAME
        hosts = self.esx_server.get_hosts()
        try:
            esx_hostname = vm_options["esx_hostname"]
            if not esx_hostname:
                raise KeyError
            elif not (esx_hostname in hosts.values()):
                raise CreatorException("Couldn't find host '%s'" % esx_hostname)
        except KeyError:
            if len(hosts.values()) > 1:
                raise CreatorException("More than 1 host - must specify ESX Hostname")
            elif not hosts.values():
                raise CreatorException("Couldn't find available host")
            # NOTE(review): `hosts` is a dict keyed by MOR, so hosts[0] raises
            # KeyError here — this was presumably meant to be hosts.values()[0].
            esx_hostname = hosts[0]
        # MOR and PROPERTIES
        hostmor = [k for k, v in hosts.items() if v == esx_hostname][0]
        hostprop = VIProperty(self.esx_server, hostmor)
        # DATACENTER - FIX EXCEPTION
        # todo: fix self.esx_server.get_datacenters().values()
        dcs = self.esx_server.get_datacenters()
        dc_values = dcs.values()
        try:
            dc_name = vm_options["datacenter_name"]
            if not dc_name in dc_values:
                # NOTE(review): "+" should be "%" — as written the message keeps
                # the literal '%s' and appends dc_name.
                raise CreatorException("Couldn't find datacenter '%s'" + dc_name)
        except KeyError:
            if len(dc_values) > 1:
                raise CreatorException("More than 1 datacenter - must specify ESX Hostname")
            elif not dc_values:
                raise CreatorException("Couldn't find available datacenter")
            dc_name = dc_values[0]
        # MOR and PROPERTIES
        dcmor = [k for k, v in dcs.items() if v == dc_name][0]
        dcprops = VIProperty(self.esx_server, dcmor)
        # DATASTORE  (note: `dcs` is rebound here from datacenters to datastores)
        dcs = hostprop.datastore
        try:
            ds_name = vm_options["datastore_name"]
            ds_list = []
            for ds in dcs:
                ds_list.append(ds.name)
            if not ds_name in ds_list:
                raise CreatorException("Couldn't find datastore or datastore is not available")
        except KeyError:
            if len(dcs) > 1:
                raise CreatorException("More than 1 datastore on ESX - must specify datastore name")
            elif not dcs:
                raise CreatorException("Couldn't find available datastore")
            ds_name = dcs[0].name
        # RESOURCE POOL: normalize to a leading-slash path ("/" = inventory root)
        resource_pool_name = ""
        try:
            resource_pool_name = vm_options["resource_pool_name"]
            if resource_pool_name == "/":
                pass
            elif resource_pool_name[0] != "/":
                resource_pool_name = "/{0}".format(resource_pool_name)
        except KeyError:
            resource_pool_name = "/"
        finally:
            rpmor = self._fetch_resource_pool(resource_pool_name, esx_hostname)
        if not rpmor:
            raise CreatorException("Couldn't find resource pool '%s'" % resource_pool_name)
        # NETWORKS
        try:
            networks = list(vm_options["networks"])
        except Exception:
            networks = []
        try:
            iso = vm_options["iso"]
            if iso == False:
                iso = None
            else:
                # todo: hide magic
                # strip everything up to and including the datastore name
                iso = iso[iso.find(ds_name) + len(ds_name) + 1 :]
        except KeyError:
            iso = None
        # Description
        try:
            description = vm_options["description"]
        except KeyError:
            description = "Description for VM %s" % vm_name
        try:
            guestosid = vm_options["guestosid"]
        except KeyError:
            guestosid = "otherGuest"
        try:
            memory_size = int(vm_options["memory_size"])
            if memory_size <= 0:
                raise CreatorException("Disk size must be greater than 0")
        except Exception:
            memory_size = DEFAULT_MEMORY_SIZE  # MB
        try:
            cpu_count = int(vm_options["cpu_count"])
        except Exception:
            cpu_count = DEFAULT_CPU_COUNT
        try:
            disk_size = int(vm_options["disk_size"])
            if disk_size < 0:
                raise CreatorException("Disk size must be greater than 0")
        except Exception:
            disk_size = DEFAULT_DISK_SIZE  # KB
        crprops = self._fetch_computer_resource(dcprops, hostmor)
        vmfmor = dcprops.vmFolder._obj
        # CREATE VM CONFIGURATION
        # get config target
        request = VI.QueryConfigTargetRequestMsg()
        _this = request.new__this(crprops.environmentBrowser._obj)
        _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
        request.set_element__this(_this)
        h = request.new_host(hostmor)
        h.set_attribute_type(hostmor.get_attribute_type())
        request.set_element_host(h)
        config_target = self.esx_server._proxy.QueryConfigTarget(request)._returnval
        # get default devices
        request = VI.QueryConfigOptionRequestMsg()
        _this = request.new__this(crprops.environmentBrowser._obj)
        _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
        request.set_element__this(_this)
        h = request.new_host(hostmor)
        h.set_attribute_type(hostmor.get_attribute_type())
        request.set_element_host(h)
        config_option = self.esx_server._proxy.QueryConfigOption(request)._returnval
        defaul_devs = config_option.DefaultDevice
        # get network name: every requested network must be accessible
        avaliable_networks = 0
        for net in networks:
            for n in config_target.Network:
                if n.Network.Accessible and n.Network.Name == net:
                    avaliable_networks += 1
                    break
        if len(networks) != avaliable_networks:
            msg = ""
            for n in config_target.Network:
                msg = "%s %s" % (msg, str(n.Network.Name))
            raise CreatorException("Couldn't find all networks; founded: %s" % msg)
            # raise ExistenceException("Couldn't find network")
        # get datastore
        ds = None
        for d in config_target.Datastore:
            if d.Datastore.Accessible and d.Datastore.Name == ds_name:
                ds = d.Datastore.Datastore
                ds_name = d.Datastore.Name
                break
        if not ds:
            raise CreatorException("Datastore is not available")
        volume_name = "[%s]" % ds_name
        # add parameters to the create vm task
        create_vm_request = VI.CreateVM_TaskRequestMsg()
        config = create_vm_request.new_config()
        vmfiles = config.new_files()
        vmfiles.set_element_vmPathName(volume_name)
        config.set_element_files(vmfiles)
        config.set_element_name(vm_name)
        config.set_element_annotation(description)
        config.set_element_memoryMB(memory_size)
        config.set_element_numCPUs(cpu_count)
        config.set_element_guestId(guestosid)
        devices = []
        # add a scsi controller
        disk_ctrl_key = 1
        scsi_ctrl_spec = config.new_deviceChange()
        scsi_ctrl_spec.set_element_operation("add")
        scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
        scsi_ctrl.set_element_busNumber(0)
        scsi_ctrl.set_element_key(disk_ctrl_key)
        scsi_ctrl.set_element_sharedBus("noSharing")
        scsi_ctrl_spec.set_element_device(scsi_ctrl)
        devices.append(scsi_ctrl_spec)
        # find ide controller
        if iso:
            ide_ctlr = None
            for dev in defaul_devs:
                if dev.typecode.type[1] == "VirtualIDEController":
                    ide_ctlr = dev
            # add a cdrom based on a physical device
            if ide_ctlr:
                cd_spec = config.new_deviceChange()
                cd_spec.set_element_operation("add")
                cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
                cd_device_backing = VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
                ds_ref = cd_device_backing.new_datastore(ds)
                ds_ref.set_attribute_type(ds.get_attribute_type())
                cd_device_backing.set_element_datastore(ds_ref)
                cd_device_backing.set_element_fileName("%s %s" % (volume_name, iso))
                cd_ctrl.set_element_backing(cd_device_backing)
                cd_ctrl.set_element_key(20)
                cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
                cd_ctrl.set_element_unitNumber(0)
                cd_spec.set_element_device(cd_ctrl)
                devices.append(cd_spec)
        # create a new disk - file based - for the vm
        if disk_size != 0:
            disk_spec = config.new_deviceChange()
            disk_spec.set_element_fileOperation("create")
            disk_spec.set_element_operation("add")
            disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
            disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
            disk_backing.set_element_fileName(volume_name)
            disk_backing.set_element_diskMode("persistent")
            disk_ctlr.set_element_key(0)
            disk_ctlr.set_element_controllerKey(disk_ctrl_key)
            disk_ctlr.set_element_unitNumber(0)
            disk_ctlr.set_element_backing(disk_backing)
            disk_ctlr.set_element_capacityInKB(disk_size)
            disk_spec.set_element_device(disk_ctlr)
            devices.append(disk_spec)
        # add a NIC. the network Name must be set as the device name to create the NIC.
        for network_name in networks:
            nic_spec = config.new_deviceChange()
            nic_spec.set_element_operation("add")
            nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
            nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
            nic_backing.set_element_deviceName(network_name)
            nic_ctlr.set_element_addressType("generated")
            nic_ctlr.set_element_backing(nic_backing)
            nic_ctlr.set_element_key(4)
            nic_spec.set_element_device(nic_ctlr)
            devices.append(nic_spec)
        config.set_element_deviceChange(devices)
        create_vm_request.set_element_config(config)
        folder_mor = create_vm_request.new__this(vmfmor)
        folder_mor.set_attribute_type(vmfmor.get_attribute_type())
        create_vm_request.set_element__this(folder_mor)
        rp_mor = create_vm_request.new_pool(rpmor)
        rp_mor.set_attribute_type(rpmor.get_attribute_type())
        create_vm_request.set_element_pool(rp_mor)
        host_mor = create_vm_request.new_host(hostmor)
        host_mor.set_attribute_type(hostmor.get_attribute_type())
        create_vm_request.set_element_host(host_mor)
        # CREATE THE VM - add option "wait"
        taskmor = self.esx_server._proxy.CreateVM_Task(create_vm_request)._returnval
        task = VITask(taskmor, self.esx_server)
        task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if task.get_state() == task.STATE_ERROR:
            self._disconnect_from_esx()
            raise CreatorException("Error creating vm: %s" % task.get_error_message())

    def create_virtual_switch(self, name, num_ports, esx_hostname=None):
        """
        Creates a new standard virtual switch on esx

        :param name: name for new virtual switch
        :param num_ports: numbers of emulated ports
        :param esx_hostname: host name of esx server when virtual switch will be created
        :raise: CreatorException, ExistenceException
        """
        num_ports = int(num_ports)
        self._connect_to_esx()
        hosts = None
        try:
            hosts = self.esx_server.get_hosts()
            if esx_hostname:
                host_system = [k for k, v in hosts.items() if v == esx_hostname][0]
            else:
                # no hostname given: just take the first host
                host_system = hosts.keys()[0]
        except IndexError:
            raise CreatorException("Couldn't find host")
        if not host_system:
            raise CreatorException("Couldn't find host")
        prop = VIProperty(self.esx_server, host_system)
        network_system = prop.configManager.networkSystem._obj
        # refuse to create a duplicate switch
        for vs in prop.configManager.networkSystem.networkInfo.vswitch:
            if vs.name == name:
                self._disconnect_from_esx()
                raise ExistenceException("Switch '%s' already exist" % name)
        request = VI.AddVirtualSwitchRequestMsg()
        _this = request.new__this(network_system)
        _this.set_attribute_type(network_system.get_attribute_type())
        request.set_element__this(_this)
        request.set_element_vswitchName(name)
        spec = request.new_spec()
        spec.set_element_numPorts(num_ports)
        request.set_element_spec(spec)
        try:
            self.esx_server._proxy.AddVirtualSwitch(request)
        except Exception:
            raise CreatorException("Couldn't create Switch")
        # self._disconnect_from_esx()

    def destroy_virtual_switch(self, name, esx_hostname=None):
        """
        Destroys a named standard virtual switch on esx

        :param name: virtual switch's name
        :param esx_hostname: host name of esx server when virtual switch placed
        :raise: CreatorException, ExistenceException
        """
        self._connect_to_esx()
        hosts = self.esx_server.get_hosts()
        try:
            if esx_hostname:
                host_system = [k for k, v in hosts.items() if v == esx_hostname][0]
            else:
                host_system = hosts.keys()[0]
        except Exception:
            raise CreatorException("Couldn't find host")
        prop = VIProperty(self.esx_server, host_system)
        network_system = prop.configManager.networkSystem._obj
        exist = False
        for vs in prop.configManager.networkSystem.networkInfo.vswitch:
            if vs.name == name:
                exist = True
                break
        if exist:
            request = VI.RemoveVirtualSwitchRequestMsg()
            _this = request.new__this(network_system)
            _this.set_attribute_type(network_system.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_vswitchName(name)
            try:
                self.esx_server._proxy.RemoveVirtualSwitch(request)
            except Exception:
                raise CreatorException("Couldn't remove virtual switch '%s'" % name)
        else:
            raise ExistenceException("Couldn't find virtual switch '%s'" % name)
        # self._disconnect_from_esx()

    def add_port_group(self, switch_name, vlan_name, esx_hostname=None, vlan_id=4095, promiscuous=False):
        """
        Add new network to exist switch

        :param switch_name: vlan_name of switch which will be reconfigured
        :param vlan_name: vlan_name of VLAN
        :param esx_hostname: ESX hostname
        :param vlan_id: id for VLAN
        :param promiscuous: promiscuous mode enable/disable (True/False)
        :raise: ExistenceException, CreatorException
        """
        self._connect_to_esx()
        vlan_id = int(vlan_id)
        hosts = self.esx_server.get_hosts()
        try:
            if esx_hostname:
                host_system = [k for k, v in hosts.items() if v == esx_hostname][0]
            else:
                host_system = hosts.keys()[0]
        except Exception:
            raise CreatorException("Couldn't find host")
        prop = VIProperty(self.esx_server, host_system)
        network_system = prop.configManager.networkSystem._obj
        request = VI.AddPortGroupRequestMsg()
        _this = request.new__this(network_system)
        _this.set_attribute_type(network_system.get_attribute_type())
        request.set_element__this(_this)
        portgrp = request.new_portgrp()
        portgrp.set_element_name(vlan_name)
        portgrp.set_element_vlanId(vlan_id)
        portgrp.set_element_vswitchName(switch_name)
        policy = portgrp.new_policy()
        security = policy.new_security()
        security.set_element_allowPromiscuous(promiscuous)
        policy.set_element_security(security)
        portgrp.set_element_policy(policy)
        request.set_element_portgrp(portgrp)
        try:
            self.esx_server._proxy.AddPortGroup(request)
        except Exception as inst:
            message = str(inst)
            if "already exist" in message:
                # NOTE(review): the "%" below binds only to vlan_name (missing
                # tuple parentheses), so this raise itself fails with a
                # TypeError; intended form is
                # % (vlan_name, vlan_id, switch_name). Same for the branch below.
                raise ExistenceException(
                    "Couldn't create network '%s:%s' on switch '%s', because it already exists" % vlan_name,
                    vlan_id,
                    switch_name,
                )
            else:
                raise CreatorException(
                    "Couldn't create network '%s:%s' on switch '%s'" % vlan_name, vlan_id, switch_name
                )
        # self._disconnect_from_esx()

    def is_connected(self):
        """
        Checks ESX manager connection

        :rtype : bool
        :return: connection status
        """
        return self.esx_server.is_connected()

    # NOTE(review): the method below is truncated in this view (its docstring
    # and body continue past the visible source).
    def vm_power_on(self, vmname):
        """ Turns power on for the virtual machine :param
vmname: virtual machine name :raise: ExistenceException, AttributeError, Exception """ if not vmname: raise AttributeError("Couldn't specify the virtual machine name") if not self._is_vm_exist(vmname): raise ExistenceException("Couldn't find the virtual machine '%s'" % vmname) try: self._connect_to_esx() vm = self.esx_server.get_vm_by_name(vmname) if not vm.is_powered_on() and not vm.is_powering_on(): vm.power_on() except Exception as error: self._disconnect_from_esx() raise CreatorException(error) def vm_power_off(self, vmname): """ Turns power off for the virtual machine :param vmname: virtual machine name :raise: ExistenceException, AttributeError, Exception """ if not vmname: raise AttributeError("Couldn't specify the virtual machine name") if not self._is_vm_exist(vmname): raise ExistenceException("Couldn't find the virtual machine '%s'" % vmname) try: self._connect_to_esx() vm = self.esx_server.get_vm_by_name(vmname) if not vm.is_powered_off() and not vm.is_powering_off(): vm.power_off() except Exception as error: self._disconnect_from_esx() raise CreatorException(error) def vm_reset(self, vmname): """ Resets a virtual machine. 
If the virtual machine is powered off, then turns the power on for this virtual machine :param vmname: virtual machine name :raise: AttributeError, ExistenceException, Exception """ if not vmname: raise AttributeError("Couldn't specify the virtual machine name") if not self._is_vm_exist(vmname): raise ExistenceException("Couldn't find the virtual machine '%s'" % vmname) try: self._connect_to_esx() vm = self.esx_server.get_vm_by_name(vmname) if not vm.is_powered_off(): vm.reset() else: vm.power_on() except Exception as error: self._disconnect_from_esx() raise CreatorException(error) def get_vm_path(self, vmname): """ Gets a virtual machine path on ESX server :param vmname: virtual machine name :return: virtual machine path :raise: CreatorException """ self._connect_to_esx() try: vm = self.esx_server.get_vm_by_name(vmname) except Exception: raise ExistenceException("Couldn't find the virtual machine '%s'" % vmname) try: return vm.get_property("path") except Exception as error: self._disconnect_from_esx() raise CreatorException(error) pass # todo: REVIEW ME def get_vm_obj(self, vmname): """ Gets a virtual machine object on ESX server :param vmname: virtual machine name :return: VIVirtualMachine instance :raise: CreatorException """ self._connect_to_esx() try: return self.esx_server.get_vm_by_name(vmname) except Exception: raise ExistenceException("Couldn't find the virtual machine '%s'" % vmname) finally: self._disconnect_from_esx() def add_existence_vmdk(self, vm_name, path): """ Add existence hard drive (.vmdk) to the virtual machine :param vm_name: virtual machine name :param path: hard drive path :param space: space for hard drive :raise: ExistenceException, CreatorException """ self._connect_to_esx() try: vm = self.esx_server.get_vm_by_name(vm_name) except Exception: raise ExistenceException("Couldn't find the virtual machine %s" % vm_name) unit_number = -1 for disk in vm._disks: unit_number = max(unit_number, disk["device"]["unitNumber"]) unit_number += 1 
request = VI.ReconfigVM_TaskRequestMsg() _this = request.new__this(vm._mor) _this.set_attribute_type(vm._mor.get_attribute_type()) request.set_element__this(_this) spec = request.new_spec() dc = spec.new_deviceChange() dc.Operation = "add" hd = VI.ns0.VirtualDisk_Def("hd").pyclass() hd.Key = -100 hd.UnitNumber = unit_number hd.CapacityInKB = 0 hd.ControllerKey = 1000 backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass() backing.FileName = path backing.DiskMode = "persistent" backing.ThinProvisioned = False hd.Backing = backing connectable = hd.new_connectable() connectable.StartConnected = True connectable.AllowGuestControl = False connectable.Connected = True hd.Connectable = connectable dc.Device = hd spec.DeviceChange = [dc] request.Spec = spec task = self.esx_server._proxy.ReconfigVM_Task(request)._returnval vi_task = VITask(task, self.esx_server) # Wait for task to finis status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR]) if status == vi_task.STATE_ERROR: self._disconnect_from_esx() raise CreatorException("ERROR CONFIGURING VM:%s" % vi_task.get_error_message()) # todo: add comment def _get_portgroup_name(self, name, esx_hostname=None): """ Get exist network from ESX :param name: network name :param esx_hostname: ESX host name :return: :raise: """ self._connect_to_esx() hosts = self.esx_server.get_hosts() try: if esx_hostname: host_system = [k for k, v in hosts.items() if v == esx_hostname][0] else: host_system = hosts.keys()[0] except IndexError: raise CreatorException("Couldn't find host") if not host_system: raise CreatorException("Couldn't find host") prop = VIProperty(self.esx_server, host_system) for pg in prop.configManager.networkSystem.networkInfo.portgroup: if pg.spec.name.lower() == name.lower(): real_name = pg.spec.name # self._disconnect_from_esx() return real_name # self._disconnect_from_esx() return None # todo: add comment def _is_vm_exist(self, name): """ :param name: :return: """ 
self._connect_to_esx() exist = False try: self.esx_server.get_vm_by_name(name) exist = True except: pass # self._disconnect_from_esx() return exist # todo: add comment def _fetch_resource_pool(self, rp_name, esx_hostname): """ :param rp_name: :param esx_hostname: :return: """ rpmor = None if rp_name == "/": rp_mor_temp = [k for k, v in self.esx_server.get_resource_pools().items() if v == "/Resources"] for rp in rp_mor_temp: prop = VIProperty(self.esx_server, rp) if prop.parent.name == esx_hostname: rpmor = rp break else: resource_pool = "/Resources" + rp_name rp_mor_temp = [k for k, v in self.esx_server.get_resource_pools().items() if v == resource_pool] for rp in rp_mor_temp: prop = VIProperty(self.esx_server, rp) while prop.parent.name != "host": prop = prop.parent if prop.name == esx_hostname: rpmor = rp break if rp: break return rpmor # todo: add comment def _fetch_computer_resource(self, datacenter_props, host): """ :param datacenter_props: :param host: :return: """ host_folder = datacenter_props.hostFolder._obj # get computer resources computer_resources = self.esx_server._retrieve_properties_traversal( property_names=["name", "host"], from_node=host_folder, obj_type="ComputeResource" ) # get computer resource of this host crmor = None for cr in computer_resources: if crmor: break for p in cr.PropSet: if p.Name == "host": for h in p.Val.get_element_ManagedObjectReference(): if h == host: crmor = cr.Obj break if crmor: break return VIProperty(self.esx_server, crmor)
break for p in cr.PropSet: if p.Name == "host": for h in p.Val.get_element_ManagedObjectReference(): if h == hostmor: crmor = cr.Obj break if crmor: break crprops = VIProperty(s, crmor) # get resource pool if resource_pool_path: cluster = [k for k, v in s.get_clusters().items() if v == cluster_name][0] rpmor = [ k for k, v in s.get_resource_pools(from_mor=cluster).items() if v == resource_pool_path ][0] else: rpmor = crprops.resourcePool._obj vmfmor = dcprops.vmFolder._obj # CREATE VM CONFIGURATION # get config target request = VI.QueryConfigTargetRequestMsg() _this = request.new__this(crprops.environmentBrowser._obj) _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type()) request.set_element__this(_this) h = request.new_host(hostmor)
# Fall back to interactively prompting for the template/VM path when the
# command-line option was never defined.
try:
    options.vm_path
except NameError:
    options.vm_path = raw_input("Enter the Template/VM to convert: ")

# Make the vCenter connection.
server = VIServer()
server.connect(options.vcenter, options.username, options.password)

# Set the host, cluster and resource pool to "deploy" it into.
# We look up the "mor" (ManagedObjectReference), the handle vCenter uses to
# identify each inventory object.
# NOTE(review): the host, cluster and resource-pool names below are
# hard-coded — presumably specific to one environment; confirm before reuse.
hosts = server.get_hosts()
host = [k for k,v in hosts.items() if v=="esxhost.local"][0]
vm = server.get_vm_by_name(options.vm_path)
cluster_name = "DevTest_Cluster"
resource_pool = "/Resources"
cluster = [k for k,v in server.get_clusters().items() if v==cluster_name][0]
rpmor = [k for k,v in server.get_resource_pools(from_mor=cluster).items() if v==resource_pool][0]

# This is the request message we send off to make the change, setting the
# vm, host, and resource pool.  MarkAsVirtualMachine converts a template back
# into a regular, powerable VM on the chosen host/pool.
request = VI.MarkAsVirtualMachineRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
request.set_element_pool(rpmor)
request.set_element_host(host)
server._proxy.MarkAsVirtualMachine(request)
server.disconnect()
class ESXi_Server: server_ip = '' user_name = '' password = '' connect_flag = False server = None #vm_list = [] #def __init__(self): #Use the given args to connect the esxi server you want #@ip[string]: ESXi server's IP address #@name[string]: the username used to login the ESXi server #@pwd[string]: the password used to login the ESXi server def connect_server(self, ip, name, pwd): self.server_ip = ip self.user_name = name self.password = pwd self.server = VIServer() self.server.connect(self.server_ip, self.user_name, self.password) self.connect_flag = self.server.is_connected() if self.connect_flag: return True return False #To get all the definition registered vms from the connected server #@param[string]: can be set as ALL, POWER_ON, POWER_OFF, SUSPENDED #According to the param, returns a list of VM Paths. You might also filter by datacenter, #cluster, or resource pool by providing their name or MORs. #if cluster is set, datacenter is ignored, and if resource pool is set #both, datacenter and cluster are ignored. def get_registered_vms(self, param, status=None, datacenter=None, cluster=None, resource_pool=None): if param not in ['ALL', 'POWER_ON', 'POWER_OFF', 'SUSPENDED']: print "Get VMs error: param can only be set as ALL, POWER_ON, POWER_OFF, or SUSPENDED." return None if self.connect_flag == False: print "Get VMs error: Server not connected." 
return None if param == 'ALL': return self.server.get_registered_vms(datacenter, cluster, resource_pool) elif param == 'POWER_ON': return self.server.get_registered_vms(datacenter, cluster, resource_pool, status='poweredOn') elif param == 'POWER_OFF': return self.server.get_registered_vms(datacenter, cluster, resource_pool, status='poweredOff') elif param == 'SUSPENDED': return self.server.get_registered_vms(datacenter, cluster, resource_pool, status='suspended') else: return None #Disconnect to the Server def disconnect(self): if self.connect_flag == True: self.server = self.server.disconnect() self.connect_flag == False #To keep session alive def keep_session_alive(self): assert self.server.keep_session_alive() #To get the server type def get_server_type(self): return self.server.get_server_type() #To get performance manager def get_performance_manager(self): return self.server.get_performance_manager() #To get the all the server's hosts def get_all_hosts(self): """ Returns a dictionary of the existing hosts keys are their names and values their ManagedObjectReference object. """ return self.server.get_hosts() #To get all datastores def get_all_datastores(self): """ Returns a dictionary of the existing datastores. Keys are ManagedObjectReference and values datastore names. """ return self.server.get_datastores() #To get all clusters def get_all_clusters(self): """ Returns a dictionary of the existing clusters. Keys are their ManagedObjectReference objects and values their names. """ return self.server.get_clusters() #To get all datacenters def get_all_datacenters(self): """ Returns a dictionary of the existing datacenters. keys are their ManagedObjectReference objects and values their names. """ return self.server.get_datacenters() #To get all resource pools def get_all_resource_pools(self): """ Returns a dictionary of the existing ResourcePools. keys are their ManagedObjectReference objects and values their full path names. 
""" return self.server.get_resource_pools() #To get hosts by name def get_hosts_by_name(self, from_mor): """ Returns a dictionary of the existing ResourcePools. keys are their ManagedObjectReference objects and values their full path names. @from_mor: if given, retrieves the hosts contained within the specified managed entity. """ try: hosts_dic = self.server.get_hosts(from_mor) except: print "Get hosts error!" return None return hosts_dic
class VMWareSystem(MgmtSystemAPIBase):
    """Client to Vsphere API

    This class piggy backs off pysphere.

    Benefits of pysphere:
      - Don't need intimate knowledge w/ vsphere api itself.
    Detriments of pysphere:
      - Response often are not detailed enough.
    """

    def __init__(self, hostname, username, password, **kwargs):
        # Connect eagerly; extra kwargs are accepted but ignored.
        self.api = VIServer()
        self.api.connect(hostname, username, password)

    def _get_vm(self, vm_name=None):
        """Return the pysphere VM object for vm_name.

        Raises Exception when vm_name is None or the lookup fails.
        """
        if vm_name is None:
            raise Exception('Could not find a VM named %s.' % vm_name)
        else:
            try:
                vm = self.api.get_vm_by_name(vm_name)
                return vm
            except VIException as ex:
                # Re-wrap the pysphere error as a plain Exception.
                raise Exception(ex)

    def _get_resource_pool(self, resource_pool_name=None):
        """Return the MOR of the first resource pool whose path matches
        resource_pool_name; falls back to an arbitrary pool when no path
        matches."""
        rps = self.api.get_resource_pools()
        for mor, path in rps.iteritems():
            if re.match('.*%s' % resource_pool_name, path):
                return mor
        # Just pick the first
        return rps.keys()[0]

    def _find_ip(self, vm):
        """Poll the guest's 'net' property (in 5 s steps, up to ~10 min) and
        return its first non-loopback IPv4 address, or None on timeout."""
        maxwait = 600
        net_info = None
        waitcount = 0
        while net_info is None:
            if waitcount > maxwait:
                break
            net_info = vm.get_property('net', False)
            waitcount += 5
            time.sleep(5)
        if net_info:
            ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
            for ip in net_info[0]['ip_addresses']:
                if re.match(ipv4_re, ip) and ip != '127.0.0.1':
                    return ip
        return None

    def start_vm(self, vm_name):
        """Power on vm_name; True when it is (or ends up) powered on."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_on():
            return True
        else:
            vm.power_on()
            ack = vm.get_status()
            if ack == 'POWERED ON':
                return True
        return False

    def stop_vm(self, vm_name):
        """Power off vm_name; True when it is (or ends up) powered off."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_off():
            return True
        else:
            vm.power_off()
            ack = vm.get_status()
            if ack == 'POWERED OFF':
                return True
        return False

    def delete_vm(self, vm_name):
        """Destroy vm_name (powering it off first); True on task success."""
        vm = self._get_vm(vm_name)

        if vm.is_powered_on():
            self.stop_vm(vm_name)

        # When pysphere moves up to 0.1.8, we can just do:
        # vm.destroy()
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        rtn = self.api._proxy.Destroy_Task(request)._returnval

        task = VITask(rtn, self.api)
        # Block until the server-side destroy task settles.
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            return True
        else:
            return False

    def create_vm(self, vm_name):
        raise NotImplementedError(
            'This function has not yet been implemented.')

    def restart_vm(self, vm_name):
        """Stop then start vm_name; False when the stop failed."""
        if not self.stop_vm(vm_name):
            return False
        else:
            return self.start_vm(vm_name)

    def list_vm(self, **kwargs):
        """Return a sorted list of VM names registered on the server."""
        vm_list = self.api.get_registered_vms(**kwargs)

        # The vms come back in an unhelpful format, so run them through a regex
        # Example vm name: '[datastore] vmname/vmname.vmx'
        def vm_name_generator():
            for vm in vm_list:
                match = re.match(r'\[.*\] (.*)/\1\..*', vm)
                if match:
                    yield match.group(1)

        # Unroll the VM name generator, and sort it to be more user-friendly
        return sorted(list(vm_name_generator()))

    def info(self):
        """Return '<server type> <api version>'."""
        return '%s %s' % (self.api.get_server_type(),
                          self.api.get_api_version())

    def disconnect(self):
        self.api.disconnect()

    def vm_status(self, vm_name):
        """Return (and print) the power-state string for vm_name."""
        state = self._get_vm(vm_name).get_status()
        print "vm " + vm_name + " status is " + state
        return state

    def is_vm_running(self, vm_name):
        state = self.vm_status(vm_name)
        return "POWERED ON" == state

    def is_vm_stopped(self, vm_name):
        state = self.vm_status(vm_name)
        return "POWERED OFF" == state

    def is_vm_suspended(self, vm_name):
        state = self.vm_status(vm_name)
        return "SUSPENDED" == state

    def suspend_vm(self, vm_name):
        """Suspend vm_name; raises when it is not running."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_off():
            raise Exception('Could not suspend %s because it\'s not running.' %
                            vm_name)
        else:
            vm.suspend()
        return self.is_vm_suspended(vm_name)

    def clone_vm(self, source_name, vm_name, resourcepool=None):
        """Clone source_name to vm_name and return the clone's IP address
        (None when no IP appeared within the _find_ip timeout)."""
        vm = self._get_vm(source_name)
        if vm:
            clone = vm.clone(
                vm_name, sync_run=True,
                resourcepool=self._get_resource_pool(resourcepool))
            return self._find_ip(clone)
        else:
            raise Exception('Could not clone %s' % source_name)
prop = VIProperty(con, MORefRPhost) print "*" * 50 print "Stats for host", Host print " overall processor usage:", prop.summary.quickStats.overallCpuUsage print " overall memory usage:", prop.summary.quickStats.overallMemoryUsage print " distributedCpuFairness: ", prop.summary.quickStats.distributedCpuFairness print " distributedMemoryFairness: ", prop.summary.quickStats.distributedMemoryFairness con.disconnect() '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' ''' Get All properties for a ressource pool : resource_pool_name ''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' s = VIServer() s.connect(vCenterserver, username, password, LOG_FILE) MORefRP = [ k for k, v in s.get_resource_pools().items() if v == resource_pool_name ][0] properties = [ "summary.quickStats.overallCpuUsage" #Basic CPU performance statistics, in MHz. , "summary.quickStats.overallCpuDemand" #Basic CPU performance statistics, in MHz. , "summary.quickStats.guestMemoryUsage" #Guest memory utilization statistics, in MB. This is also known as active guest memory. The number can be between 0 and the configured memory size of a virtual machine. , "summary.quickStats.hostMemoryUsage" #Host memory utilization statistics, in MB. This is also known as consummed host memory. This is between 0 and the configured resource limit. Valid while a virtual machine is running. This includes the overhead memory of a virtual machine. , "summary.quickStats.distributedCpuEntitlement" #This is the amount of CPU resource, in MHz, that this VM is entitled to, as calculated by DRS. Valid only for a VM managed by DRS. , "summary.quickStats.distributedMemoryEntitlement" #This is the amount of memory, in MB, that this VM is entitled to, as calculated by DRS. Valid only for a VM managed by DRS. , "summary.quickStats.staticCpuEntitlement" #The static CPU resource entitlement for a virtual machine. 
This value is calculated based on this virtual machine's resource reservations, shares and limit, and doesn't take into account current usage. This is the worst case CPU allocation for this virtual machine, that is, the amount of CPU resource this virtual machine would receive if all virtual machines running in the cluster went to maximum consumption. Units are MHz.
server.connect(server_name, user_name, password) # Get a reference to the folder in which VM needs to be created folders_list = server._get_managed_objects(MORTypes.Folder) for key in folders_list.keys(): if folders_list[key] == folder_name: folder_mor = key break else: raise Exception("Folder not found") # folder_filter = {"parent": folder_mor} clusters_list = server.get_clusters() for cluster_key in clusters_list.keys(): if clusters_list[cluster_key] == cluster_name: resourcepool = server.get_resource_pools(cluster_key).keys()[0] # WPST folder_mor = [ mor for mor, name in server._get_managed_objects( MORTypes.Folder).items() if name == folder_name ][0] vms = server._get_managed_objects(MORTypes.VirtualMachine, from_mor=folder_mor).values() # delete old VMs if is_purge == 'True': for vmname in vms: if vm_munged_name_prefix not in vmname: targetvm = server.get_vm_by_name(vmname) delete_vm(targetvm) # create new VM matched_vms = [
logging.basicConfig(level=logging.DEBUG) #you can get the resource pools running s.get_resource_pools() RESOURCE_POOL = "/Resources" OVF_FILE = "ovf.ovf" #you can get the host names running s.get_hosts() HOST = "10.16.120.54" DATASTORE = "datastore1" NETWORK_MAPPING = {"bridged":"VM Network"} VAPP_NAME = "import1" s = VIServer() s.connect("10.16.120.178", "administrator", "R3dhat!") try: LOGGER.debug('Hosts: %s.', s.get_hosts()) host = [k for k,v in s.get_hosts().items() if v==HOST][0] resource_pool = [k for k,v in s.get_resource_pools().items() if v == RESOURCE_POOL][0] LOGGER.debug('Datastores: %s', s.get_datastores()) datastore = [k for k,v in s.get_datastores().items() if v==DATASTORE][0] ovf = get_descriptor(OVF_FILE) descriptor_info = parse_descriptor(ovf) if hasattr(descriptor_info, "Warning"): LOGGER.warning(str(descriptor_info.Warning[0].LocalizedMessage)) if hasattr(descriptor_info, "Error"): LOGGER.error(str(descriptor_info.Error[0].LocalizedMessage)) exit() support_info = validate_host(host, ovf) import_spec = create_import_spec(resource_pool, datastore, ovf,
class ESXi_Server:
    """Thin convenience wrapper around a pysphere VIServer connection.

    Logging variant: errors go through the module-level LOG instead of
    print.  Tracks connection state in ``connect_flag``.
    """

    # Connection parameters and state; class-level defaults, set per
    # instance by connect_server().
    server_ip = ''
    user_name = ''
    password = ''
    connect_flag = False
    server = None
    # vm_list = []

    def connect_server(self, ip, name, pwd):
        """Connect to the ESXi server.

        @ip[string]: ESXi server's IP address
        @name[string]: the username used to login the ESXi server
        @pwd[string]: the password used to login the ESXi server
        Returns True when the connection was established, False otherwise.
        """
        self.server_ip = ip
        self.user_name = name
        self.password = pwd
        self.server = VIServer()
        self.server.connect(self.server_ip, self.user_name, self.password)
        self.connect_flag = self.server.is_connected()
        return bool(self.connect_flag)

    def get_registered_vms(self, param, status=None, datacenter=None,
                           cluster=None, resource_pool=None):
        """Return a list of registered VM paths filtered by power state.

        @param[string]: one of ALL, POWERED ON, POWERED OFF, SUSPENDED.
        You may also filter by datacenter, cluster, or resource pool; if
        cluster is set, datacenter is ignored, and if resource_pool is set
        both datacenter and cluster are ignored.  Returns None on bad input
        or when not connected.
        """
        # Map the public filter keyword onto pysphere's status strings.
        status_map = {
            'ALL': None,
            'POWERED ON': 'poweredOn',
            'POWERED OFF': 'poweredOff',
            'SUSPENDED': 'suspended',
        }
        if param not in status_map:
            LOG.debug(
                "Get VMs error: param can only be set as ALL, POWERED ON,"
                " POWERED OFF, or SUSPENDED.")
            return None
        if not self.connect_flag:
            LOG.info("Get VMs error: Server not connected.")
            return None
        if param == 'ALL':
            return self.server.get_registered_vms(datacenter, cluster,
                                                  resource_pool)
        return self.server.get_registered_vms(datacenter, cluster,
                                              resource_pool,
                                              status=status_map[param])

    # Disconnect to the Server
    def disconnect(self):
        """Disconnect from the server and clear the connection flag."""
        if self.connect_flag:
            self.server = self.server.disconnect()
            self.connect_flag = False

    # To keep session alive
    def keep_session_alive(self):
        return self.server.keep_session_alive()

    # To get the server type
    def get_server_type(self):
        return self.server.get_server_type()

    # To get performance manager
    def get_performance_manager(self):
        return self.server.get_performance_manager()

    # To get the all the server's hosts
    def get_all_hosts(self):
        """
        Returns a dictionary of the existing hosts.  Keys are their names
        and values their ManagedObjectReference object.
        """
        return self.server.get_hosts()

    # To get all datastores
    def get_all_datastores(self):
        """
        Returns a dictionary of the existing datastores.  Keys are
        ManagedObjectReference and values datastore names.
        """
        return self.server.get_datastores()

    # To get all clusters
    def get_all_clusters(self):
        """
        Returns a dictionary of the existing clusters.  Keys are their
        ManagedObjectReference objects and values their names.
        """
        return self.server.get_clusters()

    # To get all datacenters
    def get_all_datacenters(self):
        """
        Returns a dictionary of the existing datacenters.  Keys are their
        ManagedObjectReference objects and values their names.
        """
        return self.server.get_datacenters()

    # To get all resource pools
    def get_all_resource_pools(self):
        """
        Returns a dictionary of the existing ResourcePools.  Keys are their
        ManagedObjectReference objects and values their full path names.
        """
        return self.server.get_resource_pools()

    # To get hosts by name
    def get_hosts_by_name(self, from_mor):
        """
        Returns a dictionary of the existing hosts, as get_all_hosts().
        @from_mor: if given, retrieves the hosts contained within the
        specified managed entity.  Returns None on lookup failure.
        """
        try:
            hosts_dic = self.server.get_hosts(from_mor)
        except Exception:
            # Narrowed from a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt.
            LOG.error("Get hosts error!")
            return None
        return hosts_dic

    def run_vm_by_name(self, name):
        """Power on the named VM if it is currently powered off.

        Errors are logged and swallowed (best-effort semantics preserved).
        """
        try:
            vm = self.server.get_vm_by_name(name)
            status = vm.get_status()
        except Exception:
            LOG.error("Get vm status error when runing vm!")
            return
        if status == 'POWERED OFF':
            try:
                vm.power_on()
            except Exception:
                LOG.error("Run vm error!")

    def stop_vm_by_name(self, name):
        """Power off the named VM if it is currently powered on.

        Errors are logged and swallowed (best-effort semantics preserved).
        """
        try:
            vm = self.server.get_vm_by_name(name)
            status = vm.get_status()
        except Exception:
            LOG.error("Get vm status error when stopping vm!")
            return
        if status == 'POWERED ON':
            try:
                vm.power_off()
            except Exception:
                LOG.error("Stop vm error!")

    def get_vm_status_by_name(self, name):
        """Return the power status string of the named VM, or None on
        error."""
        try:
            vm = self.server.get_vm_by_name(name)
            status = vm.get_status()
        except Exception:
            LOG.info("Get VM status error!")
            return None
        LOG.info("Get VM status is %s" % status)
        return status
class VMWareSystem(MgmtSystemAPIBase):
    """Client to Vsphere API

    This class piggy backs off pysphere.

    Benefits of pysphere:
      - Don't need intimate knowledge w/ vsphere api itself.
    Detriments of pysphere:
      - Response often are not detailed enough.
    """

    # Lazily-evaluated statistic providers, keyed by stat name.
    # Presumably consumed by MgmtSystemAPIBase's stats machinery — confirm
    # against the base class.
    _stats_available = {
        'num_vm': lambda self: len(self.list_vm()),
        'num_host': lambda self: len(self.list_host()),
        'num_cluster': lambda self: len(self.list_cluster()),
        'num_template': lambda self: len(self.list_template()),
        'num_datastore': lambda self: len(self.list_datastore()),
    }

    def __init__(self, hostname, username, password, **kwargs):
        # Connect eagerly; extra kwargs are accepted but ignored.
        self.api = VIServer()
        self.api.connect(hostname, username, password)

    def _get_vm(self, vm_name=None):
        """Return the pysphere VM object for vm_name.

        Raises Exception when vm_name is None or the lookup fails.
        """
        if vm_name is None:
            raise Exception('Could not find a VM named %s.' % vm_name)
        else:
            try:
                vm = self.api.get_vm_by_name(vm_name)
                return vm
            except VIException as ex:
                # Re-wrap the pysphere error as a plain Exception.
                raise Exception(ex)

    def does_vm_exist(self, name):
        """True when a VM named name can be looked up on the server."""
        try:
            self._get_vm(name)
            return True
        except Exception:
            return False

    def _get_resource_pool(self, resource_pool_name=None):
        """Return the MOR of the first resource pool whose path matches
        resource_pool_name; falls back to an arbitrary pool when no path
        matches."""
        rps = self.api.get_resource_pools()
        for mor, path in rps.iteritems():
            if re.match('.*%s' % resource_pool_name, path):
                return mor
        # Just pick the first
        return rps.keys()[0]

    def get_ip_address(self, vm_name):
        """Poll the guest's 'net' property (in 5 s steps, up to ~10 min) and
        return its first non-loopback IPv4 address, or None on timeout."""
        vm = self._get_vm(vm_name)
        maxwait = 600
        net_info = None
        waitcount = 0
        while net_info is None:
            if waitcount > maxwait:
                break
            net_info = vm.get_property('net', False)
            waitcount += 5
            time.sleep(5)
        if net_info:
            ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
            for ip in net_info[0]['ip_addresses']:
                if re.match(ipv4_re, ip) and ip != '127.0.0.1':
                    return ip
        return None

    def _get_list_vms(self, get_template=False):
        """Return names of all VMs (get_template=False) or all templates
        (get_template=True), via a property-traversal query."""
        template_or_vm_list = []
        props = self.api._retrieve_properties_traversal(
            property_names=['name', 'config.template'],
            from_node=None,
            obj_type=MORTypes.VirtualMachine)
        for prop in props:
            vm = None
            template = None
            for elem in prop.PropSet:
                if elem.Name == "name":
                    vm = elem.Val
                elif elem.Name == "config.template":
                    template = elem.Val
            # Skip entries missing either property.
            if vm is None or template is None:
                continue
            # Keep only the kind requested (VM vs template).
            if template == bool(get_template):
                template_or_vm_list.append(vm)
        return template_or_vm_list

    def start_vm(self, vm_name):
        """Power on vm_name; True when it is (or ends up) powered on."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_on():
            return True
        else:
            vm.power_on()
            ack = vm.get_status()
            if ack == 'POWERED ON':
                return True
        return False

    def stop_vm(self, vm_name):
        """Power off vm_name; True when it is (or ends up) powered off."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_off():
            return True
        else:
            vm.power_off()
            ack = vm.get_status()
            if ack == 'POWERED OFF':
                return True
        return False

    def delete_vm(self, vm_name):
        """Destroy vm_name (powering it off first); True on task success."""
        vm = self._get_vm(vm_name)

        if vm.is_powered_on():
            self.stop_vm(vm_name)

        # When pysphere moves up to 0.1.8, we can just do:
        # vm.destroy()
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        rtn = self.api._proxy.Destroy_Task(request)._returnval

        task = VITask(rtn, self.api)
        # Block until the server-side destroy task settles.
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            return True
        else:
            return False

    def create_vm(self, vm_name):
        raise NotImplementedError(
            'This function has not yet been implemented.')

    def restart_vm(self, vm_name):
        """Stop then start vm_name; False when the stop failed."""
        if not self.stop_vm(vm_name):
            return False
        else:
            return self.start_vm(vm_name)

    def list_vm(self):
        """Return the names of all non-template VMs."""
        return self._get_list_vms()

    def list_template(self):
        """Return the names of all templates."""
        return self._get_list_vms(get_template=True)

    def list_flavor(self):
        raise NotImplementedError(
            'This function is not supported on this platform.')

    def list_host(self):
        return self.api.get_hosts()

    def list_datastore(self):
        return self.api.get_datastores()

    def list_cluster(self):
        return self.api.get_clusters()

    def info(self):
        """Return '<server type> <api version>'."""
        return '%s %s' % (self.api.get_server_type(),
                          self.api.get_api_version())

    def disconnect(self):
        self.api.disconnect()

    def vm_status(self, vm_name):
        """Return (and print) the power-state string for vm_name."""
        state = self._get_vm(vm_name).get_status()
        print "vm " + vm_name + " status is " + state
        return state

    def is_vm_running(self, vm_name):
        state = self.vm_status(vm_name)
        return "POWERED ON" == state

    def is_vm_stopped(self, vm_name):
        state = self.vm_status(vm_name)
        return "POWERED OFF" == state

    def is_vm_suspended(self, vm_name):
        state = self.vm_status(vm_name)
        return "SUSPENDED" == state

    def suspend_vm(self, vm_name):
        """Suspend vm_name; raises when it is not running."""
        vm = self._get_vm(vm_name)
        if vm.is_powered_off():
            raise Exception('Could not suspend %s because it\'s not running.' %
                            vm_name)
        else:
            vm.suspend()
        return self.is_vm_suspended(vm_name)

    def clone_vm(self):
        raise NotImplementedError('clone_vm not implemented.')

    def deploy_template(self, template, *args, **kwargs):
        """Clone template to kwargs['vm_name'] and return that name.

        kwargs['resourcepool'] (optional) selects the destination pool.
        """
        if 'resourcepool' not in kwargs:
            kwargs['resourcepool'] = None
        vm = self._get_vm(template)
        if vm:
            vm.clone(kwargs['vm_name'], sync_run=True,
                     resourcepool=self._get_resource_pool(
                         kwargs['resourcepool']))
            return kwargs['vm_name']
        else:
            raise Exception('Could not clone %s' % template)
class VCenterManagement: server_ip = '' user_name = '' password = '' connect_flag = False server = None #vm_list = [] #def __init__(self): #Use the given args to connect the esxi server you want #@ip[string]: ESXi server's IP address #@name[string]: the username used to login the ESXi server #@pwd[string]: the password used to login the ESXi server def connect_server(self, ip, name, pwd): self.server_ip = ip self.user_name = name self.password = pwd self.server = VIServer() self.server.connect(self.server_ip, self.user_name, self.password) self.connect_flag = self.server.is_connected() if self.connect_flag: return True return False #To get all the definition registered vms from the connected server #@param[string]: can be set as ALL, POWER_ON, POWER_OFF, SUSPENDED #According to the param, returns a list of VM Paths. You might also filter by datacenter, #cluster, or resource pool by providing their name or MORs. #if cluster is set, datacenter is ignored, and if resource pool is set #both, datacenter and cluster are ignored. def get_registered_vms(self, param, status=None, datacenter=None, cluster=None, resource_pool=None): if param not in ['ALL', 'POWER_ON', 'POWER_OFF', 'SUSPENDED']: print "Get VMs error: param can only be set as ALL, POWER_ON, POWER_OFF, or SUSPENDED." return None if self.connect_flag == False: print "Get VMs error: Server not connected." 
return None if param == 'ALL': return self.server.get_registered_vms(datacenter, cluster, resource_pool) elif param == 'POWER_ON': return self.server.get_registered_vms(datacenter, cluster, resource_pool, status='poweredOn') elif param == 'POWER_OFF': return self.server.get_registered_vms(datacenter, cluster, resource_pool, status='poweredOff') elif param == 'SUSPENDED': return self.server.get_registered_vms(datacenter, cluster, resource_pool, status='suspended') else: return None #Disconnect to the Server def disconnect(self): if self.connect_flag == True: self.server = self.server.disconnect() self.connect_flag == False #To keep session alive def keep_session_alive(self): assert self.server.keep_session_alive() #To get the server type def get_server_type(self): return self.server.get_server_type() #To get performance manager def get_performance_manager(self): return self.server.get_performance_manager() #To get the all the server's hosts def get_all_hosts(self): """ Returns a dictionary of the existing hosts keys are their names and values their ManagedObjectReference object. """ return self.server.get_hosts() #To get all datastores def get_all_datastores(self): """ Returns a dictionary of the existing datastores. Keys are ManagedObjectReference and values datastore names. """ return self.server.get_datastores() #To get all clusters def get_all_clusters(self): """ Returns a dictionary of the existing clusters. Keys are their ManagedObjectReference objects and values their names. """ return self.server.get_clusters() #To get all datacenters def get_all_datacenters(self): """ Returns a dictionary of the existing datacenters. keys are their ManagedObjectReference objects and values their names. """ return self.server.get_datacenters() #To get all resource pools def get_all_resource_pools(self): """ Returns a dictionary of the existing ResourcePools. keys are their ManagedObjectReference objects and values their full path names. 
""" return self.server.get_resource_pools() #To get hosts by name def get_hosts_by_name(self, from_mor): """ Returns a dictionary of the existing ResourcePools. keys are their ManagedObjectReference objects and values their full path names. @from_mor: if given, retrieves the hosts contained within the specified managed entity. """ try: hosts_dic = self.server.get_hosts(from_mor) except: print "Get hosts error!" return None return hosts_dic def get_vm_by_name(self, vm_name): try: vm = self.server.get_vm_by_name(vm_name) except: print "Get vm error!" return None return vm def power_on_vm(self, vm_name): try: vm = self.get_vm_by_name(vm_name) if (vm.is_powered_off()): vm.power_on() print "vm " + vm_name + " power on success." else: print "vm " + vm_name + "is already power on" return False except: print "Power on vm " + vm_name + "error" return False return True def power_off_vm(self, vm_name): try: vm = self.get_vm_by_name(vm_name) if (vm.is_powered_on()): vm.power_off() print "vm " + vm_name + " power off success." else: print "vm " + vm_name + "is already power off" return False except: print "Power off vm " + vm_name + " error" return False return True
opt_user = options.user opt_password = options.password argvs = sys.argv argc = len(argvs) if ( argc != 7): print "Usage : python clone_VMs_from_template_VM.py --vcenter=<vCenter IP> --user=<user> --password=<password>--prefix=<string> --number=<int> --template=<template VM name>" sys.exit(1) # connect to vCenter server = VIServer() server.connect(opt_vcenter,opt_user,opt_password,trace_file="debug.txt") resource_pools = server.get_resource_pools() #print resource_pools first_resource_pool = resource_pools.keys()[0] #print first_resource_pool # specify the full path of a template VM template_vm = server.get_vm_by_name("%s" % opt_template) i = 1 while i <= opt_number: new_vm = "%s-%s" % ( opt_prefix,i ) clonedVM = template_vm.clone(new_vm,resourcepool=first_resource_pool) print "VM name : %s : status %s" % (clonedVM.get_property("name") , clonedVM.get_status()) i += 1
class EsxiServer: server_ip = '' user_name = '' password = '' connect_flag = False server = None def opearte_guest_power(self, src_vm_name, operate): """" |##desc: 操作客户机电源(重启、关机、开机),这种模式不需要vmtools,就是强制操作电源,不走操作系统 ,可能损坏数据 |##:param: >vm1.power_on() >vm1.reset() >vm1.suspend() #since pysphere 0.1.5 >vm1.power_off() |##:return: None |##@author: jhuang |##@time:12/12/2017 """ ovm = self.server.get_vm_by_name(src_vm_name) if operate == 'power_on': ovm.power_on() return True elif operate == 'reset': ovm.reset() return True elif operate == 'suspend': ovm.suspend() return True elif operate == 'power_off': ovm.power_off() return True else: print "Operate guest error: operate can noly be set as power_on, reset, suspend, power_off" return False def clone(self, src_vm_name, dest_vm_name, datacenter=None, sync_run=True, folder=None, resourcepool=None, datastore=None, host=None, power_on=True, template=False, snapshot=None, linked=False): """ |##desc: 克隆虚拟机 |##:param: None |##:return: None |##@author: jhuang |##@time:12/12/2017 """ # get_vm_by_name 在包含2个数据中心的情况下,需要指定参数 datacenter 其中的限定条件为datacenter # vm1 = server.get_vm_by_path("[DataStore1] Ubantu/Ubantu-10.vmx") 通过路径方法获取对象 logger.debug(src_vm_name) logger.debug(datacenter) ovm = self.server.get_vm_by_name(src_vm_name, datacenter) new_vm = ovm.clone(dest_vm_name) def create_snapshot(self, src_vm_name, snapShotName): """ |##desc: 创建快照 |##:param: None |##:return: None |##@author: jhuang |##@time:12/12/2017 """ ovm = self.server.get_vm_by_name(src_vm_name) ovm.create_snapshot(snapShotName) def set_ssl_no_verify(self): ssl._create_default_https_context = ssl._create_unverified_context # Use the given args to connect the esxi server you want # @ip[string]: ESXi server's IP address # @name[string]: the username used to login the ESXi server # @pwd[string]: the password used to login the ESXi server def connect_server(self, ip, name, pwd, ssl_no_verify=True): logger.debug(ip) logger.debug(name) logger.debug(pwd) self.server_ip = ip 
self.user_name = name self.password = pwd self.server = VIServer() if ssl_no_verify: self.set_ssl_no_verify() self.server.connect(self.server_ip, self.user_name, self.password) self.connect_flag = self.server.is_connected() if self.connect_flag: return True return False # To get all the definition registered vms from the connected server # @param[string]: can be set as ALL, POWER_ON, POWER_OFF, SUSPENDED # According to the param, returns a list of VM Paths. You might also filter by datacenter, # cluster, or resource pool by providing their name or MORs. # if cluster is set, datacenter is ignored, and if resource pool is set # both, datacenter and cluster are ignored. def get_registered_vms(self, param='ALL', status=None, datacenter=None, cluster=None, resource_pool=None): """ 获取注册的主机 (虚拟机) :param param1: this is a first param :param param2: this is a second param :returns: this is a description of what is returned :raises keyError: raises an exception @author: jhuang @time:28/02/2018 """ if param not in ['ALL', 'POWER_ON', 'POWER_OFF', 'SUSPENDED']: print "Get VMs error: param can only be set as ALL, POWER_ON, POWER_OFF, or SUSPENDED." return None if self.connect_flag == False: print "Get VMs error: Server not connected." 
return None if param == 'ALL': return self.server.get_registered_vms(datacenter, cluster, resource_pool) elif param == 'POWER_ON': return self.server.get_registered_vms(datacenter, cluster, resource_pool, status='poweredOn') elif param == 'POWER_OFF': return self.server.get_registered_vms(datacenter, cluster, resource_pool, status='poweredOff') elif param == 'SUSPENDED': return self.server.get_registered_vms(datacenter, cluster, resource_pool, status='suspended') else: return None # Disconnect to the Server def disconnect(self): if self.connect_flag is True: self.server = self.server.disconnect() self.connect_flag == False # To keep session alive def keep_session_alive(self): assert self.server.keep_session_alive() # To get the server type def get_server_type(self): return self.server.get_server_type() # To get performance manager def get_performance_manager(self): return self.server.get_performance_manager() # To get the all the server's hosts def get_all_hosts(self): """ 获取所有主机(这个是宿主机) Returns a dictionary of the existing hosts keys are their names and values their ManagedObjectReference object. """ return self.server.get_hosts() # To get all datastores def get_all_datastores(self): """ 获取存储列表 Returns a dictionary of the existing datastores. Keys are ManagedObjectReference and values datastore names. """ return self.server.get_datastores() # To get all clusters def get_all_clusters(self): """ 获取所有集群列表 Returns a dictionary of the existing clusters. Keys are their ManagedObjectReference objects and values their names. """ return self.server.get_clusters() # To get all datacenters def get_all_datacenters(self): """ 获取数据中心列表 Returns a dictionary of the existing datacenters. keys are their ManagedObjectReference objects and values their names. """ return self.server.get_datacenters() # To get all resource pools def get_all_resource_pools(self): """ Returns a dictionary of the existing ResourcePools. 
keys are their ManagedObjectReference objects and values their full path names. """ return self.server.get_resource_pools() # To get hosts by name def get_hosts_by_name(self, from_mor): """ Returns a dictionary of the existing ResourcePools. keys are their ManagedObjectReference objects and values their full path names. @from_mor: if given, retrieves the hosts contained within the specified managed entity. """ try: hosts_dic = self.server.get_hosts(from_mor) except: print "Get hosts error!" return None return hosts_dic
class basicOps: server=None def connect(self): configuration=Config() server = configuration._config_value("general", "server") if server is None: raise ValueError("server must be supplied on command line"+"or in configuration file.") username = configuration._config_value("general", "username") if username is None: raise ValueError("username must be supplied on command line" " or in configuration file.") password = configuration._config_value("general", "password") if password is None: raise ValueError("password must be supplied on command line" " or in configuration file.") self.server=VIServer() self.server.connect(server,username,password) def startVm(self,vmname): vm=self.server.get_vm_by_name(vmname) status=vm.get_status() if status=="POWERED OFF": task=vm.power_on(run_sync=False) return task def stopVm(self, vmname): vm=self.server.get_vm_by_name(vmname) status=vm.get_status() if status=="POWERED ON": task=vm.power_off(run_sync=False) return task def stopGuest(self,vmname): vm=self.server.get_vm_by_name(vmname) task=vm.shutdown_guest(run_sync=False) return task def rebootGuest(self,vmname): vm=self.server.get_vm_by_name(vmname) task=vm.reboot_guest(run_sync=False) return task def getDataCenters(self): return self.server.get_datacenters() def clone(self,templateName,cloneName="Template Clone"): vm=self.server.get_vm_by_name(templateName) resourcePool=self.getResourcePool() print resourcePool task=vm.clone(cloneName,resourcepool=resourcePool,sync_run=False) try: status=task.get_state() print(status) print "Creating machine from template:Job Status:" + status if status!="error": while (status!="success" or status !="error"): status=task.get_state() if status=="success": break; print "Creating machine from template: Job Status:" + status #vm_new=self.server.get_vm_by_name(cloneName) #return vm_new except: print("Error Occured:") def getResourcePool(self): configuration=Config() datacenter = configuration._config_value("vmware", "datacenter") if datacenter is 
None: raise ValueError("server must be supplied"+"in configuration file.") cluster = configuration._config_value("vmware", "cluster") if cluster is None: raise ValueError("cluster name must be supplied in configuration file.") resourcePool = configuration._config_value("vmware", "resourcePool") if resourcePool is None: raise ValueError("Resource Pool name must be supplied in configuration file") clusters=self.server.get_clusters() #print clusters keycluster=self.find_key(clusters,cluster) # print(keycluster) resource_pools=self.server.get_resource_pools(keycluster) # print(resource_pools) resourcePool=self.find_key(resource_pools,resourcePool,True) # print (resourcePool) return resourcePool # resource_pools=self.server.get_resource_pools() def find_key(self,dic, val, partial=False): retval=None if partial==False: retval=[k for k, v in dic.iteritems() if v == val][0] else: retval =[k for k, v in dic.iteritems() if (v.find(val)>0)][0] return retval def find_value(self,dic, key): return dic[key]
class VMWareSystem(MgmtSystemAPIBase): """Client to Vsphere API This class piggy backs off pysphere. Benefits of pysphere: - Don't need intimate knowledge w/ vsphere api itself. Detriments of pysphere: - Response often are not detailed enough. """ _stats_available = { 'num_vm': lambda self: len(self.list_vm()), 'num_host': lambda self: len(self.list_host()), 'num_cluster': lambda self: len(self.list_cluster()), 'num_template': lambda self: len(self.list_template()), 'num_datastore': lambda self: len(self.list_datastore()), } def __init__(self, hostname, username, password, **kwargs): self.api = VIServer() self.api.connect(hostname, username, password) def _get_vm(self, vm_name=None): if vm_name is None: raise Exception('Could not find a VM named %s.' % vm_name) else: try: vm = self.api.get_vm_by_name(vm_name) return vm except VIException as ex: raise Exception(ex) def does_vm_exist(self, name): try: self._get_vm(name) return True except Exception: return False def _get_resource_pool(self, resource_pool_name=None): rps = self.api.get_resource_pools() for mor, path in rps.iteritems(): if re.match('.*%s' % resource_pool_name, path): return mor # Just pick the first return rps.keys()[0] def get_ip_address(self, vm_name): vm = self._get_vm(vm_name) maxwait = 600 net_info = None waitcount = 0 while net_info is None: if waitcount > maxwait: break net_info = vm.get_property('net', False) waitcount += 5 time.sleep(5) if net_info: ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' for ip in net_info[0]['ip_addresses']: if re.match(ipv4_re, ip) and ip != '127.0.0.1': return ip return None def _get_list_vms(self, get_template=False): template_or_vm_list = [] props = self.api._retrieve_properties_traversal(property_names=['name', 'config.template'], from_node=None, obj_type=MORTypes.VirtualMachine) for prop in props: vm = None template = None for elem in prop.PropSet: if elem.Name == "name": vm = elem.Val elif elem.Name == "config.template": template = elem.Val if vm is None or 
template is None: continue if template == bool(get_template): template_or_vm_list.append(vm) return template_or_vm_list def start_vm(self, vm_name): vm = self._get_vm(vm_name) if vm.is_powered_on(): return True else: vm.power_on() ack = vm.get_status() if ack == 'POWERED ON': return True return False def stop_vm(self, vm_name): vm = self._get_vm(vm_name) if vm.is_powered_off(): return True else: vm.power_off() ack = vm.get_status() if ack == 'POWERED OFF': return True return False def delete_vm(self, vm_name): vm = self._get_vm(vm_name) if vm.is_powered_on(): self.stop_vm(vm_name) # When pysphere moves up to 0.1.8, we can just do: # vm.destroy() request = VI.Destroy_TaskRequestMsg() _this = request.new__this(vm._mor) _this.set_attribute_type(vm._mor.get_attribute_type()) request.set_element__this(_this) rtn = self.api._proxy.Destroy_Task(request)._returnval task = VITask(rtn, self.api) status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) if status == task.STATE_SUCCESS: return True else: return False def create_vm(self, vm_name): raise NotImplementedError('This function has not yet been implemented.') def restart_vm(self, vm_name): if not self.stop_vm(vm_name): return False else: return self.start_vm(vm_name) def list_vm(self): return self._get_list_vms() def list_template(self): return self._get_list_vms(get_template=True) def list_flavor(self): raise NotImplementedError('This function is not supported on this platform.') def list_host(self): return self.api.get_hosts() def list_datastore(self): return self.api.get_datastores() def list_cluster(self): return self.api.get_clusters() def info(self): return '%s %s' % (self.api.get_server_type(), self.api.get_api_version()) def disconnect(self): self.api.disconnect() def vm_status(self, vm_name): state = self._get_vm(vm_name).get_status() print "vm " + vm_name + " status is " + state return state def is_vm_running(self, vm_name): state = self.vm_status(vm_name) return "POWERED ON" == state def 
is_vm_stopped(self, vm_name): state = self.vm_status(vm_name) return "POWERED OFF" == state def is_vm_suspended(self, vm_name): state = self.vm_status(vm_name) return "SUSPENDED" == state def suspend_vm(self, vm_name): vm = self._get_vm(vm_name) if vm.is_powered_off(): raise Exception('Could not suspend %s because it\'s not running.' % vm_name) else: vm.suspend() return self.is_vm_suspended(vm_name) def clone_vm(self): raise NotImplementedError('clone_vm not implemented.') def deploy_template(self, template, *args, **kwargs): if 'resourcepool' not in kwargs: kwargs['resourcepool'] = None vm = self._get_vm(template) if vm: vm.clone(kwargs['vm_name'], sync_run=True, resourcepool=self._get_resource_pool(kwargs['resourcepool'])) return kwargs['vm_name'] else: raise Exception('Could not clone %s' % template)
DATASTORE = "datastore1" #WHERE THE DISK WILL BE CREATED AT VM_PATH = "[datastore1-data] vm-test-name/vm-test-name.vmx" VM_NAME = "vm-test-name" folder_name = "vm-test-name" DATACENTER = "ha-datacenter" s = VIServer() s.connect(HOST, USER, PASSWORD) #vm = s.get_vm_by_path(VM_PATH) destination_folder = "/Resources/vm-test-name" rp_path = "/Resources" for k, v in s.get_resource_pools().items(): if v == destination_folder: pass print("k and v = %s %s" % (k, v)) print("Couldn't find folder '%s'" % (folder_name)) #look for the resource pool whose path is rp_path for ds, ds_name in s.get_datastores().items(): print ds_name if ds_name == DATASTORE: print "Found Datacenter: %s" % DATASTORE pprint(ds) #VMs should be created by a folder, in ESX always uses the vm folder