def to_esx(server, username, password, port=443):
    """Connect to an ESX/vCenter server and return the live connection.

    Registers a clean ``Disconnect`` at interpreter exit.

    :param server: hostname or IP of the ESX/vCenter server.
    :param username: login user name.
    :param password: login password.
    :param port: HTTPS port (default 443); str or int, coerced to int.
    :return: the connection object returned by ``SmartConnect``.
    :raises SystemExit: if ``SmartConnect`` returned a falsy connection.

    Exits the process with status 1 on any other connection error.
    """
    try:
        esxsnapshot.log.debug('Trying to connect with provided credentials')
        con = SmartConnect(
            host=server, user=username, pwd=password, port=int(port))
        # Bug fix: validate the connection *before* dereferencing it — the
        # original only checked `con` after registering it with atexit and
        # calling methods on it.
        if not con:
            raise SystemExit("Unable to connect to host with supplied info.")
        # Make sure we always log out cleanly on interpreter shutdown.
        atexit.register(Disconnect, con)
        esxsnapshot.log.info('Connected to server %s' % server)
        esxsnapshot.log.debug('Server type: %s' % con.get_server_type())
        esxsnapshot.log.debug('API version: %s' % con.get_api_version())
        return con
    except Exception as error:
        # SystemExit subclasses BaseException, not Exception, so the raise
        # above propagates past this handler unchanged.
        esxsnapshot.log.error(error)
        esxsnapshot.log.debug('Connect error. Program will exit now.')
        sys.exit(1)
# Command-line handling: the single option is the UUID of the VM to inspect.
# NOTE(review): `usage` must be defined earlier in this file — not visible here.
parser = OptionParser(usage=usage)
parser.add_option("--vm_uuid", action="store", type="string",
                  dest="vm_uuid", help="VM UUID")
(options, args) = parser.parse_args()

#s = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
#s.verify_mode = ssl.CERT_NONE
# Connect without SSL certificate verification (lab use).
s = ssl._create_unverified_context()
c = SmartConnect(host="IP", user="******", pwd="password", sslContext=s)
content = c.RetrieveContent()

#vm_uuid = '5032ea24-02c5-f1ba-6ca5-05d08d2ff8cc'
vm_uuid = options.vm_uuid
search_index = content.searchIndex
# quick way to find a VM
vm = search_index.FindByUuid(uuid=vm_uuid, vmSearch=True, instanceUuid=True)

# Memory figures — presumably reported in MB given the /1024 -> *_GB naming;
# confirm against the pyVmomi quickStats docs.
# NOTE(review): assumes max_memory_GB > 0 — a zero value would raise
# ZeroDivisionError in the percentage computation below.
max_memory_GB = round(vm.runtime.maxMemoryUsage / 1024)
mem_active_used_GB = round(vm.summary.quickStats.guestMemoryUsage / 1024, 2)
mem_active_free_GB = max_memory_GB - mem_active_used_GB
memory_active_percent = round((mem_active_used_GB / max_memory_GB) * 100, 2)
def main(): """ Manage the vCenter Integration Node configuration """ # Handling arguments args = get_args() all_clusters = args.all_clusters all_datacenters = args.all_datacenters all_hosts = args.all_hosts clusters = [] if args.clusters: clusters = args.clusters debug = args.debug allow_fqdn = args.allow_fqdn datacenters = [] if args.datacenters: datacenters = args.datacenters hosts = [] if args.hosts: hosts = args.hosts host_configure_agent = args.host_configure_agent hosts_file = None if args.hosts_file: hosts_file = args.hosts_file hv_username = None if args.hv_username: hv_username = args.hv_username hv_password = None if args.hv_password: hv_password = args.hv_password hv_management_network = None if args.hv_management_network: hv_management_network = args.hv_management_network hv_data_network = None if args.hv_data_network: hv_data_network = args.hv_data_network hv_vm_network = None if args.hv_vm_network: hv_vm_network = args.hv_vm_network hv_mc_network = None if args.hv_mc_network: hv_mc_network = args.hv_mc_network log_file = None if args.logfile: log_file = args.logfile nuage_enterprise = args.nuage_enterprise nuage_host = args.nuage_host nuage_port = args.nuage_port nuage_password = None if args.nuage_password: nuage_password = args.nuage_password nuage_username = args.nuage_username nuage_vrs_ovf = None if args.nuage_vrs_ovf: nuage_vrs_ovf = args.nuage_vrs_ovf nosslcheck = args.nosslcheck verbose = args.verbose vcenter_host = args.vcenter_host vcenter_name = vcenter_host if args.vcenter_name: vcenter_name = args.vcenter_name vcenter_https_port = args.vcenter_https_port vcenter_http_port = args.vcenter_http_port vcenter_password = None if args.vcenter_password: vcenter_password = args.vcenter_password vcenter_username = args.vcenter_username # Logging settings if debug: log_level = logging.DEBUG elif verbose: log_level = logging.INFO else: log_level = logging.WARNING logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', 
level=log_level) logger = logging.getLogger(__name__) # Input checking if not all_datacenters and len(datacenters) < 1: logger.critical('Not all datacenters have to be present in the Nuage Deployment tool (--all-datacenters option NOT enabled), but also no datacenters specified (at least one --datacenter)') return 1 if not all_clusters and len(clusters) < 1: logger.critical('Not all clusters have to be present in the Nuage Deployment tool (--all-clusters option NOT enabled), but also no clusters specified (at least one --cluster)') return 1 if not all_hosts and len(hosts) < 1 and not hosts_file: logger.critical('Not all hosts have to be present in the Nuage Deployment tool (--all-hosts option NOT enabled), but also no hosts specified (at least one --host or specify a file with the host information via --hosts-file)') return 1 if all_datacenters and len(datacenters) > 0: logger.warning('You enabled all datacenters and added individual datacenter options, --all-datacenters takes precendence and overwrites the specified datacenters.') datacenters = [] if all_clusters and len(clusters) > 0: logger.warning('You enabled all clusters and added individual cluster options, --all-clusters takes precendence and overwrites the specified clusters.') clusters = [] if all_hosts and len(hosts) > 0 and not hosts_file: logger.warning('You enabled all hosts and added individual hosts options, --all-hosts takes precendence and overwrites the specified hosts.') hosts = [] elif all_hosts and len(hosts) < 1 and hosts_file: logger.warning('You enabled all hosts and provided a hosts file, the hosts file takes precendence over the --all-hosts flag and this flag will be ignored.') all_hosts = False elif not all_hosts and len(hosts) > 0 and hosts_file: logger.warning('You specified host with the --host argument and provided a hosts file, the hosts file takes precendence over the --host paramerters and these will be ignored.') hosts = [] # CSV Handling hosts_list = None if hosts_file: 
hosts_list = {} # CSV fields: # VM Name, Resource Pool, Folder, MAC Address, Post Script logger.debug('Parsing csv %s' % hosts_file) if not os.path.isfile(hosts_file): logger.critical('CSV file %s does not exist, exiting' % hosts_file) return 1 with open(hosts_file, 'rb') as hostlist: hosts_list_raw = csv.reader(hostlist, delimiter=',', quotechar='"') for row in hosts_list_raw: logger.debug('Found CSV row: %s' % ','.join(row)) # Adding IP to the hosts variable so it can also be used in further handling if it's a valid IP if allow_fqdn or ip_address_is_valid(row[0]): hosts_list[row[0]] = row hosts.append(row[0]) else: logger.warning('Found an invalid IP %s in the hosts file and FQDNs are not allowed, skipping line' % row[0]) # Disabling SSL verification if set ssl_context = None if nosslcheck: logger.debug('Disabling SSL certificate verification.') requests.packages.urllib3.disable_warnings() import ssl if hasattr(ssl, 'SSLContext'): ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ssl_context.verify_mode = ssl.CERT_NONE # Getting user password for Nuage connection if nuage_password is None: logger.debug('No command line Nuage password received, requesting Nuage password from user') nuage_password = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (nuage_host, nuage_username)) # Getting user password for vCenter connection if vcenter_password is None: logger.debug('No command line vCenter password received, requesting vCenter password from user') vcenter_password = getpass.getpass(prompt='Enter password for vCenter host %s for user %s: ' % (vcenter_host, vcenter_username)) # Getting user password for hosts if hv_password is None: logger.debug('No command line Host password received, requesting Host password from user') hv_password = getpass.getpass(prompt='Enter password for the hosts inside vCenter %s for user %s: ' % (vcenter_host, hv_username)) try: vc = None nc = None # Connecting to Nuage try: logger.info('Connecting to Nuage server 
%s:%s with username %s' % (nuage_host, nuage_port, nuage_username)) nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url="https://%s:%s" % (nuage_host, nuage_port)) nc.start() except IOError, e: pass if not nc or not nc.is_current_session(): logger.error('Could not connect to Nuage host %s with user %s and specified password' % (nuage_host, nuage_username)) return 1 # Connecting to vCenter try: logger.info('Connecting to vCenter server %s:%s with username %s' % (vcenter_host, vcenter_https_port, vcenter_username)) if ssl_context: vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port), sslContext=ssl_context) else: vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port)) except IOError, e: pass
class VSphereClient(object):
    """
    Wrapper for Vsphere.

    Holds credentials, lazily opens a pyVmomi connection on connect(),
    and exposes inventory/lookup/task helpers. Methods decorated with
    @reconnect_on_fault are retried by that (project-provided) decorator.
    """

    def __init__(self, admin_user, admin_password, auth_host, auth_port=443):
        self.admin_user = admin_user
        self.admin_password = admin_password
        self.auth_host = auth_host
        self.auth_port = auth_port
        self._vsphere_connection = None
        self._vsphere_content = None
        # Fix: connect() stores the perf manager under `_perf_manager`; the
        # original initialised a differently-spelled `_perfManager` here,
        # leaving the real attribute undeclared until connect() ran.
        self._perf_manager = None

    def set_configuration(self, admin_user, admin_password, auth_host, auth_port=443):
        """Replace the stored credentials/endpoint (does not reconnect)."""
        self.admin_user = admin_user
        self.admin_password = admin_password
        self.auth_host = auth_host
        self.auth_port = auth_port

    def __repr__(self):
        # Fix: the original format string contained only 3 placeholders but
        # was applied to 5 arguments, raising TypeError whenever repr() was
        # taken. The password argument is already masked with '*'.
        return "<VSphereClient host='%s:%d' [%s] user='%s' password='%s'>" % \
            (self.auth_host, self.auth_port,
             "On" if self._vsphere_connection is not None else "Off",
             self.admin_user, "*" * len(self.admin_password))

    def __del__(self):
        self.close()

    def connect(self, reconnect=False, insecure=True):
        """ Create new authenticated VSphere client """
        if self._vsphere_connection is None or reconnect:
            LOG.info('Connecting to vSphere server on %s:%s' % (self.auth_host, self.auth_port))
            kwargs = {'host': self.auth_host,
                      'port': self.auth_port,
                      'user': self.admin_user,
                      'pwd': self.admin_password}
            vmomi_versions = sorted(pyVmomi_version.versionMap.keys())
            LOG.debug("PyVmomi versions: %s", vmomi_versions)
            if insecure and ("vim25/6.5" in vmomi_versions or "vim25/6.0" in vmomi_versions):
                try:
                    import ssl
                    kwargs['sslContext'] = ssl._create_unverified_context()
                except (ImportError, AttributeError):
                    # on python older than 2.7.9 ssl does not have this function
                    pass
            try:
                self._vsphere_connection = SmartConnect(**kwargs)
                self._vsphere_content = self._vsphere_connection.RetrieveContent()
                self._perf_manager = self._vsphere_content.perfManager
            except (ConnectionError, vim.fault.InvalidLogin) as exc:
                raise VSphereException("Failed connecting to %s:%s using user %s: %s" %
                                       (self.auth_host, self.auth_port, self.admin_user, exc))

    def close(self):
        """Drop the connection object (no explicit server-side logout here)."""
        if self._vsphere_connection:
            del self._vsphere_connection
            self._vsphere_connection = None

    @property
    @reconnect_on_fault
    def session_key(self):
        """
        :return: The current session key. A unique identifier of the current connection.
        :raises VSphereReconnectException: when no current session exists.
        """
        if self._vsphere_content is not None and self._vsphere_connection is not None:
            # perform simple operation to check connectivity.
            self._vsphere_connection.CurrentTime()
        if not self._vsphere_content.sessionManager.currentSession:
            raise VSphereReconnectException("Can't get session key, session might be off")
        return self._vsphere_content.sessionManager.currentSession.key

    @reconnect_on_fault
    def _list_objects(self, object_type, folder=None):
        """List all inventory objects of `object_type` under `folder` (root by default)."""
        if folder is None:
            folder = self._vsphere_content.rootFolder
        objview = self._vsphere_content.viewManager.CreateContainerView(folder, [object_type], True)
        objects = objview.view
        # Views are server-side resources; release immediately.
        objview.Destroy()
        return objects

    @reconnect_on_fault
    def _get_obj(self, vim_type, name):
        """
        Get the vsphere object associated with a given text name

        :param vim_type: List of pyVmomi types.
        :type vim_type: vim.*
        :param name: The name of the desired object.
        :type name: str.
        :return: the first matching object, or None when not found.
        """
        obj = None
        container = self._vsphere_content.viewManager.CreateContainerView(self._vsphere_content.rootFolder, vim_type, True)
        for item in container.view:
            if item.name == name:
                obj = item
                break
        if obj is None:
            LOG.debug("Could not find %s ", name)
        return obj

    def get_obj(self, vim_type, name):
        """Public wrapper around _get_obj."""
        return self._get_obj(vim_type, name)

    def list_vms(self):
        return self._list_objects(vim.VirtualMachine)

    def list_hosts(self):
        return self._list_objects(vim.HostSystem)

    def list_users(self):
        """Return users/groups from every non-host domain (see `domains`)."""
        user_list = []
        for dom in self.domains:  # skipping host users
            tmp_list = self._vsphere_content.userDirectory.RetrieveUserGroups(
                domain=dom, searchStr="", exactMatch=False,
                findUsers=True, findGroups=True)
            user_list.extend(tmp_list)
        return user_list

    # -----------------------------------------
    # Property-Collector based API (used for deployment)
    # TODO: consider using only this API
    # -----------------------------------------
    @reconnect_on_fault
    def _get_objects(self, object_type, properties):
        """Collect `properties` for every object of `object_type`; each result
        dict gets a 'moid' key and loses the raw managed-object reference."""
        view = get_container_view(self._vsphere_connection, obj_type=[object_type])
        objects = collect_properties(self._vsphere_connection, view_ref=view,
                                     obj_type=object_type, path_set=properties,
                                     include_mors=True)
        for obj in objects:
            obj['moid'] = obj['obj']._moId
            del obj['obj']
        return objects

    def collect_roles(self):
        return self._vsphere_content.authorizationManager.roleList

    @property
    def roles(self):
        return self.collect_roles()

    def collect_domains(self):
        # [1:] skips the first entry — the host domain.
        return self._vsphere_content.userDirectory.domainList[1:]

    @property
    def domains(self):
        """
        Return all domains except host domain
        :return: List of domains except host domain.
        """
        return self.collect_domains()

    @reconnect_on_fault
    def _get_obj_by_moid(self, obj_type, moid):
        """Build a managed object of `obj_type` from its moid, bound to our stub."""
        obj = obj_type(moid)
        obj._stub = self._vsphere_connection._stub
        return obj

    def get_vm(self, vm_moid):
        """ Get VM by moid. """
        return self._get_obj_by_moid(vim.VirtualMachine, vm_moid)

    def get_host(self, host_moid):
        """ Get host by moid. """
        return self._get_obj_by_moid(vim.HostSystem, host_moid)

    def get_host_by_name(self, host_name):
        """ Get host by name. """
        return self.get_obj([vim.HostSystem], host_name)

    @reconnect_on_fault
    def wait_for_task(self, task, action_name, hide_result=False, update_status_callback=None):
        """Block until `task` leaves the running/queued states, logging progress.

        :param task: vim task object to wait on.
        :param action_name: human-readable label used in log messages.
        :param hide_result: suppress logging of the task result on success.
        :param update_status_callback: optional callable(task) invoked on each
            state change and on success; its exceptions are logged, not raised.
        :raises VSphereTaskFailed: when the task ends in an error state.
        """
        if update_status_callback is None:
            def dummy_callback(task):
                pass
            update_status_callback = dummy_callback
        LOG.info('Waiting for %s to complete.', action_name)
        last_state = (None, None)
        while task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
            # NOTE(review): "canceled" is not one of the loop states above, so
            # this branch looks unreachable — kept as in the original.
            if task.info.state == "canceled":
                try:
                    task.CancelTask()
                except Exception as exc:
                    LOG.warn("Error canceling task '%s': %s", action_name, exc)
                LOG.warn('%s was canceled!', action_name)
                return None
            elif last_state != (task.info.state, task.info.progress):
                LOG.info("Task '%s' state: %s (progress: %s%%)",
                         action_name, task.info.state, task.info.progress or 0)
                last_state = (task.info.state, task.info.progress)
            try:
                update_status_callback(task)
            except Exception:
                LOG.exception("Error while calling %s task update status callback", action_name)
            do_sleep(1)
        if task.info.state == vim.TaskInfo.State.success:
            try:
                update_status_callback(task)
            except Exception:
                LOG.exception("Error while calling %s task update status callback", action_name)
            if task.info.result is not None and not hide_result:
                LOG.info('%s completed successfully, result: %s', action_name, task.info.result)
            else:
                LOG.info('%s completed successfully.', action_name)
        else:
            LOG.error('%s did not complete successfully: %s', action_name, task.info.error)
            raise VSphereTaskFailed(action_name, task.info.error)
        # may not always be applicable, but can't hurt.
        return task
# Author: Vikas Shitole
# Website: www.vThinkBeyondVM.com
# Product: vCenter server/vSphere DRS rules
# Description: Python script to get associated DRS rules for a Virtual Machine (from DRS cluster)
# Reference:http://vthinkbeyondvm.com/pyvmomi-tutorial-how-to-get-all-the-core-vcenter-server-inventory-objects-and-play-around/
# How to setup pyVmomi environment?: http://vthinkbeyondvm.com/how-did-i-get-started-with-the-vsphere-python-sdk-pyvmomi-on-ubuntu-distro/

from pyVim.connect import SmartConnect
from pyVmomi import vim
import ssl

# TLSv1 context with certificate verification switched off.
s = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
s.verify_mode = ssl.CERT_NONE

# Authenticated connection to the vCenter server.
c = SmartConnect(host="10.161.2.3", user="******", pwd="VMware1!", sslContext=s)
content = c.content


def get_all_objs(content, vimtype):
    """Return a dict mapping every managed object of the given type(s) to its name."""
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    return {ref: ref.name for ref in container.view}

# Scanning a input VM inside inventory using special python construct i.e. List comprehension
def main():
    """
    Simple command-line program for listing the virtual machines on a system.

    Decrypts the vCenter password with the supplied symmetric key, connects
    to vCenter (certificate verification disabled when supported) and prints
    info for every VM in every datacenter.

    :return: 0 on success, -1 on credential or connection failure.
    """
    global vm_cluster
    parser = argparse.ArgumentParser(
        description='Process args for retrieving all the Virtual Machines')
    parser.add_argument('-l', '--login', required=True, action='store',
                        help='login for VCenter access')
    parser.add_argument('-k', '--key', required=True, action='store',
                        help='secured key for authentication')
    parser.add_argument('-vc', '--vcenter', required=True, action='store',
                        help='VCenter host')
    parser.add_argument('-v', '--vm', required=True, action='store',
                        help='Comma delimited list of VMs')
    args = parser.parse_args()
    vm_cluster = str(args.vm).split(',')

    try:
        password = encrypt.decrypt_login(args.key)
    except Exception:
        # Fix: the original message duplicated "not found".
        print(
            "\nERROR: Symmetric key and/or password file not found.")
        return -1

    # Python 2.7.9+/3 verify certificates by default; opt out when supported.
    context = None
    if hasattr(ssl, '_create_unverified_context'):
        context = ssl._create_unverified_context()

    si = SmartConnect(host=args.vcenter, user=args.login, pwd=password,
                      port=443, sslContext=context)
    if not si:
        print("Could not connect to the specified host using specified "
              "username and password")
        return -1
    atexit.register(Disconnect, si)

    content = si.RetrieveContent()
    # Walk every datacenter's VM folder and print each VM.
    for child in content.rootFolder.childEntity:
        if hasattr(child, 'vmFolder'):
            datacenter = child
            vmFolder = datacenter.vmFolder
            vmList = vmFolder.childEntity
            for vm in vmList:
                PrintVmInfo(vm)
    print('\nAll VMs have been refreshed.')
    return 0
# NOTE(review): tail of a collector helper whose `def` line is above this
# chunk — materialise the container view, release it, return the objects.
obj = [vm for vm in vm_view.view]
vm_view.Destroy()
return obj

# Disabling urllib3 ssl warnings
requests.packages.urllib3.disable_warnings()

# Disabling SSL certificate verification
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE

# Connect to vCenter
# NOTE(review): host is a placeholder string, not a real endpoint — this
# script cannot connect until it is filled in.
si = SmartConnect(host='Enter vCenter Info Here', user=vm_username,
                  pwd=vm_password, port=443, sslContext=context)

# Disconnect vCenter
atexit.register(Disconnect, si)

# Here are the variables containing our vCenter Info
content = si.RetrieveContent()
clusters = GetCluster(content)
hosts = GetVMHosts(content)
vms = GetVMs(content)
cluster_name = []
# NOTE(review): sorted() runs while cluster_name is still empty — presumably
# it should be populated first; confirm against the rest of the script.
cluster_name_sorted = sorted(cluster_name)
cluster_hosts = []
def main():
    # Read the JSON config path from the command line; bail out (logging via
    # /bin/logger so it reaches syslog) when the file is missing.
    args = GetArgs()
    jsonConfig = args.jsonConfig
    if not os.path.exists(jsonConfig):
        os.system(
            "/bin/logger '[STATELESS-DEBUG] Unable to find JSON Config File '" + jsonConfig)
        return

    # Process JSON
    with open(jsonConfig) as f:
        jsonData = json.load(f)
    os.system("/bin/logger '[STATELESS-DEBUG] jsonConfigData='" + json.dumps(jsonData))
    vcenterServer = jsonData["vcenter_server"]
    vcenterUser = jsonData["vcenter_user"]
    vcenterPass = jsonData["vcenter_pass"]
    datacenterName = jsonData["vcenter_datacenter"]
    clusterName = jsonData["vcenter_cluster"]
    esxUsername = "******"  # credentials sanitised in this copy of the file
    esxPassword = ""
    esxPort = 443

    # For Python 2.7.9 and later, the default SSL context has stricter
    # connection-handshake rules. We may need to turn off hostname checking
    # and client-side certificate verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    si = SmartConnect(host=vcenterServer, user=vcenterUser, pwd=vcenterPass,
                      port=443, sslContext=context)
    atexit.register(Disconnect, si)

    # Retrieve ESXi SSL Thumbprint (sha1 fingerprint of the local cert).
    cmd = "echo -n | openssl s_client -connect localhost:443 2>/dev/null | openssl x509 -noout -fingerprint -sha1 | awk -F \'=\' \'{print $2}\'"
    tmp = os.popen(cmd)
    sslThumbprint = (tmp.readline()).strip()
    tmp.close()

    # Retrieve ESXi IP Address from the local esx.conf.
    cmd = "grep HostIPAddr /etc/vmware/esx.conf | awk \'{print $3}\'"
    tmp = os.popen(cmd)
    esxHostname = (tmp.readline()).strip().replace('"', '')
    tmp.close()

    # Check to see if previous ESXi-Arm instance was added, remove prior to adding again
    try:
        hostRef, hostPath = invt.findHost('', esxHostname, si)[0]
        os.system(
            "/bin/logger '[STATELESS-DEBUG] Removing previous ESXi-Arm instance '" + esxHostname)
        task = hostRef.Destroy()
        if WaitForTask(task) == "error":
            raise task.info.error
        else:
            task.info.result
    except:
        # NOTE(review): bare except — reached when no previous instance
        # exists (or on any other failure). This reconstruction of the
        # mangled original keeps only the log line inside the handler so the
        # AddHost below always runs; confirm against the upstream script.
        os.system("/bin/logger '[STATELESS-DEBUG] Creating AddHost Spec'")

    # Create AddHost Spec
    hostAddSpec = Vim.Host.ConnectSpec()
    hostAddSpec.SetHostName(esxHostname)
    hostAddSpec.SetPort(esxPort)
    hostAddSpec.SetUserName(esxUsername)
    hostAddSpec.SetPassword(esxPassword)
    hostAddSpec.SetForce(True)
    hostAddSpec.SetSslThumbprint(sslThumbprint)

    # Add Host to the configured cluster and wait for the join to finish.
    cluster = invt.GetCluster(datacenterName, clusterName, si)
    os.system("/bin/logger '[STATELESS-DEBUG] hostAddSpec='" + json.dumps(hostAddSpec.__dict__))
    os.system("/bin/logger '[STATELESS-DEBUG] Joining vCenter Server'")
    os.system(
        "esxcfg-init --set-boot-progress-text \"Joining vCenter Server ...\"")
    task = cluster.AddHost(hostAddSpec, True, None)
    if WaitForTask(task) == "error":
        raise task.info.error
    else:
        return task.info.result
def main():
    """
    Manage the vCenter Integration Node configuration
    """
    # Handling arguments
    args = get_args()
    debug = args.debug
    log_file = None
    if args.logfile:
        log_file = args.logfile
    nosslcheck = args.nosslcheck
    template = args.template
    vcenter_host = args.vcenter_host
    vcenter_port = args.vcenter_port
    vcenter_password = None
    if args.vcenter_password:
        vcenter_password = args.vcenter_password
    vcenter_username = args.vcenter_username
    verbose = args.verbose

    # Logging settings
    if debug:
        log_level = logging.DEBUG
    elif verbose:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING
    logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)
    logger = logging.getLogger(__name__)

    # Disabling SSL verification if set
    ssl_context = None
    if nosslcheck:
        logger.debug('Disabling SSL certificate verification.')
        requests.packages.urllib3.disable_warnings()
        import ssl
        if hasattr(ssl, 'SSLContext'):
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ssl_context.verify_mode = ssl.CERT_NONE

    # Getting user password for vCenter connection
    if vcenter_password is None:
        logger.debug('No command line vCenter password received, requesting vCenter password from user')
        vcenter_password = getpass.getpass(prompt='Enter password for vCenter host %s for user %s: ' % (vcenter_host, vcenter_username))

    # NOTE(review): this outer try has no matching except/finally in the
    # visible portion of the file — the function is truncated in this view.
    try:
        vc = None
        # Connecting to vCenter
        try:
            logger.info('Connecting to vCenter server %s:%s with username %s' % (vcenter_host, vcenter_port, vcenter_username))
            if ssl_context:
                vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_port), sslContext=ssl_context)
            else:
                vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_port))
        # NOTE(review): Python 2-only except syntax; errors are silently
        # swallowed and handled by the `if not vc` check below.
        except IOError, e:
            pass

        if not vc:
            logger.error('Could not connect to vCenter host %s with user %s and specified password' % (vcenter_host, vcenter_username))
            return 1

        logger.info('Connected to both Nuage & vCenter servers')
        logger.debug('Registering vCenter disconnect at exit')
        atexit.register(Disconnect, vc)

        # Find the correct VM
        logger.debug('Finding VM %s' % template)
        template_vm = find_vm(vc, logger, template)
        if template_vm is None:
            logger.error('Unable to find VM %s' % template)
            return 1
        logger.info('VM %s found' % template)

        logger.info('VM is in %s power state' % template_vm.runtime.powerState)
        if template_vm.runtime.powerState == "poweredOff":
            logger.info('Powering OFF VM is not needed, moving to deletion directly')
        else:
            logger.info('Powering OFF VM. This might take a couple of seconds')
            # NOTE(review): misleading name — this holds the power-OFF task.
            power_on_task = template_vm.PowerOffVM_Task()
            logger.debug('Waiting fo VM to power OFF')
            run_loop = True
            while run_loop:
                info = power_on_task.info
                if info.state == vim.TaskInfo.State.success:
                    run_loop = False
                    break
                elif info.state == vim.TaskInfo.State.error:
                    if info.error.fault:
                        logger.info('Power OFF has quit with error: %s' % info.error.fault.faultMessage)
                    else:
                        logger.info('Power OFF has quit with cancelation')
                    run_loop = False
                    break
                sleep(5)

        ''' IP ADDRESSES '''
        vm = template_vm
        # Hard-coded network settings used for the guest customization below.
        inputs = {'vm_ip': '29.203.240.5',
                  'subnet': '255.255.255.240',
                  'gateway': '29.203.240.1',
                  'dns': ['15.163.248.60', '15.163.248.61'],
                  'domain': 'ng1labpln.mcloud.entsvcs.net'}
        adaptermap = vim.vm.customization.AdapterMapping()
        globalip = vim.vm.customization.GlobalIPSettings()
        adaptermap.adapter = vim.vm.customization.IPSettings()
        adaptermap.adapter.ip = vim.vm.customization.FixedIp()
        adaptermap.adapter.ip.ipAddress = inputs['vm_ip']
        adaptermap.adapter.subnetMask = inputs['subnet']
        adaptermap.adapter.gateway = inputs['gateway']
        globalip.dnsServerList = inputs['dns']
        adaptermap.adapter.dnsDomain = inputs['domain']
        # NOTE(review): globalip is re-created here, discarding the
        # dnsServerList assigned above — looks like a bug; confirm whether
        # the DNS servers are meant to reach the customization spec.
        globalip = vim.vm.customization.GlobalIPSettings()
        ident = vim.vm.customization.LinuxPrep(domain=inputs['domain'],
                                               hostName=vim.vm.customization.FixedName(name=template))
        customspec = vim.vm.customization.Specification()
        # For only one adapter
        customspec.identity = ident
        # NOTE(review): LinuxPrep's attribute is `hostName` (set via
        # FixedName above); assigning a plain string to `hostname` here
        # looks ineffective/wrong — confirm against the pyVmomi API.
        customspec.identity.hostname = "TestESX12_I1_s1"
        customspec.nicSettingMap = [adaptermap]
        customspec.globalIPSettings = globalip

        # NOTE(review): Python 2 print statements — this chunk is py2-only.
        print "Reconfiguring VM Networks . . ."
        task = vm.Customize(spec=customspec)
        print "1"
        run_loop = True
        while run_loop:
            info = task.info
            print "X"
            if info.state == vim.TaskInfo.State.success:
                run_loop = False
                break
            elif info.state == vim.TaskInfo.State.error:
                if info.error.fault:
                    logger.info('Configuring IP on VM failed with: %s' % info.error.fault.faultMessage)
                else:
                    logger.info('Configuring IP on VM got cancelation')
                run_loop = False
                break
            sleep(5)

        logger.info('Deleting VM. This might take a couple of seconds')
        # NOTE(review): variable name reused for the destroy task as well.
        power_on_task = template_vm.Destroy_Task()
        logger.debug('Waiting fo VM to Deleting')
        run_loop = True
        while run_loop:
            info = power_on_task.info
            if info.state == vim.TaskInfo.State.success:
                run_loop = False
                break
            elif info.state == vim.TaskInfo.State.error:
                if info.error.fault:
                    logger.info('Deleting has quit with error: %s' % info.error.fault.faultMessage)
                else:
                    logger.info('Deleting has quit with cancelation')
                run_loop = False
                break
            sleep(5)
class Vsphere(object):
    """Thin wrapper around a pyVmomi service instance for an Ansible-style module.

    Connection parameters come from ``module.params``; every public method
    returns ``(failed, result_dict)`` so the caller can feed the result
    straight into ``module.exit_json`` / ``module.fail_json``.
    """

    def __init__(self, module):
        self.changed = False
        self.module = module
        self.vsphere_host = module.params.get('host')
        login_username = module.params.get('login')
        login_password = module.params.get('password')
        self.timeout = module.params.get('timeout')
        check_ssl = bool(module.params.get('checkssl'))

        #####################################
        #                                   #
        # SSL cert support for pyvmomi 6.0+ #
        #                                   #
        #####################################
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_NONE
        if not check_ssl:
            # NOTE: disables certificate verification process-wide, on purpose.
            ssl._create_default_https_context = ssl._create_unverified_context
        try:
            self.si = SmartConnect(host=self.vsphere_host,
                                   user=login_username,
                                   pwd=login_password,
                                   sslContext=context)
        except Exception:  # was a bare except; keep the fail_json behaviour
            self.module.fail_json(msg='Could not connect to host %s'
                                      % self.vsphere_host)
        atexit.register(Disconnect, self.si)

    def _wait_task(self, task):
        """Poll *task* to completion; return ``(failed, message, task)``."""
        while (task.info.state != vim.TaskInfo.State.success
               and task.info.state != vim.TaskInfo.State.error):
            sleep(2)
        failed = False
        if task.info.state == vim.TaskInfo.State.success:
            out = '"%s" completed successfully.%s' % (
                task.info.task,
                ': %s' % task.info.result if task.info.result else '')
        else:
            failed = True
            if task.info.error:
                out = '%s did not complete successfully: %s' % (
                    task.info.task, task.info.error.msg)
            else:
                out = '%s did not complete successfully: with unknown error, state: %s' % (
                    task.info.task, task.info.state)
        return failed, out, task

    def get_container_view(self, vimtype, name=None, recurse=True, limit=None):
        """
        Get vsphere object(s); if *name* is not None, return the first object
        found. *limit* changes the root directory to search from: either a
        managed object, a ``{'type': ..., 'name': ...}`` dict resolved
        recursively, or None for the content root folder.

        Raises IndexError when *name* is given but nothing matches.
        """
        if isinstance(limit, dict):
            limit = self.get_container_view([getattr(vim, limit['type'])],
                                            limit['name'])
        elif limit is None:
            # Only default when no limit was supplied (previously a non-dict
            # limit object was silently overwritten with the root folder).
            limit = self.content.rootFolder
        container = self.content.viewManager.CreateContainerView(
            limit, vimtype, recurse)
        if name is not None:
            def name_matches(child, name):
                try:
                    return child.name == name
                except vmodl.fault.ManagedObjectNotFound:
                    # Thrown intermittently (e.g. when creating multiple VMs in
                    # parallel) for a child that is partway created or deleted;
                    # treat it as "not a match" instead of failing the module.
                    return False
            return [c for c in container.view if name_matches(c, name)][0]
        return container.view

    def update_spec(self, spec):
        """
        Convert embedded values of a plain-dict spec into vim objects, in
        place (similar to ``dict.update``).

        For example::

            {'name': 'vmname', 'numCPUs': 1,
             'files': {'VirtualMachineFileInfo':
                           {'vmPathName': '[datastore]vmname'}}}

        becomes a dict whose 'files' value is a ``vim.vm.FileInfo`` instance.
        'ManagedObjectReference' entries are resolved through
        :meth:`get_container_view`.
        """
        if not isinstance(spec, dict):
            return spec
        # .items() instead of Py2-only .iteritems(): works on both 2 and 3,
        # and only values are reassigned so iterating the dict is safe.
        for spec_name, spec_value in spec.items():
            # Ansible returns numbers as unicode; coerce int, then float,
            # then the literal strings "True"/"False".
            try:
                spec[spec_name] = int(spec_value)
            except (ValueError, TypeError):
                try:
                    spec[spec_name] = float(spec_value)
                except (ValueError, TypeError):
                    try:
                        if (spec_value == "True") or (spec_value == "False"):
                            spec[spec_name] = ast.literal_eval(spec_value)
                    except (ValueError, TypeError):
                        pass
            # Recursively resolve managed object references and nested specs.
            if spec_name == 'ManagedObjectReference':
                try:
                    return self.get_container_view(
                        [getattr(vim, spec_value['type'])],
                        spec_value['name'],
                        limit=spec_value.get('limit', None))
                except IndexError:
                    self.module.fail_json(
                        msg='Failed to find %s within %s'
                            % (spec_value['name'],
                               spec_value.get('limit', 'root')))
            if isinstance(spec_value, dict):
                spec[spec_name] = self.update_spec(spec_value)
            if isinstance(spec_value, list):
                spec[spec_name] = [self.update_spec(v) for v in spec_value]
            if hasattr(vim, spec_name) and isinstance(spec_value, dict):
                try:
                    return getattr(vim, spec_name)(**spec_value)
                except AttributeError:
                    pass
        return spec

    @property
    def content(self):
        # Lazily fetched and cached service content.
        if not hasattr(self, '_content'):
            self._content = self.si.RetrieveContent()
        return self._content

    @property
    def datacenter(self):
        # Datacenter from module params, or the first one in the inventory.
        if not hasattr(self, '_datacenter'):
            dc = self.module.params.get('datacenter', None)
            if dc is not None:
                self._datacenter = [
                    d for d in self.content.rootFolder.childEntity
                    if d.name == dc
                ][0]
            else:
                self._datacenter = self.content.rootFolder.childEntity[0]
        return self._datacenter

    ################################################################################
    # Guest Power Methods
    ################################################################################
    def start(self, vm):
        """Power on *vm* (no-op when already powered on)."""
        if vm.runtime.powerState == 'poweredOn':
            return False, dict(changed=False,
                               msg='%s is already powered on.' % vm.name)
        task = vm.PowerOn()
        worked, msg, _ = self._wait_task(task)
        return worked, dict(changed=True, msg=msg)

    def stop(self, vm):
        """Hard power off *vm* (no-op when already powered off)."""
        if vm.runtime.powerState == 'poweredOff':
            return False, dict(msg='%s is already powered off.' % vm.name)
        task = vm.PowerOff()
        worked, msg, task = self._wait_task(task)
        if isinstance(task.info.error, vim.fault.InvalidPowerState):
            # Lost a race: someone else powered it off first.
            return False, dict(msg='%s is already powered off.' % vm.name)
        return worked, dict(changed=True, msg=msg)

    def shutdown(self, vm, force=False):
        """Guest-OS shutdown via VMware Tools; falls back to stop() if *force*."""
        if vm.runtime.powerState == 'poweredOff':
            return False, dict(changed=False,
                               msg='%s is already powered off.' % vm.name)
        if vm.guest.toolsRunningStatus != 'guestToolsRunning':
            if force:
                return self.stop(vm)
            else:
                # Fix: the %s placeholder was never interpolated.
                return True, dict(
                    msg='Cannot shutdown %s. Guest Tools are not currently running.'
                        % vm.name)
        try:
            vm.ShutdownGuest()
        except vim.fault.InvalidPowerState:
            return False, dict(changed=False,
                               msg='%s is already powered off.' % vm.name)
        if self._wait_for_shutdown(vm):
            return False, dict(changed=True,
                               msg='Successfully shutdown %s' % vm.name)
        else:
            if force:
                return self.stop(vm)
            return True, dict(msg='Failed to shutdown %s' % vm.name)

    def _wait_for_shutdown(self, vm):
        """Wait up to ``self.timeout`` seconds for *vm* to power off."""
        finish_time = time.time() + self.timeout
        while time.time() < finish_time:
            if vm.runtime.powerState == 'poweredOff':
                return True
        return False

    ################################################################################
    # Guest Snapshot Methods
    ################################################################################
    def _find_snapshot_by_name(self, vm, name):
        """Depth-first search of the snapshot tree; None when not found."""
        def find_snap(tree, name):
            if tree.name == name:
                return tree
            else:
                for child in tree.childSnapshotList:
                    s = find_snap(child, name)
                    if s is not None:
                        return s
        snap = None
        try:
            for root_snap in vm.snapshot.rootSnapshotList:
                snap = find_snap(root_snap, name)
                if snap is not None:
                    break
        except (IndexError, AttributeError):
            # vm.snapshot is None when the VM has no snapshots at all.
            snap = None
        return snap

    def create_snapshot(self, vm, name, description='', memory=False,
                        quiesce=False):
        """Create snapshot *name* on *vm*; no-op if it already exists."""
        if name is None:
            return True, dict(msg='The snapshot name needs to be specified')
        if self._find_snapshot_by_name(vm, name) is not None:
            return False, dict(changed=False,
                               msg='%s already has a snapshot by the name %s'
                                   % (vm.name, name))
        task = vm.CreateSnapshot(name=name, description=description,
                                 memory=memory, quiesce=quiesce)
        failed, out, _ = self._wait_task(task)
        return failed, dict(changed=True, msg=out)

    def remove_snapshot(self, vm, name, remove_children=False):
        """Remove snapshot *name* from *vm*; no-op if it does not exist."""
        if name is None:
            return True, dict(msg='The snapshot name needs to be specified')
        snap = self._find_snapshot_by_name(vm, name)
        if snap is None:
            return False, dict(
                changed=False,
                msg='%s does not have a snapshot by the name %s'
                    % (vm.name, name))
        task = snap.snapshot.Remove(removeChildren=remove_children)
        failed, out, _ = self._wait_task(task)
        return failed, dict(changed=True, msg=out)

    def revert_snapshot(self, vm, name=None, suppress_power_on=False):
        """Revert to snapshot *name*, or to the current snapshot when None."""
        if name is None:
            task = vm.RevertToCurrentSnapshot(
                suppressPowerOn=suppress_power_on)
        else:
            snap = self._find_snapshot_by_name(vm, name)
            if snap is None:  # was `== None`
                self.module.fail_json(
                    msg='Snapshot named "%s" does not exist' % (name))
            task = snap.snapshot.Revert(suppressPowerOn=suppress_power_on)
        failed, out, _ = self._wait_task(task)
        return failed, dict(changed=True, msg=out)

    ################################################################################
    # Folder Methods
    ################################################################################
    def create_folder(self, folder):
        """Create folder ``folder['name']`` under ``folder['parent']``."""
        failed = False
        parentfolder = folder.get('parent', self.datacenter.vmFolder.name)
        parent = self.get_container_view([vim.Folder], parentfolder,
                                         limit={
                                             'type': 'Datacenter',
                                             'name': self.datacenter.name
                                         })
        for fol in parent.childEntity:
            if fol.name == folder['name']:
                out = "Folder already exists"
                return failed, dict(changed=False, msg=out)
        try:
            f = parent.CreateFolder(folder['name'])
        except vim.fault.DuplicateName as e:
            out = e.msg
            changed = False
        except vim.fault as e:
            # NOTE(review): vim.fault is a namespace, not an exception class —
            # this clause likely never matches; confirm against pyVmomi.
            failed = True
            out = e.msg
            changed = False
        else:
            changed = True
            out = 'Successfully created "%s.%s"' % (parent.name, f.name)
        return failed, dict(changed=changed, msg=out)

    def destroy_folder(self, folder):
        """Destroy the folder named ``folder['name']``."""
        f = self.get_container_view([vim.Folder], folder['name'])
        task = f.Destroy()
        failed, out, _ = self._wait_task(task)
        return failed, dict(changed=True, msg=out)

    ################################################################################
    # VM Methods
    ################################################################################
    def run_task(self, vm, spec):
        """Invoke task ``spec['type']`` on *vm* with kwargs ``spec['value']``."""
        task_name = spec['type']
        spec_value = spec.get('value', {})
        self.update_spec(spec_value)
        task = getattr(vm, task_name)(**spec_value)
        failed, out, _ = self._wait_task(task)
        return failed, dict(changed=True, msg=out)

    def upgrade_tools(self, vm):
        """Upgrade VMware Tools; waits for tools to be running first."""
        if vm.guest.toolsVersionStatus2 == 'guestToolsCurrent':
            return False, dict(changed=False, msg='Tools already current.')
        finish_time = time.time() + self.timeout
        while time.time() < finish_time:
            if vm.guest.toolsRunningStatus == 'guestToolsRunning':
                return self.run_task(vm, {'type': 'UpgradeTools_Task'})
            sleep(2)  # was a hot busy-wait (`continue` with no delay)
        return True, dict(
            msg='Guest tools need to be running to upgrade them.')

    def destroy_vm(self, vm):
        """Delete *vm* from disk."""
        return self.run_task(vm, {'type': 'Destroy'})

    def create_vm(self, guest, spec, devices=None):
        """Create a VM from a config spec dict inside the guest's folder/pool."""
        spec_value = spec.get('value', {})
        self.update_spec(spec_value)
        vm_confspec = getattr(vim, spec.get('type'))(name=guest['name'],
                                                     **spec_value)
        pool = guest.get('resource_pool', None)
        if pool is None:
            compute = self.get_container_view(
                [vim.ComputeResource],
                self.module.params.get('compute_resource', None))
            if isinstance(compute, list):
                compute = compute[0]
            rp = compute.resourcePool
        else:
            rp = self.get_container_view([vim.ResourcePool], pool)
        try:
            foldernm = guest.get('folder', self.datacenter.vmFolder.name)
            folder = self.get_container_view([vim.Folder], name=foldernm,
                                             limit={
                                                 'type': 'Datacenter',
                                                 'name': self.datacenter.name
                                             })
        except IndexError:
            self.module.fail_json(msg='Could not find the folder "%s"'
                                      % guest['folder'])
        task = folder.CreateVm(vm_confspec, rp)
        failed, out, _ = self._wait_task(task)
        return failed, dict(changed=True, msg=out)

    def clone_vm(self, guest, spec, devices=None):
        """Clone ``guest['clone_from']`` into a new VM named ``guest['name']``."""
        spec_value = spec.get('value', {})
        self.update_spec(spec_value)
        vm_clonespec = getattr(vim, spec.get('type'))(**spec_value)
        parent = guest['clone_from']
        try:
            parent = self.get_container_view([vim.VirtualMachine],
                                             guest.get('clone_from'))
        except IndexError:
            # Fix: the original message ended in a bare '%' (broken format
            # string) and misspelled "find".
            self.module.fail_json(
                msg='Could not find the Virtual Machine to clone from, %s'
                    % guest['clone_from'])
        try:
            foldernm = guest.get('folder', self.datacenter.vmFolder.name)
            folder = self.get_container_view([vim.Folder], name=foldernm)
        except IndexError:
            self.module.fail_json(msg='Could not find the folder "%s"'
                                      % guest['folder'])
        task = parent.Clone(folder, guest['name'], vm_clonespec)
        failed, out, _ = self._wait_task(task)
        return failed, dict(changed=True, msg=out)

    def keep_task_alive(self, lease):
        """
        Keeps the lease alive while POSTing the VMDK.
        Runs in a helper thread; returns when the lease finishes or errors.
        """
        i = 5
        while (True):
            sleep(5)
            try:
                # Choosing arbitrary percentage to keep the lease alive.
                lease.HttpNfcLeaseProgress(i)
                i = i + 5
                if (lease.state == vim.HttpNfcLease.State.done):
                    return
            # If the lease is released, we get an exception.
            # Returning to kill the thread.
            except Exception:  # was a bare except; deliberate best-effort exit
                return

    def deploy_ovf(self, guest, spec, devices=None):
        """Deploy an OVF + VMDK onto a host via an HTTP NFC lease and curl."""
        # Read the OVF File
        ovf_file = guest.get('ovf_file')
        if os.path.exists(ovf_file):
            with open(ovf_file, 'r') as f:
                try:
                    ovfd = f.read()
                except Exception:
                    # Fix: previously only set `msg` and fell through to a
                    # NameError on `ovfd` below.
                    self.module.fail_json(
                        msg="Could not read file: %s" % ovf_file)
        else:
            self.module.fail_json(
                msg="ovf file does not exists at: %s" % ovf_file)

        # Find pool
        compute = self.get_container_view([vim.ComputeResource],
                                          guest['compute_resource'])
        # Get datastore object.
        datastore_list = self.datacenter.datastoreFolder.childEntity
        if guest['datastore_name']:
            datastore_obj = self.get_container_view([vim.Datastore],
                                                    guest['datastore_name'])
        elif len(datastore_list) > 0:
            datastore_obj = datastore_list[0]
        else:
            # Fix: previously printed and fell through to a NameError on
            # `datastore_obj`.
            self.module.fail_json(msg="No datastores found in DC (%s)."
                                      % self.datacenter.name)
        pool = guest.get('resource_pool', 'Resources')
        rp = self.get_container_view([vim.ResourcePool], pool)

        # get the folder to deploy to (defaults to the datacenter vm folder)
        folder = self.get_container_view([vim.Folder],
                                         name=self.datacenter.vmFolder.name)
        try:
            parentfolder = self.module.params.get(
                'folder', self.datacenter.vmFolder.name)
            # NOTE(review): assumes the 'folder' param is a dict with
            # 'parent'/'name' keys — a plain string default would raise
            # TypeError here, not IndexError; confirm against callers.
            parentnm = parentfolder['parent']
            foldernm = parentfolder['name']
            parent = self.get_container_view([vim.Folder], name=parentnm)
            for fol in parent.childEntity:
                if fol.name == foldernm:
                    folder = fol
        except IndexError:
            self.module.fail_json(msg='Could not find the folder "%s"'
                                      % guest['folder'])

        # OVF Manager
        manager = self.content.ovfManager
        spec_params = vim.OvfManager.CreateImportSpecParams(
            entityName=guest['name'], diskProvisioning=guest['disk_type'])
        import_spec = manager.CreateImportSpec(ovfd, rp, datastore_obj,
                                               spec_params)
        task = rp.ImportVApp(import_spec.importSpec, folder)
        while (True):
            if (task.state == vim.HttpNfcLease.State.ready):
                # Assuming single VMDK.
                url = task.info.deviceUrl[0].url.replace(
                    '*', self.vsphere_host)
                # Spawn a daemon thread to keep the lease active while
                # POSTing the VMDK.
                keepalive_thread = Thread(target=self.keep_task_alive,
                                          args=(task, ))
                keepalive_thread.start()
                # POST the VMDK to the host via curl. Requests library would
                # work too.
                curl_cmd = (
                    "curl -Ss -X POST --insecure -T %s -H 'Content-Type: application/x-vnd.vmware-streamVmdk' %s"
                    % (guest['vmdk_file'], url))
                returncode = os.system(curl_cmd)
                if returncode == 0:
                    task.HttpNfcLeaseComplete()
                    keepalive_thread.join()
                    return False, dict(
                        changed=True,
                        msg="image deploy completed sucessfully")
                else:
                    task.HttpNfcLeaseAbort()
                    keepalive_thread.join()
                    return True, dict(
                        changed=True,
                        msg="failed to upload the VMDK to ESXi Host, URL = {0}"
                            .format(curl_cmd))
            elif (task.state == vim.HttpNfcLease.State.error):
                raise Exception(
                    "Error connecting to the esxi host to upload vmdk {0}".
                    format(task.error))

    def rename_vm(self, vm, new_name):
        """Rename *vm* to *new_name*."""
        if new_name is None:
            return True, dict(msg='The new vm name needs to be specified')
        task = vm.Rename(new_name)
        failed, out, _ = self._wait_task(task)
        return failed, dict(changed=True, msg=out)

    ################################################################################
    # Guest Operations Methods
    ################################################################################
    def _run_guest_op_command(self, command, spec_value):
        """Retry *command* until tools are available or ``self.timeout`` passes."""
        fail = True
        finish_time = time.time() + self.timeout
        while time.time() < finish_time:
            try:
                out = command(**spec_value)
                fail = False
                break
            except vim.fault.GuestOperationsUnavailable as e:
                out = e.msg
                continue
            except vim.fault as e:
                # NOTE(review): vim.fault is a namespace, not an exception
                # class — this clause likely never matches; confirm.
                self.module.fail_json(msg=e.msg)
        return fail, out

    def guest_operation(self, manager, spec):
        """Run guest-operations command ``spec['type']`` on *manager*."""
        manager = getattr(self.content.guestOperationsManager, manager)
        try:
            command = getattr(manager, spec.get('type'))
        except AttributeError:
            self.module.fail_json(
                msg='Failed to find %s command for manager, %s.'
                    % (spec.get('type', 'NOT GIVEN'), manager))
        spec_value = spec.get('value', {})
        self.update_spec(spec_value)
        fail, out = self._run_guest_op_command(command, spec_value)
        try:
            # Fix: was json.loads({...}) on a dict, which always raises
            # TypeError and made this branch dead code.
            encodedOut = {
                'command': spec.get('type'),
                'out': json.dumps(out, cls=VsphereJsonEncoder)
            }
        except TypeError:
            encodedOut = str(out)
        return fail, dict(changed=True, guest_operation=encodedOut)

    def put_file_in_guest(self, local_path, spec):
        """Upload *local_path* into the guest via InitiateFileTransferToGuest."""
        fm = self.content.guestOperationsManager.fileManager
        size = os.path.getsize(local_path)
        spec_value = spec.get('value', {})
        self.update_spec(spec_value)
        spec_value.update({'fileSize': size})
        command = fm.InitiateFileTransferToGuest
        fail, out = self._run_guest_op_command(command, spec_value)
        if fail:
            self.module.fail_json(msg=out)
        if re.search(r'http[s]://\*/', out):
            # Assume that the vsphere host we are connected to
            # is the host that vsphere is looking for.
            out = re.sub(r'/\*/', '/%s/' % self.vsphere_host, out)
        try:
            import requests
        except ImportError:
            self.module.fail_json(
                msg='Need requests package to put file on guest.')
        with open(local_path, 'r') as fd:
            res = requests.put(out, data=fd, verify=False)
        failed = False if res.status_code == 200 else True
        return failed, dict(
            changed=True,
            msg='Uploaded file to server. Status Code: %d, Text %s'
                % (res.status_code, res.text))

    def add_disk(self, vm, disk_size, disk_type):
        """Add a *disk_size* GB disk of *disk_type* to *vm*'s SCSI controller."""
        spec = vim.vm.ConfigSpec()
        # get all disks on a VM, set unit_number to the next available
        for dev in vm.config.hardware.device:
            if hasattr(dev.backing, 'fileName'):
                unit_number = int(dev.unitNumber) + 1
                # unit_number 7 reserved for scsi controller
                if unit_number == 7:
                    unit_number += 1
                if unit_number >= 16:
                    # Fix: previously printed and returned bare None where
                    # callers expect a (failed, dict) tuple.
                    return True, dict(changed=False,
                                      msg="we don't support this many disks")
            if isinstance(dev, vim.vm.device.VirtualSCSIController):
                controller = dev
        # NOTE(review): assumes the VM already has at least one virtual disk
        # and a SCSI controller; otherwise unit_number/controller are unbound.
        # add disk here
        dev_changes = []
        new_disk_kb = int(disk_size) * 1024 * 1024
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        disk_spec.fileOperation = "create"
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        disk_spec.device = vim.vm.device.VirtualDisk()
        disk_spec.device.backing = \
            vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        if disk_type == 'thin':
            disk_spec.device.backing.thinProvisioned = True
        disk_spec.device.backing.diskMode = 'persistent'
        disk_spec.device.unitNumber = unit_number
        disk_spec.device.capacityInKB = new_disk_kb
        disk_spec.device.controllerKey = controller.key
        dev_changes.append(disk_spec)
        spec.deviceChange = dev_changes
        task = vm.ReconfigVM_Task(spec=spec)
        failed, out, _ = self._wait_task(task)
        return failed, dict(changed=True, msg=out)

    def get_file_in_guest(self, local_path, spec):
        """Download a guest file to *local_path* via InitiateFileTransferFromGuest."""
        fm = self.content.guestOperationsManager.fileManager
        spec_value = spec.get('value', {})
        self.update_spec(spec_value)
        command = fm.InitiateFileTransferFromGuest
        fail, out = self._run_guest_op_command(command, spec_value)
        if fail:
            self.module.fail_json(msg=out)
        url = out.url
        if re.search(r'http[s]://\*/', url):
            # Assume that the vsphere host we are connected to
            # is the host that vsphere is looking for.
            url = re.sub(r'/\*/', '/%s/' % self.vsphere_host, url)
        try:
            import requests
        except ImportError:
            self.module.fail_json(
                msg='Need requests package to put file on guest.')
        res = requests.get(url, stream=True, verify=False)
        if res.status_code == 200:
            # NOTE(review): text-mode 'w' with byte chunks is Py2-only
            # behaviour; under Py3 this would need mode 'wb' — confirm target.
            with open(local_path, 'w') as fd:
                for chunk in res.iter_content(1024):
                    fd.write(chunk)
            failed = False
            msg = 'Successfully downloaded file from guest.'
        else:
            failed = True
            msg = 'Failed to download file from guest.'
        return failed, dict(changed=True, msg=msg)
content.rootFolder, vimtype, True) for c in container.view: if name and c.name == name: obj = c break container.Destroy() return obj args = get_args() s = ssl.SSLContext( ssl.PROTOCOL_SSLv23) # For VC 6.5/6.0 s=ssl.SSLContext(ssl.PROTOCOL_TLSv1) s.verify_mode = ssl.CERT_NONE si = SmartConnect(host=args.host, user=args.user, pwd=args.password, sslContext=s) content = si.content vm = get_obj(content, [vim.VirtualMachine], args.vmname) if (vm and vm.capability.perVmEvcSupported): print( "VM available in vCenter server and it supports perVm EVC, thats good") else: print("VM either NOT found or perVMEvc is NOT supported on the VM") quit() supported_evc_mode = si.capability.supportedEVCMode for evc_mode in supported_evc_mode: if (evc_mode.key == "intel-ivybridge"): ivy_mask = evc_mode.featureMask
def main():
    """Fault-tolerance health-exchange test.

    Creates (or reuses) an FT primary/secondary VM pair across two hosts,
    exchanges storage/network ComponentHealthInfo between the peers, forces a
    failover (GoLivePeerVM), and — when a third host is given — VMotions both
    primary and secondary to it, re-checking that peer health propagates.
    Repeats for the requested number of iterations; sets the module-global
    ``status`` to "FAIL" on any exception.
    """
    supportedArgs = [
        (["P:", "primary host="], "localhost", "Primary host name", "primaryHost"),
        (["S:", "secondary host="], "localhost", "Secondary host name", "secondaryHost"),
        (["T:", "tertiary host="], "", "Third host name for VMotion test", "tertiaryHost"),
        (["d:", "shared datastore name="], "storage1", "shared datastore name", "dsName"),
        # NOTE(review): the line below carries a credential-sanitizer artifact
        # ("user="******"root") — restore the original default before running.
        (["u:", "user="******"root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["e:", "useExisting="], False, "Use existing VM", "useExistingVm"),
        (["v:", "VM name="], "vmFT", "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter")
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage")]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Process command line
    primaryName = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    dsName = args.GetKeyValue("dsName")
    primaryHost = args.GetKeyValue("primaryHost")
    secondaryHost = args.GetKeyValue("secondaryHost")
    tertiaryHost = args.GetKeyValue("tertiaryHost")
    useExisting = bool(args.GetKeyValue("useExistingVm"))
    # Same host for both roles: secondary needs a distinct registered name.
    if primaryHost == secondaryHost:
        secondaryName = "_" + primaryName
    else:
        secondaryName = primaryName

    for i in range(numiter):
        primaryVm = None
        primarySi = None
        secondarySi = None
        tertiarySi = None
        try:
            # Connect to tertiary host, if provided.
            if tertiaryHost != "":
                tertiarySi = SmartConnect(host=tertiaryHost,
                                          user=args.GetKeyValue("user"),
                                          pwd=args.GetKeyValue("pwd"))
                Log("Connected to VMotion test host, VMotion will be tested")
                if not useExisting:
                    CleanupVm(primaryName)
                    CleanupVm(secondaryName, True)

            # Connect to primary host
            primarySi = SmartConnect(host=primaryHost,
                                     user=args.GetKeyValue("user"),
                                     pwd=args.GetKeyValue("pwd"))
            Log("Connected to Primary host")
            # Cleanup from previous runs
            if not useExisting:
                CleanupVm(primaryName)
                CleanupVm(secondaryName, True)

            # Connect to secondary host
            secondarySi = SmartConnect(host=secondaryHost,
                                       user=args.GetKeyValue("user"),
                                       pwd=args.GetKeyValue("pwd"))
            Log("Connected to Secondary host")
            # Cleanup from previous runs
            if not useExisting:
                CleanupVm(primaryName)
                CleanupVm(secondaryName, True)

            # Create new VM
            connect.SetSi(primarySi)
            primaryVm = None
            if useExisting:
                primaryVm = folder.Find(primaryName)
                if primaryVm == None:
                    raise Exception("No primary VM with name " +
                                    primaryName + " found!")
                Log("Using primary VM " + primaryName)
            else:
                Log("Creating primary VM " + primaryName)
                primaryVm = vm.CreateQuickDummy(primaryName,
                                                guest = "winXPProGuest",
                                                cdrom = 1,
                                                numScsiDisks = 1,
                                                scrubDisks = True,
                                                datastoreName = dsName)

            # Get details about primary VM
            primaryUuid = primaryVm.GetConfig().GetInstanceUuid()
            primaryCfgPath = primaryVm.GetConfig().GetFiles().GetVmPathName()
            primaryDir = primaryCfgPath[:primaryCfgPath.rfind("/")]
            Log("Using VM : " + primaryVm.GetName() +
                " with instanceUuid " + primaryUuid)
            if useExisting:
                ftState = Vim.VirtualMachine.FaultToleranceState.running
            else:
                ftState = Vim.VirtualMachine.FaultToleranceState.notConfigured
            CheckFTState(primaryVm, ftState)

            # Create secondary VM
            connect.SetSi(secondarySi)
            if useExisting:
                secondaryVm = folder.Find(secondaryName)
                if secondaryVm == None:
                    raise Exception("No secondary VM with name " +
                                    secondaryName + " found!")
                Log("Using secondary VM " + secondaryName)
            else:
                Log("Creating secondary VM " + secondaryName)
                secondaryVm = vm.CreateQuickSecondary(secondaryName,
                                                      primaryUuid,
                                                      primaryCfgPath,
                                                      primaryDir)
                if secondaryVm == None:
                    # NOTE(review): string exception — invalid in modern
                    # Python; should be `raise Exception(...)`.
                    raise "Secondary VM creation failed"
                Log("Created secondary VM " + secondaryVm.GetName())
            secondaryUuid = secondaryVm.GetConfig().GetInstanceUuid()
            secondaryCfgPath = secondaryVm.GetConfig().GetFiles().GetVmPathName()
            Log("Secondry VM: instanceUuid " + secondaryUuid)
            Log("Secondary cfg path: " + secondaryCfgPath)
            primaryFTMgr = host.GetFaultToleranceMgr(primarySi)

            if not useExisting:
                ## Configure some additional config variables needed for FT
                ## This should eventually be done automatically at FT Vmotion time
                Log("Setting up extra config settings for the primary VM...")
                connect.SetSi(primarySi)
                extraCfgs = primaryVm.GetConfig().GetExtraConfig()
                AddExtraConfig(extraCfgs, "replay.allowBTOnly", "TRUE")
                AddExtraConfig(extraCfgs, "replay.allowFT", "TRUE")
                if primaryName != secondaryName:
                    AddExtraConfig(extraCfgs, "ft.allowUniqueName", "TRUE")
                cSpec = Vim.Vm.ConfigSpec()
                cSpec.SetExtraConfig(extraCfgs)
                task = primaryVm.Reconfigure(cSpec)
                WaitForTask(task)

                # Register secondary VM
                Log("Register secondary VM with the powered off primary")
                task = primaryFTMgr.RegisterSecondary(primaryVm,
                                                      secondaryUuid,
                                                      secondaryCfgPath)
                WaitForTask(task)
                Log("Secondary VM registered successfully")
                # Verify FT state
                CheckFTState(primaryVm,
                             Vim.VirtualMachine.FaultToleranceState.enabled)
                Log("FT configured successfully.")
                # PowerOn FT VM
                Log("Powering on Primary VM")
                vm.PowerOn(primaryVm)
                # Perform FT VMotion to setup protection
                Log("Migrating state from primary to secondary VM.")
                vm.Migrate(primaryVm, primarySi, secondarySi,
                           secondaryCfgPath, True,
                           Vim.Host.VMotionManager.VMotionType.fault_tolerance)
                CheckFTState(primaryVm,
                             Vim.VirtualMachine.FaultToleranceState.running)
                CheckFTState(secondaryVm,
                             Vim.VirtualMachine.FaultToleranceState.running,
                             secondarySi, False)
                Log("VMs are running with FT protection.")

            # Test VM component health exchanges
            secondaryFTMgr = host.GetFaultToleranceMgr(secondarySi)
            health = Vim.Host.FaultToleranceManager.ComponentHealthInfo()
            health.SetIsStorageHealthy(True)
            health.SetIsNetworkHealthy(True)
            Log("Testing VM health exchange from primary to secondary: all healthy")
            TestComponentHealthInfo(primaryFTMgr, secondaryFTMgr,
                                    primaryVm, secondaryVm, health)
            Log("Testing VM health exchange from primary to secondary: storage unhealthy")
            health.SetIsStorageHealthy(False)
            TestComponentHealthInfo(primaryFTMgr, secondaryFTMgr,
                                    primaryVm, secondaryVm, health)
            Log("Testing VM health exchange from secondary to primary: network unhealthy")
            health.SetIsStorageHealthy(True)
            health.SetIsNetworkHealthy(False)
            TestComponentHealthInfo(secondaryFTMgr, primaryFTMgr,
                                    secondaryVm, primaryVm, health)

            # Making peer go live
            Log("Making FT primary go live from secondary")
            secondaryFTMgr.GoLivePeerVM(secondaryVm)
            time.sleep(5)
            CheckFTState(primaryVm,
                         Vim.VirtualMachine.FaultToleranceState.needSecondary,
                         primarySi)
            WaitForPowerState(secondaryVm, secondarySi,
                              Vim.VirtualMachine.PowerState.poweredOff)

            # Set local VM storage and network health
            Log("Setting primary VM as unhealthy")
            health.SetIsStorageHealthy(False)
            health.SetIsNetworkHealthy(False)
            primaryFTMgr.SetLocalVMComponentHealth(primaryVm, health)

            # Restart secondary VM. It should still show storage unhealthy
            Log("Restarting secondary VM.")
            vm.Migrate(primaryVm, primarySi, secondarySi,
                       secondaryCfgPath, True,
                       Vim.Host.VMotionManager.VMotionType.fault_tolerance)
            CheckFTState(primaryVm,
                         Vim.VirtualMachine.FaultToleranceState.running)
            CheckFTState(secondaryVm,
                         Vim.VirtualMachine.FaultToleranceState.running,
                         secondarySi, False)
            Log("VMs are running with FT protection.")

            # Verify health has propagated to the new secondary
            health2 = secondaryFTMgr.GetPeerVMComponentHealth(secondaryVm)
            if health.isStorageHealthy != health2.isStorageHealthy or \
               health.isNetworkHealthy != health2.isNetworkHealthy:
                Log("Got peer health information : " + str(health2))
                raise Exception("Peer health information does not match")

            # Test VMotion of primary and secondary, if a third host is given
            if tertiarySi != None:
                # Mark secondary as unhealthy
                health.SetIsStorageHealthy(True)
                health.SetIsNetworkHealthy(False)
                secondaryFTMgr.SetLocalVMComponentHealth(secondaryVm, health)
                Log("VMotion primary to tertiary host")
                vm.Migrate(primaryVm, primarySi, tertiarySi,
                           primaryCfgPath, False)
                primaryFTMgr = host.GetFaultToleranceMgr(tertiarySi)
                connect.SetSi(tertiarySi)
                primaryVm = folder.Find(primaryName)
                CheckFTState(primaryVm,
                             Vim.VirtualMachine.FaultToleranceState.running,
                             tertiarySi)
                CheckFTState(secondaryVm,
                             Vim.VirtualMachine.FaultToleranceState.running,
                             secondarySi, False)
                # Verify secondary health has propagated to primary on new host
                health2 = primaryFTMgr.GetPeerVMComponentHealth(primaryVm)
                if health.isStorageHealthy != health2.isStorageHealthy or \
                   health.isNetworkHealthy != health2.isNetworkHealthy:
                    Log("Got peer health information : " + str(health2))
                    raise Exception("Peer health information does not match")
                # Mark primary as unhealthy
                health.SetIsStorageHealthy(False)
                health.SetIsNetworkHealthy(True)
                primaryFTMgr.SetLocalVMComponentHealth(primaryVm, health)
                Log("VMotion secondary to tertiary host")
                vm.Migrate(secondaryVm, secondarySi, tertiarySi,
                           secondaryCfgPath, False)
                secondaryFTMgr = host.GetFaultToleranceMgr(tertiarySi)
                connect.SetSi(tertiarySi)
                secondaryVm = folder.Find(secondaryName)
                CheckFTState(primaryVm,
                             Vim.VirtualMachine.FaultToleranceState.running,
                             tertiarySi)
                CheckFTState(secondaryVm,
                             Vim.VirtualMachine.FaultToleranceState.running,
                             tertiarySi, False)
                # Verify primary health has propagated to secondary on new host
                health2 = secondaryFTMgr.GetPeerVMComponentHealth(secondaryVm)
                if health.isStorageHealthy != health2.isStorageHealthy or \
                   health.isNetworkHealthy != health2.isNetworkHealthy:
                    Log("Got peer health information : " + str(health2))
                    raise Exception("Peer health information does not match")
                Log("Power off Primary VM")
                connect.SetSi(tertiarySi)
                vm.PowerOff(primaryVm)
                WaitForPowerState(primaryVm, tertiarySi,
                                  Vim.VirtualMachine.PowerState.poweredOff)
                WaitForPowerState(secondaryVm, primarySi,
                                  Vim.VirtualMachine.PowerState.poweredOff)
                if not useExisting:
                    Log("Cleaning up VMs")
                    CleanupVm(primaryName)
                    CleanupVm(secondaryName, True)
            else:
                # Skipped VMotion test
                Log("Power off Primary VM")
                connect.SetSi(primarySi)
                vm.PowerOff(primaryVm)
                WaitForPowerState(primaryVm, primarySi,
                                  Vim.VirtualMachine.PowerState.poweredOff)
                WaitForPowerState(secondaryVm, secondarySi,
                                  Vim.VirtualMachine.PowerState.poweredOff)
                if not useExisting:
                    Log("Cleaning up VMs")
                    CleanupVm(primaryName)
                    connect.SetSi(secondarySi)
                    CleanupVm(secondaryName, True)
        except Exception as e:
            Log("Caught exception : " + str(e))
            global status
            status = "FAIL"
            Disconnect(primarySi)
            Disconnect(secondarySi)
            return
        Disconnect(primarySi)
        Disconnect(secondarySi)
# Create a datacenter and a cluster inside it from command-line arguments.
from tools import cluster
from tools import datacenter
from tools import cli

# Standard connection arguments plus the two names we need to create.
PARSER = cli.build_arg_parser()
for flags, helptext in (
        (("-n", "--dcname"), "Name of the Datacenter to create."),
        (("-c", "--cname"), "Name to give the cluster to be created.")):
    PARSER.add_argument(*flags, required=True, action="store", help=helptext)
MY_ARGS = PARSER.parse_args()
cli.prompt_for_password(MY_ARGS)

# Self-signed / legacy certs: certificate verification is disabled on purpose.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE

SI = SmartConnect(host=MY_ARGS.host,
                  user=MY_ARGS.user,
                  pwd=MY_ARGS.password,
                  port=int(MY_ARGS.port),
                  sslContext=context)
atexit.register(Disconnect, SI)

dc = datacenter.create_datacenter(dcname=MY_ARGS.dcname, service_instance=SI)
cluster.create_cluster(datacenter=dc, name=MY_ARGS.cname)
def main():
    """
    Clone a VM or template into multiple VMs with logical names with numbers
    and allow for post-processing.

    Returns 0 on success, 1 on any failure (connection, lookup, interrupt).
    """
    # Handling arguments
    args = get_args()
    ipv6 = args.ipv6
    amount = args.amount[0]
    basename = args.basename[0]
    count = args.count[0]
    debug = args.debug
    folder_name = None
    if args.folder:
        folder_name = args.folder[0]
    host = args.host[0]
    print_ips = args.ips
    print_macs = args.macs
    log_file = None
    if args.logfile:
        log_file = args.logfile[0]
    port = args.port[0]
    post_script = None
    if args.post_script:
        post_script = args.post_script[0]
    password = None
    if args.password:
        password = args.password[0]
    power_on = not args.nopoweron
    resource_pool_name = None
    if args.resource_pool:
        resource_pool_name = args.resource_pool[0]
    template = args.template[0]
    threads = args.threads[0]
    username = args.username[0]
    verbose = args.verbose
    maxwait = args.maxwait[0]

    # Logging settings
    if debug:
        log_level = logging.DEBUG
    elif verbose:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING
    # FIX: the original if/else called basicConfig with filename=log_file in
    # BOTH branches; basicConfig(filename=None) already logs to stderr, so a
    # single call covers both cases.
    logging.basicConfig(filename=log_file,
                        format='%(asctime)s %(levelname)s %(message)s',
                        level=log_level)
    logger = logging.getLogger(__name__)

    # Getting user password
    if password is None:
        logger.debug('No command line password received, requesting password from user')
        password = getpass.getpass(prompt='Enter password for vCenter %s for user %s: ' % (host, username))

    try:
        si = None
        try:
            logger.info('Connecting to server %s:%s with username %s' % (host, port, username))
            si = SmartConnect(host=host, user=username, pwd=password, port=int(port))
        except IOError as e:
            # Best-effort: a failed connect leaves si as None and is reported
            # just below.  (Was Python-2-only `except IOError, e`.)
            pass

        if not si:
            logger.error('Could not connect to host %s with user %s and specified password' % (host, username))
            return 1

        logger.debug('Registering disconnect at exit')
        atexit.register(Disconnect, si)

        # Find the correct VM
        logger.debug('Finding template %s' % template)
        template_vm = find_vm(si, logger, template, False)
        if template_vm is None:
            logger.error('Unable to find template %s' % template)
            return 1
        logger.info('Template %s found' % template)

        # Find the correct Resource Pool
        resource_pool = None
        if resource_pool_name is not None:
            logger.debug('Finding resource pool %s' % resource_pool_name)
            resource_pool = find_resource_pool(si, logger, resource_pool_name)
            if resource_pool is None:
                logger.error('Unable to find resource pool %s' % resource_pool_name)
                return 1
            logger.info('Resource pool %s found' % resource_pool_name)

        # Find the correct folder
        folder = None
        if folder_name is not None:
            logger.debug('Finding folder %s' % folder_name)
            folder = find_folder(si, logger, folder_name)
            if folder is None:
                logger.error('Unable to find folder %s' % folder_name)
                return 1
            logger.info('Folder %s found' % folder_name)
        else:
            logger.info('Setting folder to template folder as default')
            folder = template_vm.parent

        # Creating necessary specs
        logger.debug('Creating relocate spec')
        if resource_pool is not None:
            logger.debug('Resource pool found, using')
            relocate_spec = vim.vm.RelocateSpec(pool=resource_pool)
        else:
            logger.debug('No resource pool found, continuing without it')
            relocate_spec = vim.vm.RelocateSpec()
        logger.debug('Creating clone spec')
        clone_spec = vim.vm.CloneSpec(powerOn=power_on,
                                      template=False,
                                      location=relocate_spec)

        # Pool handling
        logger.debug('Setting up pools and threads')
        pool = ThreadPool(threads)
        mac_ip_pool = ThreadPool(threads)
        mac_ip_pool_results = []
        logger.debug('Pools created with %s threads' % threads)

        # Generate VM names: basename-<count>, count incrementing per clone
        logger.debug('Creating thread specifications')
        vm_specs = []
        vm_names = []
        for _ in range(amount):
            vm_names.append('%s-%i' % (basename, count))
            count += 1
        vm_names.sort()
        for vm_name in vm_names:
            vm_specs.append((si, logger, vm_name, clone_spec, folder, ipv6,
                             maxwait, post_script, power_on, print_ips,
                             print_macs, template, template_vm, mac_ip_pool,
                             mac_ip_pool_results))

        logger.debug('Running virtual machine clone pool')
        pool.map(vm_clone_handler_wrapper, vm_specs)
        logger.debug('Closing virtual machine clone pool')
        pool.close()
        pool.join()

        logger.debug('Waiting for all mac, ip and post-script processes')
        for running_task in mac_ip_pool_results:
            running_task.wait()
        logger.debug('Closing mac, ip and post-script processes')
        mac_ip_pool.close()
        mac_ip_pool.join()
    except Exception as e:
        # NOTE(review): the outer try in this chunk had no visible except
        # clause (syntactically incomplete); this handler completes it so a
        # failure during cloning is logged instead of crashing. Confirm
        # against the upstream script which exceptions were intended here.
        logger.error('Caught exception: %s' % e)
        return 1

    return 0
class VMware(object): def __init__(self, host, port, user, password): try: self.session = SmartConnect(host=host, user=user, pwd=password, port=int(port)) except IOError: raise VMwareException( 'Unable to create vCenter session {}:{}@{}'.format( host, port, user)) atexit.register(Disconnect, self.session) self.content = self.session.RetrieveContent() # obj_names could be used instead of obj_filter # if obj_filter provided, obj_names will be ignored def get_obj(self, prop_names=None, obj_type=vim.VirtualMachine, obj_names=None, obj_filter=None, only_one=True, root=None): # Setup defaults if prop_names is None: prop_names = () if root is None: root = self.content.rootFolder # Setup filter if not obj_filter and obj_names: obj_filter = lambda props: props['name'] in obj_names # Starting point obj_spec = vmodl.query.PropertyCollector.ObjectSpec() obj_spec.obj = self.content.viewManager.CreateContainerView( root, [ obj_type, ], True) obj_spec.skip = True # Define path for search traversal_spec = vmodl.query.PropertyCollector.TraversalSpec() traversal_spec.type = obj_spec.obj.__class__ traversal_spec.name = 'traversing' traversal_spec.path = 'view' traversal_spec.skip = False obj_spec.selectSet = [traversal_spec] # Identify the properties to the retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec() property_spec.type = obj_type property_spec.all = False property_spec.pathSet = list(set(tuple(prop_names) + ('name', ))) # Create filter specification filter_spec = vmodl.query.PropertyCollector.FilterSpec() filter_spec.objectSet = [obj_spec] filter_spec.propSet = [property_spec] # Retrieve properties collector = self.session.content.propertyCollector objs = collector.RetrieveContents([filter_spec]) # Filter objects objs_and_props = [] for obj in objs: # Compile propeties properties = {prop.name: prop.val for prop in obj.propSet} # If it fails filter, skip if obj_filter and not obj_filter(properties): continue # Return this one with obj properties['obj'] = 
obj.obj objs_and_props.append(properties) # If we only need one object, return first if only_one: if len(objs_and_props) == 1: return objs_and_props[0] elif len(objs_and_props) > 1: raise VMwareException( 'Found multiple {} objects that match filter'.format( obj_type)) else: raise VMwareException( 'Could not find {} object that matches filter'.format( obj_type)) # If we are okay with more, then return whole else: if len(objs_and_props): return objs_and_props else: raise VMwareException( 'Unable to find {} objects that match filter'.format( obj_type)) def get_folder(self, path): current_folder = None for f in path.split('/'): current_folder = self.get_obj(obj_type=vim.Folder, obj_names=(f, ), root=current_folder)['obj'] return current_folder def get_portgroup(self, vlan, host): def vlan_host_filter(props): # If host is in this portgroup's collection of host, grab it if the vlan matches if host in (h.name for h in props['host']): return (props['config.defaultPortConfig'].vlan.vlanId == vlan) # Fallback to false return False return self.get_obj(prop_names=('host', 'config.defaultPortConfig'), obj_type=vim.dvs.DistributedVirtualPortgroup, obj_filter=vlan_host_filter)['obj'] def new_start_task(self, task, task_tag=''): if task_tag: task_tag = '[{}] '.format(task_tag) task.wait( queued=lambda t: sys.stdout.write('{}Queued...\n'.format(task_tag) ), running=lambda t: sys.stdout.write('{}Running...\n'.format(task_tag )), success=lambda t: sys.stdout.write('{}Completed.\n\n'.format( task_tag)), error=lambda t: sys.stdout.write('{}Error!\n\n'.format(task_tag))) return True def start_task(self, task, success_msg, task_tag='', hint_msg=None, last_task=True): pre_result = '\n' if task_tag: task_tag = '[{}] '.format(task_tag) pre_result = '' try: task.wait(queued=lambda t: sys.stdout.write('{}Queued...\n'.format( task_tag)), running=lambda t: sys.stdout.write('{}Running...\n'. 
format(task_tag)), success=lambda t: sys.stdout.write('{}{}{}\n'.format( pre_result, task_tag, success_msg)), error=lambda t: sys.stdout.write('{}{}Error!\n'.format( pre_result, task_tag))) except Exception as e: print '\nException: {}'.format(e.msg) if hint_msg: print 'Hint: {}'.format(hint_msg) return False return True def set_compute(self, vm_name, memory_mb, cpu_count): vm_obj = self.get_obj(obj_names=(vm_name, ))['obj'] spec = vim.vm.ConfigSpec() spec.memoryMB = long(memory_mb) spec.numCPUs = int(cpu_count) self.new_start_task(vm_obj.ReconfigVM_Task(spec=spec)) def get_compute(self, vm_name): resource_props = ('config.hardware.memoryMB', 'config.hardware.numCPU') vm = self.get_obj(obj_names=(vm_name, ), prop_names=resource_props) return (vm[r] for r in resource_props) def get_disks(self, vm_name): vm = self.get_obj(obj_names=(vm_name, ), prop_names=('config.hardware.device', )) print('Disks') print('-----') disk_index = 0 for device in vm['config.hardware.device']: if type(device) == vim.vm.device.VirtualDisk: disk_index += 1 thin_prov = ' (thin)' if device.backing.thinProvisioned else '' size_gb = device.capacityInKB / (1024 * 1024) datastore = device.backing.fileName print '{}) {} - {:.1f}GB{}'.format(disk_index, datastore, size_gb, thin_prov) def human_readable_b(self, bytes): for item in ['bytes', 'KB', 'MB', 'GB']: if bytes < 1024.0: return "%3.1f%s" % (bytes, item) bytes /= 1024.0 return "%3.1f%s" % (bytes, 'TB') def get_cluster_datastores(self, cluster, ds_prefix): cluster_datastores = self.get_obj(obj_names=(cluster, ), prop_names=('datastore', ), obj_type=vim.ClusterComputeResource, only_one=True) for ds in cluster_datastores['datastore']: if not ds.summary.name.startswith(ds_prefix): continue space_free = self.human_readable_b(ds.summary.freeSpace or 0) space_total = self.human_readable_b(ds.summary.capacity) space_overprov = (ds.summary.uncommitted or 0) - (ds.summary.freeSpace or 0) percent_prov = ((ds.summary.capacity + space_overprov) / 
float(ds.summary.capacity)) * 100 print '======================' print 'Name: {}'.format(ds.summary.name) print 'Space free: {}/{}'.format(space_free, space_total) print 'Space provisioned: {:.2f}%'.format(percent_prov) print 'VM count: {}'.format(len(ds.vm)) def summarize_cluster_datastores(self, cluster, ds_prefix): cluster_datastores = self.get_obj(obj_names=(cluster, ), prop_names=('datastore', ), obj_type=vim.ClusterComputeResource, only_one=True) ds_prefixes = [] for ds in cluster_datastores['datastore']: # Skip anything not matching prefix if not ds.summary.name.startswith(ds_prefix): print ds.summary.name continue # Strip numbers dashes and undercores from end this_prefix = ds.summary.name.rstrip('._-1234567890') if this_prefix not in ds_prefixes: ds_prefixes.append(this_prefix) if len(ds_prefixes) == 0: print 'No prefixes detected' else: print 'Datastore prefixes detected:' for ds in sorted(ds_prefixes): print '- {}'.format(ds) def choose_a_datastore(self, cluster, ds_prefix, prov_limit, vm_limit): cluster_datastores = self.get_obj(obj_names=(cluster, ), prop_names=('datastore', ), obj_type=vim.ClusterComputeResource, only_one=True) acceptable_ds = [] for ds in cluster_datastores['datastore']: # Get name and check if it has prefix ds_name = ds.summary.name if not ds_name.startswith(ds_prefix): continue # Get VM count and only continue if under limit vm_count = len(ds.vm) if vm_count >= vm_limit: continue # Getting free space now since used for later calc free_space = ds.summary.freeSpace or 0 # Calculate provision percentage prov_space = (ds.summary.uncommitted or 0) - free_space prov_perc = ((ds.summary.capacity + prov_space) / float(ds.summary.capacity)) * 100 if prov_perc >= prov_limit: continue # ASSERT: meets limits acceptable_ds.append({ 'name': ds_name, 'vm_count': vm_count, 'free_space': free_space, 'prov_perc': prov_perc, }) # Check if we found a datastore if not len(acceptable_ds): raise VMwareException( 'Could not find a {} datastore with {} 
perfix that has less than {} VMs and is under {:.0f}% provisioned ' .format(cluster, ds_prefix, vm_limit, prov_limit)) # Get DS with highest free space highest_free = sorted(acceptable_ds, key=lambda ds: ds['free_space'])[0]['name'] return highest_free def verify_datastore(self, cluster, ds): pass def get_status(self, vm_name): vm = self.get_obj(obj_names=(vm_name, ), prop_names=( 'config.hardware.numCPU', 'config.hardware.memoryMB', 'guest.guestState', 'guest.toolsStatus', 'guest.ipAddress', 'guest.hostName', 'config.guestFullName', 'config.version', 'runtime.bootTime', 'runtime.powerState', )) print 'Hostname : {}'.format(vm['guest.hostName']) print 'OS : {}'.format(vm['config.guestFullName']) print 'IP Address : {}'.format(vm['guest.ipAddress']) print 'CPU Count : {}'.format(vm['config.hardware.numCPU']) print 'Memory : {:.1f}GB'.format(vm['config.hardware.memoryMB'] / 1024) print 'Power State : {}'.format(vm['runtime.powerState']) print 'VM Status : {}'.format(vm['guest.guestState']) print 'Guest Tools : {}'.format(vm['guest.toolsStatus']) print 'VM Version : {}'.format(vm['config.version']) if 'runtime.bootTime' in vm: boot_time = vm['runtime.bootTime'].astimezone( get_localzone()).strftime('%m/%d/%Y %H:%M') print 'Last Boot : {}'.format(boot_time) def destroy(self, vm_name): vm = self.get_obj(obj_names=(vm_name, ), prop_names=('runtime.powerState', )) # If VM on, turn off if vm['runtime.powerState'] == vim.VirtualMachinePowerState.poweredOn: print 'Turning off VM before deleting...' 
self.start_task( vm['obj'].PowerOff(), success_msg='VM turned off, deleting from disk now...') self.start_task(vm['obj'].Destroy(), success_msg='VM {} has been destroyed'.format(vm_name)) def clone(self, template_name, vm_name, cpus, memory, datacenter_name, cluster_name, ds_name, ip, domain, dns, vlan, subnet, gateway, folder_path=None, *args, **kwargs): # Find objects datacenter = self.get_obj(obj_type=vim.Datacenter, obj_names=(datacenter_name)) cluster = self.get_obj(obj_type=vim.ClusterComputeResource, obj_names=(cluster_name)) datastore = self.get_obj(obj_type=vim.Datastore, obj_names=(ds_name, )) template_vm = self.get_obj(obj_names=(template_name, )) # Get folder, defaults to datacenter if folder_path: folder = self.get_folder(folder_path) else: folder = datacenter['obj'].vmFolder # Default objects resource_pool = cluster['obj'].resourcePool # Relocation specs relospec = vim.vm.RelocateSpec() relospec.datastore = datastore['obj'] relospec.pool = resource_pool # Configuration specs vmconf = vim.vm.ConfigSpec() vmconf.numCPUs = cpus vmconf.memoryMB = memory * 1024 vmconf.cpuHotAddEnabled = True vmconf.memoryHotAddEnabled = True # NIC mapping nic_map = vim.vm.customization.AdapterMapping() nic_map.adapter = vim.vm.customization.IPSettings() nic_map.adapter.ip = vim.vm.customization.FixedIp() nic_map.adapter.ip.ipAddress = ip nic_map.adapter.subnetMask = subnet nic_map.adapter.gateway = gateway nic_map.adapter.dnsDomain = domain # Global networking global_ip = vim.vm.customization.GlobalIPSettings() global_ip.dnsServerList = dns global_ip.dnsSuffixList = domain # Identity settings ident = vim.vm.customization.LinuxPrep() ident.domain = domain ident.hostName = vim.vm.customization.FixedName() ident.hostName.name = vm_name # Customization specs customspec = vim.vm.customization.Specification() customspec.nicSettingMap = [nic_map] customspec.globalIPSettings = global_ip customspec.identity = ident # Clone specs clonespec = vim.vm.CloneSpec() clonespec.location = 
relospec clonespec.config = vmconf clonespec.customization = customspec clonespec.powerOn = True clonespec.template = False # Create task task = template_vm['obj'].Clone(folder=folder, name=vm_name, spec=clonespec) result = self.start_task( task, task_tag='Cloning', success_msg='Cloned in folder {}'.format(folder_path), last_task=False) # Do not continue if we didn't get clone if not result: return # Change networking vm_props = ( 'runtime.host', 'config.hardware.device', ) vm_obj = self.get_obj(obj_names=(vm_name, ), prop_names=vm_props) vmconf = vim.vm.ConfigSpec() # Get right network network = self.get_portgroup(vlan, vm_obj['runtime.host'].name) # Modify NIC card nic = vim.vm.device.VirtualDeviceSpec() nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit for device in vm_obj['config.hardware.device']: if isinstance(device, vim.vm.device.VirtualEthernetCard): nic.device = device break nic.device.wakeOnLanEnabled = True portgroup_connection = vim.dvs.PortConnection() portgroup_connection.portgroupKey = network.key portgroup_connection.switchUuid = network.config.distributedVirtualSwitch.uuid nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo( ) nic.device.backing.port = portgroup_connection nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() nic.device.connectable.startConnected = True nic.device.connectable.allowGuestControl = True vmconf.deviceChange = [ nic, ] # Start task task = vm_obj['obj'].ReconfigVM_Task(vmconf) self.start_task(task, task_tag='Networking', success_msg='VIF reconfigured')
class CloudCollectorVMWareVSphere(CloudCollector):
    """Cloud collector that inventories a VMware vSphere endpoint.

    Produces records for clusters, hosts, vApps and VMs via the pyVmomi API.
    """

    def __init__(self, name, config, defaults, options):
        super().__init__(name, config, defaults, options)
        self.client = None
        self.maxDepth = 10          # recursion limit for VM-folder traversal
        self.vm2cluster = {}        # VM moId -> cluster name
        self.vm2dvsPort = {}        # VM moId -> list of DVS port records
        self.vm2dvsPortKey = {}     # VM moId -> list of seen port keys

    def _login(self):
        """Connect to vSphere; returns True on success, False otherwise."""
        host = self.config['host']
        port = self.config.get('port', 443)
        user = self.config['user']
        passwd = self.config['pass']
        context = None
        if not self.verify_ssl:
            if hasattr(ssl, '_create_unverified_context'):
                context = ssl._create_unverified_context()
        logging.info("logging in host={}, user={}".format(host, user))
        try:
            self.client = SmartConnect(host=host,
                                       user=user,
                                       pwd=passwd,
                                       port=port,
                                       sslContext=context)
        except Exception:  # FIX: was a bare except
            logging.error("failed to log in to host={}".format(host))
            return False
        if not self.client:
            return False
        logging.info("logged in")
        return True

    def _fetch(self, collect):
        """Collect networks, clusters/hosts, then vApps/VMs.

        Returns the accumulated list of records.
        """
        res = []
        self.content = self.client.RetrieveContent()

        # collect networks (DistributedVirtualPortgroup)
        logging.info("collecting networks")
        self.networks = {}
        for child in self.content.rootFolder.childEntity:
            if hasattr(child, 'network'):
                for net in child.network:
                    if isinstance(net, vim.DistributedVirtualPortgroup):
                        logging.debug("new network key={} name={}".format(
                            net.key, net.name))
                        self.networks[net.key] = net.name
                    elif isinstance(net, vim.Network):
                        logging.debug("new network name={}".format(net.name))
                        self.networks[net.name] = net.name
                    else:
                        logging.error("bad network name={} type={}".format(
                            net.name, net))
                        # TODO: handle this !

                    # fetch ports (best effort: plain Networks have no DVS
                    # config, the except below swallows that case)
                    try:
                        ports = net.config.distributedVirtualSwitch.FetchDVPorts()
                        for port in ports:
                            if port.connectee and port.connectee.connectedEntity and port.state:
                                vmid = port.connectee.connectedEntity._moId
                                state = port.state.runtimeInfo
                                if vmid not in self.vm2dvsPort:
                                    self.vm2dvsPort[vmid] = []
                                    self.vm2dvsPortKey[vmid] = []
                                try:
                                    if port.key not in self.vm2dvsPortKey[vmid]:
                                        self.vm2dvsPortKey[vmid].append(port.key)
                                        rec = {
                                            "portKey": port.key,
                                            "nicKey": port.connectee.nicKey,
                                            "connected": state.linkUp,
                                            "vlan": state.vlanIds[0].start,
                                            #"vlanRange": [ state.vlanIds.start, state.vlanIds.end ],
                                            "mac": state.macAddress,
                                            "portgroup": port.portgroupKey,
                                            "network": self.networks[port.portgroupKey],
                                            "dvsUUID": port.dvsUuid,
                                        }
                                        try:
                                            # NOTE(review): with a capture
                                            # group, re.split puts the group
                                            # text at index 1, so int() on it
                                            # looks like it always fails and
                                            # falls into the except — confirm
                                            # the linkPeer format upstream.
                                            rec["idx"] = int(
                                                re.split(r'\.(eth|vmnic)',
                                                         state.linkPeer,
                                                         maxsplit=1)[1])
                                            rec["ethName"] = state.linkPeer.split('.').pop()
                                        except Exception:
                                            pass
                                        self.vm2dvsPort[vmid].append(rec)
                                except Exception:
                                    pass
                        # sort by idx
                        for vmid in self.vm2dvsPort:
                            self.vm2dvsPort[vmid] = sorted(
                                self.vm2dvsPort[vmid],
                                key=lambda kv: kv.get('idx', 0))
                    except Exception:
                        pass
        #pprint(self.vm2dvsPort)

        # collect hosts
        logging.info("collecting clusters")
        for child in self.content.rootFolder.childEntity:
            if hasattr(child, 'hostFolder'):
                for cluster in child.hostFolder.childEntity:
                    if isinstance(cluster, vim.ComputeResource):
                        recs = self.__process_cluster(cluster)
                        if recs:
                            res.extend(recs)
                        if TEST:
                            break
                    else:
                        logging.error("bad compute resource name={}".format(
                            cluster.name))

        # collect VMs
        logging.info("collecting VApps and VMs")
        for child in self.content.rootFolder.childEntity:
            #print("child = {}".format(child.name))
            if hasattr(child, 'vmFolder'):
                datacenter = child
                vmFolder = datacenter.vmFolder
                with concurrent.futures.ThreadPoolExecutor(
                        max_workers=self.options["tasks"] or 1) as executor:
                    futures = []
                    for vm in vmFolder.childEntity:
                        futures.append(
                            executor.submit(self.__process_vmchild, vm))
                    for future in concurrent.futures.as_completed(futures):
                        recs = future.result()
                        if recs:
                            res.extend(recs)
        return res

    def __process_cluster(self, cluster):
        """Emit a record for one cluster and recurse into its hosts."""
        name = cluster.name
        logging.debug("new cluster name={}".format(name))

        # collect virtual machines: remember which cluster each VM lives in
        obj_view = self.content.viewManager.CreateContainerView(
            cluster, [vim.VirtualMachine], True)
        vm_list = obj_view.view
        obj_view.Destroy()
        for vm in vm_list:
            self.vm2cluster[vm._moId] = name

        # collect cluster data
        cs = cluster.summary
        rec = {
            "name": name,
            "id": cluster._moId,
            "cpus": cs.numCpuCores,
            "threads": cs.numCpuThreads,
            "hosts": cs.numHosts,
            "memory": cs.totalMemory // (1024 * 1024)
        }
        res = []
        res.append(
            self.new_record(
                'cluster', {
                    "name": rec["name"],
                    "id": rec["id"],
                    "cpus": rec["cpus"],
                    "memory": rec["memory"]
                }, rec))
        logging.info("collecting cluster hosts")
        for host in cluster.host:
            res.extend(self.__process_host(host))
            if TEST:
                break
        return res

    def __process_host(self, host):
        """Emit a record for one ESXi host (skipping disconnected hosts)."""
        # ignore host not connected
        if host.runtime.connectionState != 'connected':
            return []
        name = host.name
        logging.debug("new server name={}".format(name))
        hs = host.summary
        hp = hs.config.product
        hcpu = host.hardware.cpuPkg[0]
        rec = {
            "name": name,
            "id": host._moId,
            "management_ip": hs.managementServerIp,
            "primary_ip": None,
            "memory": hs.hardware.memorySize // (1024 * 1024),
            "cpus": hs.hardware.numCpuCores,
            "cpu_vendor": hcpu.vendor,
            "cpu_description": hcpu.description,
            "threads": hs.hardware.numCpuThreads,
            "nics": hs.hardware.numNics,
            "UUID": hs.hardware.uuid,
            "hw_vendor": hs.hardware.vendor,
            "hw_model": hs.hardware.model,
            "is_maintanance": hs.runtime.inMaintenanceMode,
            "status": hs.runtime.powerState,
            "is_on": (hs.runtime.powerState == "poweredOn" and 1 or 0),
            "os": hp.fullName,
            "sw_license": hp.licenseProductName,
            "os_type": hp.osType,
            "sw_name": hp.name,
            "sw_vendor": hp.vendor,
            "sw_version": hp.version,
            "cluster": host.parent.name,
            "storage": None,
            "networks": None,
            "storages": None
        }

        # storage
        storage = 0
        storage_free = 0
        datastore = []
        for ds in host.datastore:
            storage += ds.summary.capacity
            storage_free += ds.summary.freeSpace
            info = {
                "name": ds.summary.name,
                "capacity": ds.summary.capacity // (1024 * 1024),
                "free": ds.summary.freeSpace // (1024 * 1024),
                "ssd": None
            }
            if hasattr(ds.info, 'vmfs'):
                info["ssd"] = ds.info.vmfs.ssd
            datastore.append(info)
        if storage > 0:
            rec["storage"] = storage // (1024 * 1024)
            rec["storages"] = datastore

        # networks
        networks = []
        netdevinfo = {}
        for nic in host.config.network.vnic:
            netdevinfo[nic.spec.mac] = {
                "ip": nic.spec.ip.ipAddress or None,
                "subnet": nic.spec.ip.subnetMask or None
            }
        for nic in host.config.network.pnic:
            net = {
                "name": nic.device,
                "mac": nic.mac,
                "ip": netdevinfo.get(nic.mac, {}).get("ip") or (nic.spec.ip.ipAddress or None),
                "subnet": netdevinfo.get(nic.mac, {}).get("subnet")
            }
            if not rec["primary_ip"]:
                rec["primary_ip"] = net["ip"]
            if net["ip"] and net["ip"] == rec["primary_ip"]:
                net["primary"] = True
            networks.append(net)
        if len(networks) > 0:
            rec["networks"] = networks

        res = []
        res.append(
            self.new_record(
                "server", {
                    "name": rec["name"],
                    "project": rec["cluster"],
                    "id": rec["id"],
                    "cpus": int(rec.get("cpus") or 0),
                    "memory": int(rec.get("memory") or 0),
                    "storage": int(rec.get("storage") or 0),
                    "storages": rec.get("storages"),
                    "primary_ip": rec["primary_ip"],
                    "management_ip": rec["management_ip"],
                    "networks": rec["networks"],
                    "os": rec["os"],
                    "status": rec["status"],
                    "is_on": rec["is_on"],
                }, rec))
        return res

    def __process_vmchild(self, child, depth=1, prefix=None):
        """Recursively process a vmFolder child (Folder/VirtualApp/VM)."""
        res = []
        if isinstance(child, vim.Folder) or isinstance(child, vim.VirtualApp):
            if prefix is None:
                prefix = child.name
            else:
                prefix = prefix + '.' + child.name
            res.extend(self.__process_vapp(child, prefix))
        #print("vmchild = {}/{}".format(child.name, type(child).__name__))

        # if this is a group it will have children. if it does, recurse into
        # them and then return
        if hasattr(child, 'childEntity'):
            if depth > self.maxDepth:
                return []
            for c in child.childEntity:
                res.extend(self.__process_vmchild(c, depth + 1, prefix))
                if TEST:
                    break
            return res

        # if this is a vApp, it likely contains child VMs
        # (vApps can nest vApps, but it is hardly a common usecase, so ignore that)
        if isinstance(child, vim.VirtualApp):
            # FIX: was `vm.vm` — `vm` is undefined here, the vApp's VM list
            # lives on `child.vm`; also removed a stray second `break` that
            # made the original a syntax error.
            # NOTE(review): the vApp was already emitted by __process_vapp in
            # the branch above, so this second call looks like a duplicate —
            # confirm intent upstream.
            vmList = child.vm
            res.extend(self.__process_vapp(child, prefix))
            for c in vmList:
                res.extend(self.__process_vmchild(c, depth + 1, prefix))
                if TEST:
                    break
            return res

        if isinstance(child, vim.VirtualMachine):
            res.extend(self.__process_vm(child, prefix))
        return res

    def __process_vapp(self, vapp, name):
        """Emit a record for a folder/vApp grouping node."""
        logging.debug("new vapp name={}".format(name))
        vs = None
        # Plain folders have no summary; memory only applies to real vApps
        if isinstance(vapp, vim.VirtualApp):
            vs = vapp.summary
        rec = {
            "name": name,
            "id": vapp._moId,
            "memory": (vs and vs.configuredMemoryMB or 0)
        }
        res = []
        res.append(
            self.new_record(
                'vapp', {
                    "name": rec["name"],
                    "id": rec["id"],
                    "memory": (rec.get("memory") or 0)
                }, rec))
        return res

    def __process_vm(self, vm, parent):
        """Emit a record for one virtual machine."""
        name = vm.name
        logging.debug("new vm name={}".format(name))
        vs = vm.summary
        vs_runtime_host_name = None
        vs_runtime_cluster_name = None
        try:
            vs_runtime_host_name = vm.runtime.host.summary.name
            vs_runtime_cluster_name = vs.runtime.host.parent.name
        except Exception:
            pass
        rec = {
            "name": vm.name,
            "config_name": vs.config.name,
            "os": vs.config.guestFullName,
            "guest_id": vs.config.guestId,
            "description": vs.config.annotation,
            "cpus": vm.config.hardware.numCPU,
            "memory": vm.config.hardware.memoryMB,
            "disks": vs.config.numVirtualDisks,
            "id": vm._moId,
            "instanceUUID": vs.config.instanceUuid,
            "UUID": vs.config.uuid,
            "storage": None,
            "primary_ip": ((vs.guest.ipAddress and ":" not in vs.guest.ipAddress)
                           and vs.guest.ipAddress or None),  # no ipv6 here
            "status": vs.runtime.powerState,
            "is_on": (vs.runtime.powerState == "poweredOn" and 1 or 0),
            "host": vs_runtime_host_name,
            "project": parent,
            "vapp": parent,
            "datastore": [ds.datastore.name for ds in vm.storage.perDatastoreUsage],
            "cluster": vs_runtime_cluster_name or self.vm2cluster.get(vm._moId),
            #"management_ip": vs.runtime.host.parent.summary.managementServerIp # TODO: need this
            #"tags": vm.tags,
        }
        if vs.config.template:
            rec['template'] = 1

        # storage
        storage = 0
        storages = []
        for vd in vm.config.hardware.device:
            if isinstance(vd, vim.VirtualDisk):
                storage += vd.capacityInKB
                datastore = None
                thin = None
                # get attribs
                # NOTE(review): FlatVer2BackingInfo appears to subclass
                # FileBackingInfo, which would make the elif unreachable —
                # confirm against the pyVmomi class hierarchy.
                if isinstance(vd.backing,
                              vim.vm.device.VirtualDevice.FileBackingInfo):
                    datastore = vd.backing.datastore.name
                elif isinstance(vd.backing,
                                vim.vm.device.VirtualDisk.FlatVer2BackingInfo):
                    datastore = vd.backing.deviceName
                if isinstance(vd.backing,
                              vim.vm.device.VirtualDisk.FlatVer2BackingInfo):
                    thin = vd.backing.thinProvisioned
                # TODO: add info from vm.guest.disk
                storages.append({
                    "id": vd.key,
                    "name": vd.deviceInfo.label,
                    "capacity": int(vd.capacityInKB // 1024),
                    "free": None,
                    "profile": datastore,
                    "thin": thin,
                    "ssd": None,
                })
        if storage > 0:
            rec["storage"] = int(storage // 1024)
        if len(storages) > 0:
            rec["storages"] = storages

        # networks
        networks = []
        #pprint(vm.config.hardware.device)
        #pprint(vm.guest.net)
        for nic_idx in range(len(vm.guest.net)):
            nic = vm.guest.net[nic_idx]
            net = {
                "id": nic.deviceConfigId,
                "mac": nic.macAddress,
                "network": nic.network,
                "ip": None,
                "connected": nic.connected
            }
            if nic.ipConfig:
                for ip in nic.ipConfig.ipAddress:
                    if not net["ip"] and ip.prefixLength <= 32:  # DUMMY distinguish IPv4 address
                        net["ip"] = ip.ipAddress
                        if ip.prefixLength != 0:
                            net["prefix"] = ip.prefixLength
                        if not rec["primary_ip"]:
                            rec["primary_ip"] = net["ip"]
                    else:
                        if not net.get("aliases"):
                            net["aliases"] = []
                        net["aliases"].append(ip.ipAddress + "/" +
                                              str(ip.prefixLength or 32))
            if net["ip"] and net["ip"] == rec["primary_ip"]:
                net["primary"] = True

            # XXX: Cisco fix
            # vmguest is not reporting correct data, we have to map config
            # to reported IPs (this is a big heuristic)
            # FIX: use .get(...) so a VM absent from vm2dvsPort cannot raise
            # KeyError here (the elif below already guards with `in`).
            if len(vm.guest.net) == 1 and \
               net['id'] <= 0 and net['network'] is None and \
               net['mac'] == '00:11:22:33:44:55' and \
               net['ip'] and (len(self.vm2dvsPort.get(rec['id'], [])) ==
                              1 + len(net.get('aliases', []))):
                ports = self.vm2dvsPort[rec['id']]
                portIP = [net['ip']]
                portIP.extend(net.get('aliases', []))
                net.pop('aliases', None)
                for idx in range(len(ports)):
                    port = ports[idx]
                    net["id"] = port["nicKey"]
                    net["mac"] = port["mac"]
                    net["network"] = port["network"]
                    net["ip"] = portIP[idx].split('/', 1)[0]
                    net["connected"] = port["connected"]
                    net["__fix"] = 'ip2port-multi'
                    networks.append(net)
                    net = {}
            elif net['id'] <= 0 and net['network'] is None and net[
                    'ip'] and rec['id'] in self.vm2dvsPort:
                ports = self.vm2dvsPort[rec['id']]
                fixed = False
                for port in ports:
                    if port['mac'] == net['mac']:
                        net["id"] = port["nicKey"]
                        net["network"] = port["network"]
                        net["__fix"] = "mac2port"
                        fixed = True
                        break
                if not fixed:
                    net["id"] = 'CI-NIC' + str(nic_idx)
                    net["name"] = 'nic' + str(nic_idx)
                    net["__fix"] = "autoname"
                networks.append(net)
            elif net['ip']:
                networks.append(net)
        if len(networks) > 0:
            rec["networks"] = networks

        res = []
        res.append(
            self.new_record(
                'vm', {
                    "name": rec["name"],
                    "cluster": rec["cluster"],
                    "project": rec["vapp"],
                    "description": rec["description"],
                    "id": rec["id"],
                    "cpus": int(rec.get("cpus") or 0),
                    "memory": int(rec.get("memory") or 0),
                    "disks": int(rec.get("disks") or 0),
                    "storage": int(rec.get("storage") or 0),
                    "storages": rec.get("storages"),
                    "primary_ip": rec["primary_ip"],
                    "networks": rec.get("networks"),
                    "os": rec["os"],
                    "status": rec["status"],
                    "is_on": rec["is_on"],
                    "template": int(rec.get("template") or 0)
                }, rec))
        return res

    def _logout(self):
        """Tear down the vSphere session."""
        Disconnect(self.client)
class PyVmomiHelper(object):
    """Thin wrapper around pyVmomi for an Ansible-style module.

    Connects to vCenter/ESXi on construction and exposes helpers to locate,
    clone, power-cycle, inspect and remove virtual machines, plus guest
    file/process operations via VMware Tools.

    NOTE(review): this class uses Python 2 idioms (``dict.iteritems``,
    ``string.maketrans``, ``get_exception``) and will not run unmodified on
    Python 3.
    """

    def __init__(self, module):
        # `module` is the AnsibleModule-like object supplying params and
        # fail_json(); HAS_PYVMOMI is a module-level import guard.
        if not HAS_PYVMOMI:
            module.fail_json(msg='pyvmomi module required')
        self.module = module
        self.params = module.params
        self.si = None                 # ServiceInstance, set by smartconnect()
        self.smartconnect()
        self.datacenter = None         # cached vim.Datacenter
        self.folders = None            # folder tree from _build_folder_tree()
        # NOTE(review): __init__ sets `foldermap` but getfolders() assigns
        # `folder_map`; both spellings are used below — deploy_template()
        # checks `self.foldermap` while getvm() reads `self.folder_map`.
        self.foldermap = None

    def smartconnect(self):
        """Connect to the server from params and cache the content root."""
        kwargs = {'host': self.params['hostname'],
                  'user': self.params['username'],
                  'pwd': self.params['password']}
        # Disable certificate verification when the ssl module supports
        # contexts (TLSv1, no cert check).
        if hasattr(ssl, 'SSLContext'):
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            context.verify_mode = ssl.CERT_NONE
            kwargs['sslContext'] = context
        # CONNECT TO THE SERVER
        try:
            self.si = SmartConnect(**kwargs)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="Cannot connect to %s: %s" % (kwargs['host'], err))
        # Make sure the session is torn down at interpreter exit.
        atexit.register(Disconnect, self.si)
        self.content = self.si.RetrieveContent()

    def _build_folder_tree(self, folder, tree={}, treepath=None):
        """Recursively build a dict tree of folders and the VMs they hold.

        Returns {'virtualmachines': [...], 'subfolders': {child: subtree},
        'vimobj': folder, 'name': folder.name}.

        NOTE(review): `tree={}` is a mutable default argument and `treepath`
        is never used; `tree` is immediately rebound so the default is
        harmless here, but it is a latent trap.
        """
        tree = {'virtualmachines': [],
                'subfolders': {},
                'vimobj': folder,
                'name': folder.name}
        children = None
        if hasattr(folder, 'childEntity'):
            children = folder.childEntity
        if children:
            for child in children:
                if child == folder or child in tree:
                    continue
                if type(child) == vim.Folder:
                    ctree = self._build_folder_tree(child)
                    tree['subfolders'][child] = dict.copy(ctree)
                elif type(child) == vim.VirtualMachine:
                    tree['virtualmachines'].append(child)
        else:
            # A leaf that is itself a VM is returned as-is rather than wrapped.
            if type(folder) == vim.VirtualMachine:
                return folder
        return tree

    def _build_folder_map(self, folder, vmap={}, inpath='/'):
        ''' Build a searchable index for vms+uuids+folders '''
        # `folder` may arrive as a (key, subtree) tuple when recursing over
        # the 'subfolders' dict items; unwrap to the subtree.
        if type(folder) == tuple:
            folder = folder[1]
        # NOTE(review): `vmap={}` is a mutable default argument shared across
        # top-level calls on the same class object.
        if not 'names' in vmap:
            vmap['names'] = {}
        if not 'uuids' in vmap:
            vmap['uuids'] = {}
        if not 'paths' in vmap:
            vmap['paths'] = {}
        # Root maps to the datacenter's implicit '/vm' folder.
        if inpath == '/':
            thispath = '/vm'
        else:
            thispath = os.path.join(inpath, folder['name'])
        if thispath not in vmap['paths']:
            vmap['paths'][thispath] = []
        # helpful for isolating folder objects later on
        if not 'path_by_fvim' in vmap:
            vmap['path_by_fvim'] = {}
        if not 'fvim_by_path' in vmap:
            vmap['fvim_by_path'] = {}
        # store object by path and store path by object
        vmap['fvim_by_path'][thispath] = folder['vimobj']
        vmap['path_by_fvim'][folder['vimobj']] = thispath
        # helpful for isolating vm objects later on
        if not 'path_by_vvim' in vmap:
            vmap['path_by_vvim'] = {}
        if not 'vvim_by_path' in vmap:
            vmap['vvim_by_path'] = {}
        if thispath not in vmap['vvim_by_path']:
            vmap['vvim_by_path'][thispath] = []
        for item in folder.items():
            k = item[0]
            v = item[1]
            if k == 'name':
                pass
            elif k == 'subfolders':
                # Recurse into each (folder, subtree) pair.
                for x in v.items():
                    vmap = self._build_folder_map(x, vmap=vmap, inpath=thispath)
            elif k == 'virtualmachines':
                for x in v:
                    # names -> [uuids] (a name can map to several VMs)
                    if not x.config.name in vmap['names']:
                        vmap['names'][x.config.name] = []
                    vmap['names'][x.config.name].append(x.config.uuid)
                    vmap['uuids'][x.config.uuid] = x.config.name
                    vmap['paths'][thispath].append(x.config.uuid)
                    if x not in vmap['vvim_by_path'][thispath]:
                        vmap['vvim_by_path'][thispath].append(x)
                    if x not in vmap['path_by_vvim']:
                        vmap['path_by_vvim'][x] = thispath
        return vmap

    def getfolders(self):
        """Build (and cache) the folder tree and its searchable map."""
        if not self.datacenter:
            self.get_datacenter()
        self.folders = self._build_folder_tree(self.datacenter.vmFolder)
        self.folder_map = self._build_folder_map(self.folders)
        return (self.folders, self.folder_map)

    def get_datacenter(self):
        """Resolve and cache the vim.Datacenter named in params."""
        self.datacenter = get_obj(self.content,
                                  [vim.Datacenter],
                                  self.params['datacenter'])

    def getvm(self, name=None, uuid=None, folder=None, name_match=None):
        """Find a VM by uuid, by name within a folder, or by global name.

        Lookup priority: uuid > folder+name > name (optionally constrained
        by name_match='first'/'last'). Returns the vim.VirtualMachine or
        None.
        """
        # https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.SearchIndex.html
        # self.si.content.searchIndex.FindByInventoryPath('DC1/vm/test_folder')
        vm = None
        folder_path = None  # NOTE(review): assigned but never used
        if uuid:
            vm = self.si.content.searchIndex.FindByUuid(uuid=uuid, vmSearch=True)
        elif folder:
            # Normalize away a trailing slash (mutates params in place).
            if self.params['folder'].endswith('/'):
                self.params['folder'] = self.params['folder'][0:-1]
            # Build the absolute folder path to pass into the search method
            searchpath = None
            if self.params['folder'].startswith('/vm'):
                searchpath = '%s' % self.params['datacenter']
                searchpath += self.params['folder']
            elif self.params['folder'].startswith('/'):
                searchpath = '%s' % self.params['datacenter']
                searchpath += '/vm' + self.params['folder']
            else:
                # need to look for matching absolute path
                if not self.folders:
                    self.getfolders()
                paths = self.folder_map['paths'].keys()
                paths = [x for x in paths if x.endswith(self.params['folder'])]
                if len(paths) > 1:
                    self.module.fail_json(msg='%s matches more than one folder. Please use the absolute path starting with /vm/' % self.params['folder'])
                elif paths:
                    searchpath = paths[0]
            if searchpath:
                # get all objects for this path ...
                fObj = self.si.content.searchIndex.FindByInventoryPath(searchpath)
                if fObj:
                    if isinstance(fObj, vim.Datacenter):
                        fObj = fObj.vmFolder
                    for cObj in fObj.childEntity:
                        if not type(cObj) == vim.VirtualMachine:
                            continue
                        if cObj.name == name:
                            vm = cObj
                            break
        else:
            vmList = get_all_objs(self.content, [vim.VirtualMachine])
            if name_match:
                if name_match == 'first':
                    vm = get_obj(self.content, [vim.VirtualMachine], name)
                elif name_match == 'last':
                    matches = []
                    vmList = get_all_objs(self.content, [vim.VirtualMachine])
                    for thisvm in vmList:
                        if thisvm.config.name == name:
                            matches.append(thisvm)
                    if matches:
                        vm = matches[-1]
            else:
                matches = []
                vmList = get_all_objs(self.content, [vim.VirtualMachine])
                for thisvm in vmList:
                    if thisvm.config.name == name:
                        matches.append(thisvm)
                if len(matches) > 1:
                    # BUG: bare `module` is not in scope here — should be
                    # `self.module`; this raises NameError when hit.
                    module.fail_json(msg='more than 1 vm exists by the name %s. Please specify a uuid, or a folder, or a datacenter or name_match' % name)
                if matches:
                    vm = matches[0]
        return vm

    def set_powerstate(self, vm, state, force):
        """
        Set the power status for a VM determined by the current and
        requested states. force is forceful

        Returns an Ansible-style result dict, or a plain warning string when
        the VM is mid-transition and force is not set.
        """
        facts = self.gather_facts(vm)
        expected_state = state.replace('_', '').lower()
        current_state = facts['hw_power_status'].lower()
        result = {}

        # Need Force
        if not force and current_state not in ['poweredon', 'poweredoff']:
            return "VM is in %s power state. Force is required!" % current_state

        # State is already true
        if current_state == expected_state:
            result['changed'] = False
            result['failed'] = False
        else:
            task = None
            try:
                if expected_state == 'poweredoff':
                    task = vm.PowerOff()
                elif expected_state == 'poweredon':
                    task = vm.PowerOn()
                elif expected_state == 'restarted':
                    # Reset only makes sense from an on/transitioning state.
                    if current_state in ('poweredon', 'poweringon', 'resetting'):
                        task = vm.Reset()
                    else:
                        result = {'changed': False, 'failed': True,
                                  'msg': "Cannot restart VM in the current state %s" % current_state}
            except Exception:
                result = {'changed': False, 'failed': True,
                          'msg': get_exception()}
            if task:
                self.wait_for_task(task)
                if task.info.state == 'error':
                    result = {'changed': False, 'failed': True,
                              'msg': task.info.error.msg}
                else:
                    result = {'changed': True, 'failed': False}

        # need to get new metadata if changed
        if result['changed']:
            newvm = self.getvm(uuid=vm.config.uuid)
            facts = self.gather_facts(newvm)
            result['instance'] = facts
        return result

    def gather_facts(self, vm):
        ''' Gather facts from vim.VirtualMachine object. '''
        facts = {
            'module_hw': True,
            'hw_name': vm.config.name,
            'hw_power_status': vm.summary.runtime.powerState,
            'hw_guest_full_name': vm.summary.guest.guestFullName,
            'hw_guest_id': vm.summary.guest.guestId,
            'hw_product_uuid': vm.config.uuid,
            'hw_processor_count': vm.config.hardware.numCPU,
            'hw_memtotal_mb': vm.config.hardware.memoryMB,
            'hw_interfaces': [],
            'ipv4': None,
            'ipv6': None,
        }

        # MAC -> [guest-reported IPs]
        netDict = {}
        for device in vm.guest.net:
            mac = device.macAddress
            ips = list(device.ipAddress)
            netDict[mac] = ips

        # Last address seen wins for the flat ipv4/ipv6 facts.
        # NOTE(review): iteritems() is Python-2-only.
        for k, v in netDict.iteritems():
            for ipaddress in v:
                if ipaddress:
                    if '::' in ipaddress:
                        facts['ipv6'] = ipaddress
                    else:
                        facts['ipv4'] = ipaddress

        # One hw_ethN fact per NIC-like hardware device (anything with a MAC).
        for idx, entry in enumerate(vm.config.hardware.device):
            if not hasattr(entry, 'macAddress'):
                continue
            factname = 'hw_eth' + str(idx)
            facts[factname] = {
                'addresstype': entry.addressType,
                'label': entry.deviceInfo.label,
                'macaddress': entry.macAddress,
                'ipaddresses': netDict.get(entry.macAddress, None),
                'macaddress_dash': entry.macAddress.replace(':', '-'),
                'summary': entry.deviceInfo.summary,
            }
            facts['hw_interfaces'].append('eth' + str(idx))
        return facts

    def remove_vm(self, vm):
        """Destroy the VM and report an Ansible-style result dict."""
        # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
        task = vm.Destroy()
        self.wait_for_task(task)
        if task.info.state == 'error':
            return ({'changed': False, 'failed': True,
                     'msg': task.info.error.msg})
        else:
            return ({'changed': True, 'failed': False})

    def deploy_template(self, poweron=False, wait_for_ip=False):
        """Clone the template named in params into the requested folder.

        Optionally resizes the first disk, powers the clone on and waits
        for a guest IP. Returns an Ansible-style result dict with the new
        VM's facts under 'instance'.

        NOTE(review): `poweron` is never read — power-on only happens via
        `wait_for_ip`.
        """
        # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
        # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
        # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
        # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html

        # FIXME:
        #   - clusters
        #   - multiple datacenters
        #   - resource pools
        #   - multiple templates by the same name
        #   - multiple disks
        #   - changing the esx host is ignored?
        #   - static IPs

        # FIXME: need to search for this in the same way as guests to ensure accuracy
        template = get_obj(self.content, [vim.VirtualMachine],
                           self.params['template'])
        if not template:
            self.module.fail_json(msg="Could not find a template named %s" % self.params['template'])

        # NOTE(review): `datacenters` is fetched but never used.
        datacenters = get_all_objs(self.content, [vim.Datacenter])
        datacenter = get_obj(self.content, [vim.Datacenter],
                             self.params['datacenter'])
        if not datacenter:
            self.module.fail_json(msg='No datacenter named %s was found' % self.params['datacenter'])

        if not self.foldermap:
            self.folders, self.foldermap = self.getfolders()

        # find matching folders
        if self.params['folder'].startswith('/'):
            folders = [x for x in self.foldermap['fvim_by_path'].items()
                       if x[0] == self.params['folder']]
        else:
            folders = [x for x in self.foldermap['fvim_by_path'].items()
                       if x[0].endswith(self.params['folder'])]

        # throw error if more than one match or no matches
        if len(folders) == 0:
            self.module.fail_json(msg='no folder matched the path: %s' % self.params['folder'])
        elif len(folders) > 1:
            self.module.fail_json(msg='too many folders matched "%s", please give the full path starting with /vm/' % self.params['folder'])

        # grab the folder vim object
        destfolder = folders[0][1]

        # FIXME: cluster or hostsystem ... ?
        #cluster = get_obj(self.content, [vim.ClusterComputeResource], self.params['esxi']['hostname'])
        hostsystem = get_obj(self.content, [vim.HostSystem],
                             self.params['esxi_hostname'])
        resource_pools = get_all_objs(self.content, [vim.ResourcePool])

        # set the destination datastore in the relocation spec
        if self.params['disk']:
            datastore_name = self.params['disk'][0]['datastore']
            datastore = get_obj(self.content, [vim.Datastore], datastore_name)
        else:
            # use the template's existing DS
            disks = [x for x in template.config.hardware.device
                     if isinstance(x, vim.vm.device.VirtualDisk)]
            datastore = disks[0].backing.datastore
            datastore_name = datastore.name

        # create the relocation spec
        relospec = vim.vm.RelocateSpec()
        relospec.datastore = datastore
        # fixme ... use the pool from the cluster if given
        relospec.pool = resource_pools[0]
        relospec.host = hostsystem

        clonespec_kwargs = {}
        clonespec_kwargs['location'] = relospec

        # create disk spec if not default
        if self.params['disk']:
            # grab the template's first disk and modify it for this customization
            disks = [x for x in template.config.hardware.device
                     if isinstance(x, vim.vm.device.VirtualDisk)]
            diskspec = vim.vm.device.VirtualDeviceSpec()
            # set the operation to edit so that it knows to keep other settings
            diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            diskspec.device = disks[0]

            # get the first disk attributes
            pspec = self.params.get('disk')[0]

            # is it thin?
            if pspec.get('type', '').lower() == 'thin':
                diskspec.device.backing.thinProvisioned = True

            # which datastore?
            if pspec.get('datastore'):
                # This is already handled by the relocation spec,
                # but it needs to eventually be handled for all the
                # other disks defined
                pass

            # what size is it?
            if [x for x in pspec.keys() if x.startswith('size_') or x == 'size']:
                # size_tb, size_gb, size_mb, size_kb, size_b ...?
                if 'size' in pspec:
                    # Split "10gb"-style values into digits + unit.
                    # NOTE(review): string.maketrans / str.translate with a
                    # delete argument is Python-2-only.
                    # http://stackoverflow.com/a/1451407
                    trans = string.maketrans('', '')
                    chars = trans.translate(trans, string.digits)
                    expected = pspec['size'].translate(trans, chars)
                    expected = expected
                    unit = pspec['size'].replace(expected, '').lower()
                    expected = int(expected)
                else:
                    # size_<unit> form: unit comes from the key name.
                    param = [x for x in pspec.keys() if x.startswith('size_')][0]
                    unit = param.split('_')[-1].lower()
                    expected = [x[1] for x in pspec.items() if x[0].startswith('size_')][0]
                    expected = int(expected)

                kb = None
                if unit == 'tb':
                    kb = expected * 1024 * 1024 * 1024
                elif unit == 'gb':
                    kb = expected * 1024 * 1024
                # BUG: ' mb' has a leading space, so a 'mb' unit never
                # matches and falls through to the unsupported-unit error.
                elif unit ==' mb':
                    kb = expected * 1024
                elif unit == 'kb':
                    kb = expected
                else:
                    self.module.fail_json(msg='%s is not a supported unit for disk size' % unit)
                diskspec.device.capacityInKB = kb

            # tell the configspec that the disk device needs to change
            configspec = vim.vm.ConfigSpec(deviceChange=[diskspec])
            clonespec_kwargs['config'] = configspec

        clonespec = vim.vm.CloneSpec(**clonespec_kwargs)
        task = template.Clone(folder=destfolder,
                              name=self.params['name'],
                              spec=clonespec)
        self.wait_for_task(task)

        if task.info.state == 'error':
            # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
            # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
            return ({'changed': False, 'failed': True,
                     'msg': task.info.error.msg})
        else:
            vm = task.info.result
            if wait_for_ip:
                self.set_powerstate(vm, 'poweredon', force=False)
                self.wait_for_vm_ip(vm)
            vm_facts = self.gather_facts(vm)
            return ({'changed': True, 'failed': False, 'instance': vm_facts})

    def wait_for_task(self, task):
        """Poll once per second until the task reaches a terminal state."""
        # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
        # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
        # https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
        while task.info.state not in ['success', 'error']:
            time.sleep(1)

    def wait_for_vm_ip(self, vm, poll=100, sleep=5):
        """Poll (up to poll*sleep seconds) until the VM reports any IP.

        Returns the last gathered facts dict regardless of success.
        """
        ips = None
        facts = {}
        thispoll = 0
        while not ips and thispoll <= poll:
            # Re-fetch by uuid so freshly-assigned addresses are visible.
            newvm = self.getvm(uuid=vm.config.uuid)
            facts = self.gather_facts(newvm)
            if facts['ipv4'] or facts['ipv6']:
                ips = True
            else:
                time.sleep(sleep)
                thispoll += 1
        #import epdb; epdb.st()
        return facts

    def fetch_file_from_guest(self, vm, username, password, src, dest):
        ''' Use VMWare's filemanager api to fetch a file over http '''

        result = {'failed': False}

        tools_status = vm.guest.toolsStatus
        if (tools_status == 'toolsNotInstalled' or
                tools_status == 'toolsNotRunning'):
            result['failed'] = True
            result['msg'] = "VMwareTools is not installed or is not running in the guest"
            return result

        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
        creds = vim.vm.guest.NamePasswordAuthentication(
            username=username, password=password
        )

        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
        fti = self.content.guestOperationsManager.fileManager. \
            InitiateFileTransferFromGuest(vm, creds, src)
        result['size'] = fti.size
        result['url'] = fti.url

        # Use module_utils to fetch the remote url returned from the api
        rsp, info = fetch_url(self.module, fti.url, use_proxy=False,
                              force=True, last_mod_time=None,
                              timeout=10, headers=None)

        # save all of the transfer data
        # NOTE(review): iteritems() is Python-2-only.
        for k, v in info.iteritems():
            result[k] = v

        # exit early if xfer failed
        if info['status'] != 200:
            result['failed'] = True
            return result

        # attempt to read the content and write it
        try:
            with open(dest, 'wb') as f:
                f.write(rsp.read())
        except Exception as e:
            result['failed'] = True
            result['msg'] = str(e)

        return result

    def push_file_to_guest(self, vm, username, password, src, dest, overwrite=True):
        ''' Use VMWare's filemanager api to push a file over http '''

        result = {'failed': False}

        tools_status = vm.guest.toolsStatus
        if (tools_status == 'toolsNotInstalled' or
                tools_status == 'toolsNotRunning'):
            result['failed'] = True
            result['msg'] = "VMwareTools is not installed or is not running in the guest"
            return result

        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
        creds = vim.vm.guest.NamePasswordAuthentication(
            username=username, password=password
        )

        # the api requires a filesize in bytes
        filesize = None
        fdata = None
        try:
            #filesize = os.path.getsize(src)
            filesize = os.stat(src).st_size
            fdata = None
            with open(src, 'rb') as f:
                fdata = f.read()
            result['local_filesize'] = filesize
        except Exception as e:
            result['failed'] = True
            result['msg'] = "Unable to read src file: %s" % str(e)
            return result

        # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.guest.FileManager.html#initiateFileTransferToGuest
        file_attribute = vim.vm.guest.FileManager.FileAttributes()
        url = self.content.guestOperationsManager.fileManager. \
            InitiateFileTransferToGuest(vm, creds, dest, file_attribute,
                                        filesize, overwrite)

        # PUT the filedata to the url ...
        rsp, info = fetch_url(self.module, url, method="put", data=fdata,
                              use_proxy=False, force=True,
                              last_mod_time=None, timeout=10, headers=None)
        result['msg'] = str(rsp.read())

        # save all of the transfer data
        # NOTE(review): iteritems() is Python-2-only.
        for k, v in info.iteritems():
            result[k] = v

        return result

    def run_command_in_guest(self, vm, username, password, program_path,
                             program_args, program_cwd, program_env):
        """Start a program in the guest via VMware Tools and wait for it.

        Returns a result dict with pid, owner, start/end times and exit code.
        NOTE(review): `program_env` is accepted but never passed to the API.
        """
        result = {'failed': False}

        tools_status = vm.guest.toolsStatus
        if (tools_status == 'toolsNotInstalled' or
                tools_status == 'toolsNotRunning'):
            result['failed'] = True
            result['msg'] = "VMwareTools is not installed or is not running in the guest"
            return result

        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
        creds = vim.vm.guest.NamePasswordAuthentication(
            username=username, password=password
        )

        res = None
        pdata = None
        try:
            # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
            pm = self.content.guestOperationsManager.processManager
            # https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
            ps = vim.vm.guest.ProcessManager.ProgramSpec(
                #programPath=program,
                #arguments=args
                programPath=program_path,
                arguments=program_args,
                workingDirectory=program_cwd,
            )
            res = pm.StartProgramInGuest(vm, creds, ps)
            result['pid'] = res
            pdata = pm.ListProcessesInGuest(vm, creds, [res])

            # wait for pid to finish
            while not pdata[0].endTime:
                time.sleep(1)
                pdata = pm.ListProcessesInGuest(vm, creds, [res])

            result['owner'] = pdata[0].owner
            result['startTime'] = pdata[0].startTime.isoformat()
            result['endTime'] = pdata[0].endTime.isoformat()
            result['exitCode'] = pdata[0].exitCode
            if result['exitCode'] != 0:
                result['failed'] = True
                result['msg'] = "program exited non-zero"
            else:
                result['msg'] = "program completed successfully"
        except Exception as e:
            result['msg'] = str(e)
            result['failed'] = True

        return result
def run(): host = __pillar__["proxy"]["vcenter"] user = __pillar__["proxy"]["username"] pwd = junos_decode(__pillar__["proxy"]["encoded_password"]) port = __pillar__["proxy"]["port"] context = None if hasattr(ssl, '_create_unverified_context'): context = ssl._create_unverified_context() #si = __salt__["vsphere.get_service_instance_via_proxy"]() si = SmartConnect(host=host, user=user, pwd=pwd, port=port, sslContext=context) if not si: print("Could not connect to the specified host using specified " "username and password") return -1 atexit.register(Disconnect, si) content = si.RetrieveContent() hosts = GetHosts(content) host_list = [] for host in hosts: host_name = host.config.network.dnsConfig.hostName host = host.config.host host_list.append({"host": host, "host_name": host_name}) #print(host_list) vms = GetVMs(content) vm_list = [] for vm in vms: print(" ") #print(vm.summary) #print("####end of vm######") if vm.summary.guest != None: ip = vm.summary.guest.ipAddress name = vm.summary.config.name host = vm.summary.runtime.host ###### match the HostSystem to the Hostname of the physical host for item in host_list: if item["host"] == host: host_name = item["host_name"] break #print("Name : ", name) #print("IP : ", ip) if vm.config != None: #print("########vm config name######") #print(vm.config) for device in vm.config.hardware.device: #print("########device######") #print(type(device)) #print(device) if hasattr(device, 'macAddress'): mac = device.macAddress #print("MAC : ", mac) vm_list.append({ 'tags': { "name": name }, "fields": { "ip": ip, "mac": mac, "host": host_name } }) #for vm in vm_list: print(vm) return vm_list
def main(): """ Manage the vCenter Integration Node configuration """ # Handling arguments args = get_args() debug = args.debug folder_name = None if args.folder: folder_name = args.folder[0] log_file = None if args.logfile: log_file = args.logfile[0] name = args.name[0] nuage_enterprise = args.nuage_enterprise[0] nuage_host = args.nuage_host[0] nuage_port = args.nuage_port[0] nuage_password = None if args.nuage_password: nuage_password = args.nuage_password[0] nuage_username = args.nuage_username[0] nuage_vm_enterprise = None if args.nuage_vm_enterprise: nuage_vm_enterprise = args.nuage_vm_enterprise[0] nuage_vm_domain = None if args.nuage_vm_domain: nuage_vm_domain = args.nuage_vm_domain[0] nuage_vm_zone = None if args.nuage_vm_zone: nuage_vm_zone = args.nuage_vm_zone[0] nuage_vm_subnet = None if args.nuage_vm_subnet: nuage_vm_subnet = args.nuage_vm_subnet[0] nuage_vm_ip = None if args.nuage_vm_ip: nuage_vm_ip = args.nuage_vm_ip[0] nuage_vm_user = None if args.nuage_vm_user: nuage_vm_user = args.nuage_vm_user[0] power_on = not args.nopoweron resource_pool_name = None if args.resource_pool: resource_pool_name = args.resource_pool[0] nosslcheck = args.nosslcheck template = args.template[0] vcenter_host = args.vcenter_host[0] vcenter_port = args.vcenter_port[0] vcenter_password = None if args.vcenter_password: vcenter_password = args.vcenter_password[0] vcenter_username = args.vcenter_username[0] verbose = args.verbose # Logging settings if debug: log_level = logging.DEBUG elif verbose: log_level = logging.INFO else: log_level = logging.WARNING logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level) logger = logging.getLogger(__name__) # Disabling SSL verification if set if nosslcheck: logger.debug('Disabling SSL certificate verification.') requests.packages.urllib3.disable_warnings() import ssl if hasattr(ssl, '_create_unverified_context'): ssl._create_default_https_context = ssl._create_unverified_context # Getting 
user password for Nuage connection if nuage_password is None: logger.debug( 'No command line Nuage password received, requesting Nuage password from user' ) nuage_password = getpass.getpass( prompt='Enter password for Nuage host %s for user %s: ' % (nuage_host, nuage_username)) # Getting user password for vCenter connection if vcenter_password is None: logger.debug( 'No command line vCenter password received, requesting vCenter password from user' ) vcenter_password = getpass.getpass( prompt='Enter password for vCenter host %s for user %s: ' % (vcenter_host, vcenter_username)) try: vc = None nc = None # Connecting to Nuage try: logger.info('Connecting to Nuage server %s:%s with username %s' % (nuage_host, nuage_port, nuage_username)) nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url="https://%s:%s" % (nuage_host, nuage_port)) nc.start() except IOError, e: pass if not nc or not nc.is_current_session(): logger.error( 'Could not connect to Nuage host %s with user %s, enterprise %s and specified password' % (nuage_host, nuage_username, nuage_enterprise)) return 1 # Connecting to vCenter try: logger.info('Connecting to vCenter server %s:%s with username %s' % (vcenter_host, vcenter_port, vcenter_username)) vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_port)) except IOError, e: pass
def main(): supportedArgs = [ (["h:", "host="], "localhost", "Host name", "host"), (["u:", "user="******"root", "User name", "user"), (["p:", "pwd="], "ca$hc0w", "Password", "pwd"), (["o:", "otherhost="], "", "Host name of other host", "otherhost"), (["q:", "otherpwd="], "ca$hc0w", "Password of other host", "otherpwd"), (["d:", "ds="], None, "Datastore name", "ds"), (["v:", "vm="], "vdm_multihost", "VM name", "vm"), (["i:", "numiter="], "1", "Number of iterations", "iter") ] supportedToggles = [ (["usage", "help"], False, "Show usage information", "usage"), (["cleanup", "c"], True, "Try to cleanup test vms from previous runs", "cleanup") ] args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles) if args.GetKeyValue("usage") == True: args.Usage() sys.exit(0) si = SmartConnect(host=args.GetKeyValue("host"), user=args.GetKeyValue("user"), pwd=args.GetKeyValue("pwd")) Log("Connected to host " + args.GetKeyValue("host")) # Process command line numiter = int(args.GetKeyValue("iter")) doCleanup = args.GetKeyValue("cleanup") status = "PASS" resultsArray = [] serviceInstanceContent = si.RetrieveContent() vdiskMgr = serviceInstanceContent.GetVirtualDiskManager() if vdiskMgr == None: Log("Virtual Disk Manager not found") sys.exit(0) otherHostSetup = OtherHostSetup(args) otherHostSetup.SetupOtherHost() for i in range(numiter): bigClock = StopWatch() try: try: ph = Phase() vdiskMgrTest = VirtualDiskManagerTest(si, vdiskMgr, args) vdiskMgrTest.RunTests() ph.SetPhase("Virtual Disk Manager Tests") status = "PASS" finally: bigClock.finish("iteration " + str(i)) except Exception as e: Log("Caught exception : " + str(e)) status = "FAIL" Log("TEST RUN COMPLETE: " + status) resultsArray.append(status) otherHostSetup.CleanupOtherHost() Log("Results for each iteration: ") for i in range(len(resultsArray)): Log("Iteration " + str(i) + ": " + resultsArray[i])
def main(): """ function runs all of the other functions. Some parts of this function are taken from the getallvms.py script from the pyvmomi gihub repo """ args = get_args() try: si = None try: si = SmartConnect(host=args.host, user=args.user, pwd=args.password, port=int(args.port)) except IOError, e: pass if not si: print "Could not connect to the specified host using " \ "specified username and password" return -1 atexit.register(Disconnect, si) content = si.RetrieveContent() datacenter = content.rootFolder.childEntity[0] datastores = datacenter.datastore vmfolder = datacenter.vmFolder vmlist = vmfolder.childEntity dsvmkey = [] # each datastore found on ESXi host or vCenter is passed # to the find_vmx and examine_vmx functions to find all # VMX files and search them for ds in datastores: find_vmx(ds.browser, "[%s]" % ds.summary.name, datacenter.name, ds.summary.name) examine_vmx(ds.summary.name) updatevmx_path() # each VM found in the inventory is passed to the getvm_info # function to get it's instanceuuid for vm in vmlist: getvm_info(vm) # each key from the DS_VM hashtable is added to a separate # list for comparison later for a in DS_VM.keys(): dsvmkey.append(a) # each uuid in the dsvmkey list is passed to the find_match # function to look for a match print "The following virtual machine(s) do not exist in the " \ "inventory, but exist on a datastore " \ "(Display Name, Datastore/Folder name):" for match in dsvmkey: find_match(match) Disconnect(si)
(["v", "verbose"], False, "Verbose output", "verbose") ] # # Parse command line parameters and process usage. # args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles) if args.GetKeyValue("usage") == True: args.Usage() sys.exit(0) # # Define a connection with the host. # si = SmartConnect(host=args.GetKeyValue("host"), user=args.GetKeyValue("user"), pwd=args.GetKeyValue("pwd")) atexit.register(Disconnect, si) def main(): # # Open connection with host # hs = host.GetHostSystem(si) verbose = (args.GetKeyValue("verbose") == True) # # Run plugin tests. # vslmsvcPlugginTest = VslmsvcPlugginTests(verbose); vslmsvcPlugginTest.runTests()
def main(): args = GetArgs() try: vmnames = args.vm si = None if args.password: password = args.password else: password = getpass.getpass( prompt="Enter password for host {} and user {}: ".format( args.host, args.user)) try: si = SmartConnect(host=args.host, user=args.user, pwd=password, port=int(args.port)) except IOError as e: pass if not si: print( 'Could not connect to the specified host using specified username and password' ) return -1 atexit.register(Disconnect, si) content = si.RetrieveContent() # Get vCenter date and time for use as baseline when querying for counters vchtime = si.CurrentTime() # Get all the performance counters perf_dict = {} perfList = content.perfManager.perfCounter for counter in perfList: counter_full = "{}.{}.{}".format(counter.groupInfo.key, counter.nameInfo.key, counter.rollupType) perf_dict[counter_full] = counter.key retProps = GetProperties(content, [vim.VirtualMachine], ['name', 'runtime.powerState'], vim.VirtualMachine) #Find VM supplied as arg and use Managed Object Reference (moref) for the PrintVmInfo for vm in retProps: if (vm['name'] in vmnames) and (vm['runtime.powerState'] == "poweredOn"): PrintVmInfo(vm['moref'], content, vchtime, args.interval, perf_dict) elif vm['name'] in vmnames: print( 'ERROR: Problem connecting to Virtual Machine. {} is likely powered off or suspended' .format(vm['name'])) except vmodl.MethodFault as e: print('Caught vmodl fault : ' + e.msg) return -1 except Exception as e: print('Caught exception : ' + str(e)) return -1 return 0
def main():
    """
    Manage the activation of a vSphere VM in a Nuage VSP environment.

    Connects to both the Nuage VSD and vCenter, resolves (from arguments
    or interactively) the enterprise, user, domain/zone/subnet, IP,
    policy group and redirection target, then either writes Nuage
    metadata onto the VM ('metadata' mode) or creates the vPort and VM
    objects in VSD ('split-activation' mode).

    Returns 0 on success, 1 on any failure.
    """
    # Handling arguments
    args = get_args()
    if args.config_file:
        cfg = parse_config(args.config_file)
    elif os.path.isfile('{0:s}/.nuage/config.ini'.format(os.path.expanduser('~'))):
        cfg = parse_config('{0:s}/.nuage/config.ini'.format(os.path.expanduser('~')))
    else:
        print('Missing config file')
        return 1

    mode = args.mode
    nuage_vm_enterprise = args.nuage_vm_enterprise if args.nuage_vm_enterprise else None
    nuage_vm_domain = args.nuage_vm_domain if args.nuage_vm_domain else None
    nuage_vm_zone = args.nuage_vm_zone if args.nuage_vm_zone else None
    nuage_vm_subnet = args.nuage_vm_subnet if args.nuage_vm_subnet else None
    nuage_vm_ip = args.nuage_vm_ip if args.nuage_vm_ip else None
    nuage_vm_user = args.nuage_vm_user if args.nuage_vm_user else None
    # BUG FIX: the original assigned both of the following to nuage_vm_user,
    # so --nuage-vm-policy-group and --nuage-vm-redirection-target were
    # silently dropped and corrupted the user lookup.
    nuage_vm_policy_group = args.nuage_vm_policy_group if args.nuage_vm_policy_group else None
    nuage_vm_redirection_target = args.nuage_vm_redirection_target if args.nuage_vm_redirection_target else None
    vcenter_vm_name = args.vcenter_vm_name if args.vcenter_vm_name else None

    # Handling logging
    log_dir = cfg.get('LOG', 'directory')
    log_file = cfg.get('LOG', 'file')
    log_level = cfg.get('LOG', 'level')
    if not log_level:
        log_level = 'ERROR'
    log_path = None
    if log_dir and log_file and os.path.isdir(log_dir) and os.access(log_dir, os.W_OK):
        log_path = os.path.join(log_dir, log_file)
    logging.basicConfig(filename=log_path,
                        format='%(asctime)s %(levelname)s - %(name)s - %(message)s',
                        level=log_level)
    logging.info('Logging initiated')

    # Disabling SSL verification if set
    logging.debug('Disabling SSL certificate verification.')
    requests.packages.urllib3.disable_warnings()

    try:
        vc = None
        nc = None
        # Connecting to Nuage
        try:
            logging.info(
                'Connecting to Nuage server {0:s} with username {1:s} and enterprise {2:s}'
                .format(cfg.get('NUAGE', 'vsd_api_url'),
                        cfg.get('NUAGE', 'vsd_api_user'),
                        cfg.get('NUAGE', 'vsd_api_enterprise')))
            nc = vsdk.NUVSDSession(username=cfg.get('NUAGE', 'vsd_api_user'),
                                   password=cfg.get('NUAGE', 'vsd_api_password'),
                                   enterprise=cfg.get('NUAGE', 'vsd_api_enterprise'),
                                   api_url=cfg.get('NUAGE', 'vsd_api_url'))
            nc.start()
        except IOError:
            pass
        if not nc or not nc.is_current_session():
            logging.error(
                'Could not connect to Nuage host {0:s} with user {1:s}, enterprise {2:s} and specified password'
                .format(cfg.get('NUAGE', 'vsd_api_url'),
                        cfg.get('NUAGE', 'vsd_api_user'),
                        cfg.get('NUAGE', 'vsd_api_enterprise')))
            return 1

        # Connecting to vCenter
        try:
            logging.info(
                'Connecting to vCenter server {0:s} with username {1:s}'.format(
                    cfg.get('VSPHERE', 'vsphere_api_host'),
                    cfg.get('VSPHERE', 'vsphere_api_user')))
            vc = SmartConnect(host=cfg.get('VSPHERE', 'vsphere_api_host'),
                              user=cfg.get('VSPHERE', 'vsphere_api_user'),
                              pwd=cfg.get('VSPHERE', 'vsphere_api_password'),
                              port=int(cfg.get('VSPHERE', 'vsphere_api_port')))
        except IOError:
            pass
        if not vc:
            logging.error(
                'Could not connect to vCenter host {0:s} with user {1:s} and specified password'
                .format(cfg.get('VSPHERE', 'vsphere_api_host'),
                        cfg.get('VSPHERE', 'vsphere_api_user')))
            return 1

        logging.info('Connected to both Nuage & vCenter servers')
        logging.debug('Registering vCenter disconnect at exit')
        atexit.register(Disconnect, vc)

        vcenter_vm = None
        vm_enterprise = None
        vm_user = None
        vm_domain = None
        vm_is_l2domain = False
        vm_zone = None
        vm_subnet = None
        vm_ip = None
        vm_policy_group = None
        vm_redirection_target = None

        # Verifying the vCenter VM existence or selecting it
        if vcenter_vm_name:
            vcenter_vm = find_vm(vc, vcenter_vm_name)
            if vcenter_vm is None:
                logging.critical('Unable to find specified VM with name {0:s}'.format(vcenter_vm_name))
                return 1
        else:
            logging.debug('Offering a choice of which VM to activate')
            content = vc.content
            obj_view = content.viewManager.CreateContainerView(
                content.rootFolder, [vim.VirtualMachine], True)
            vm_list = obj_view.view
            clear()
            print('Please select your VM:')
            index = 0
            for cur_vm in vm_list:
                print('%s. %s' % (index + 1, cur_vm.name))
                index += 1
            while vcenter_vm is None:
                choice = raw_input('Please enter the number of the VM [1-%s]: ' % len(vm_list))
                choice = int(choice)
                if choice > 0 and choice - 1 < len(vm_list):
                    vcenter_vm = vm_list[choice - 1]
                    break
                print('Invalid choice, please try again')

        # Verifying the Nuage Enterprise existence or selecting it
        if nuage_vm_enterprise:
            logging.debug('Finding Nuage enterprise %s' % nuage_vm_enterprise)
            vm_enterprise = nc.user.enterprises.get_first(
                filter="name == '%s'" % nuage_vm_enterprise)
            if vm_enterprise is None:
                logging.error('Unable to find Nuage enterprise %s' % nuage_vm_enterprise)
                return 1
            logging.info('Nuage enterprise %s found' % nuage_vm_enterprise)
        else:
            clear()
            print('VM: %s' % vcenter_vm.name)
            print(80 * '-')
            print('Please select your enterprise:')
            index = 0
            all_ent = nc.user.enterprises.get()
            for cur_ent in all_ent:
                print('%s. %s' % (index + 1, cur_ent.name))
                index += 1
            while vm_enterprise is None:
                choice = raw_input('Please enter the number of the enterprise [1-%s]: ' % len(all_ent))
                choice = int(choice)
                if choice > 0 and choice - 1 < len(all_ent):
                    vm_enterprise = all_ent[choice - 1]
                    break
                print('Invalid choice, please try again')

        # Verifying the Nuage User existence or selecting it
        if nuage_vm_user:
            logging.debug('Finding Nuage user %s' % nuage_vm_user)
            vm_user = vm_enterprise.users.get_first(filter="userName == '%s'" % nuage_vm_user)
            if vm_user is None:
                logging.error('Unable to find Nuage user %s' % nuage_vm_user)
                return 1
            logging.info('Nuage user %s found' % nuage_vm_user)
        else:
            clear()
            print('VM: %s' % vcenter_vm.name)
            print('Enterprise: %s' % vm_enterprise.name)
            print(80 * '-')
            # NOTE(review): this section was corrupted ("******") in the
            # source; reconstructed from the parallel selection sections.
            print('Please select your user:')
            index = 0
            all_users = vm_enterprise.users.get()
            for cur_user in all_users:
                print('%s. %s' % (index + 1, cur_user.user_name))
                index += 1
            while vm_user is None:
                choice = raw_input('Please enter the number of the user [1-%s]: ' % len(all_users))
                choice = int(choice)
                if choice > 0 and choice - 1 < len(all_users):
                    vm_user = all_users[choice - 1]
                    break
                print('Invalid choice, please try again')

        # Verifying the Nuage Domain existence or selecting it
        if nuage_vm_domain:
            logging.debug('Finding Nuage domain %s' % nuage_vm_domain)
            vm_domain = vm_enterprise.domains.get_first(filter="name == '%s'" % nuage_vm_domain)
            if vm_domain is None:
                # Not an L3 domain; fall back to the L2 domains.
                logging.debug('Unable to find the domain {0:s} as an L3 domain'.format(nuage_vm_domain))
                vm_domain = vm_enterprise.l2_domains.get_first(filter="name == '%s'" % nuage_vm_domain)
                vm_is_l2domain = True
                if vm_domain is None:
                    logging.error('Unable to find Nuage domain {0:s}'.format(nuage_vm_domain))
                    return 1
            logging.info('Nuage domain %s found' % nuage_vm_domain)
        else:
            clear()
            print('VM: %s' % vcenter_vm.name)
            print('Enterprise: %s' % vm_enterprise.name)
            print('User: %s' % vm_user.user_name)
            print(80 * '-')
            print('Please select your domain:')
            index = 0
            all_l3_dom = vm_enterprise.domains.get()
            all_l2_dom = vm_enterprise.l2_domains.get()
            all_dom = all_l2_dom + all_l3_dom
            for cur_dom in all_l2_dom:
                print('%s. L2 %s - %s/%s' % (index + 1, cur_dom.name, cur_dom.address, cur_dom.netmask))
                index += 1
            for cur_dom in all_l3_dom:
                print('%s. L3 - %s' % (index + 1, cur_dom.name))
                index += 1
            while vm_domain is None:
                choice = raw_input('Please enter the number of the domain [1-%s]: ' % len(all_dom))
                choice = int(choice)
                if choice > 0 and choice - 1 < len(all_dom):
                    vm_domain = all_dom[choice - 1]
                    if type(vm_domain) is vsdk.NUL2Domain:
                        vm_is_l2domain = True
                    break
                print('Invalid choice, please try again')

        # Verifying the Nuage Zone existence or selecting it (L3 only)
        if not vm_is_l2domain and nuage_vm_zone:
            logging.debug('Finding Nuage zone %s' % nuage_vm_zone)
            vm_zone = vm_domain.zones.get_first(filter="name == '%s'" % nuage_vm_zone)
            if vm_zone is None:
                logging.error('Unable to find Nuage zone %s' % nuage_vm_zone)
                return 1
            logging.info('Nuage zone %s found' % nuage_vm_zone)
        elif not vm_is_l2domain:
            clear()
            print('VM: %s' % vcenter_vm.name)
            print('Enterprise: %s' % vm_enterprise.name)
            print('User: %s' % vm_user.user_name)
            print('Domain: %s' % vm_domain.name)
            print(80 * '-')
            print('Please select your zone:')
            index = 0
            all_zone = vm_domain.zones.get()
            for cur_zone in all_zone:
                print('%s. %s' % (index + 1, cur_zone.name))
                index += 1
            while vm_zone is None:
                choice = raw_input('Please enter the number of the zone [1-%s]: ' % len(all_zone))
                choice = int(choice)
                if choice > 0 and choice - 1 < len(all_zone):
                    vm_zone = all_zone[choice - 1]
                    break
                print('Invalid choice, please try again')

        # Verifying the Nuage Subnet existence or selecting it (L3 only)
        if not vm_is_l2domain and nuage_vm_subnet:
            logging.debug('Finding Nuage subnet %s' % nuage_vm_subnet)
            vm_subnet = vm_zone.subnets.get_first(filter="name == '%s'" % nuage_vm_subnet)
            if vm_subnet is None:
                logging.error('Unable to find Nuage subnet %s' % nuage_vm_subnet)
                return 1
            logging.info('Nuage subnet %s found' % nuage_vm_subnet)
        elif not vm_is_l2domain:
            clear()
            print('VM: %s' % vcenter_vm.name)
            print('Enterprise: %s' % vm_enterprise.name)
            print('User: %s' % vm_user.user_name)
            print('Domain: %s' % vm_domain.name)
            print('Zone: %s' % vm_zone.name)
            print(80 * '-')
            print('Please select your subnet:')
            index = 0
            all_subnet = vm_zone.subnets.get()
            for cur_subnet in all_subnet:
                print('%s. %s - %s/%s' % (index + 1, cur_subnet.name, cur_subnet.address, cur_subnet.netmask))
                index += 1
            while vm_subnet is None:
                choice = raw_input('Please enter the number of the subnet [1-%s]: ' % len(all_subnet))
                choice = int(choice)
                if choice > 0 and choice - 1 < len(all_subnet):
                    vm_subnet = all_subnet[choice - 1]
                    break
                print('Invalid choice, please try again')

        # Verifying the IP or asking for it
        # NOTE(review): this uses vm_subnet even for an L2 domain, where
        # vm_subnet is None — confirm intended behavior for L2 + static IP.
        if nuage_vm_ip:
            logging.debug('Verifying if IP %s is inside Nuage subnet %s range' % (nuage_vm_ip, vm_subnet.name))
            if not ipaddress.ip_address(nuage_vm_ip) in ipaddress.ip_network('%s/%s' % (vm_subnet.address, vm_subnet.netmask)):
                logging.error('IP %s is not part of subnet %s with netmask %s' % (nuage_vm_ip, vm_subnet.address, vm_subnet.netmask))
                return 1
            vm_ip = nuage_vm_ip
        else:
            clear()
            print('VM: %s' % vcenter_vm.name)
            print('Enterprise: %s' % vm_enterprise.name)
            print('User: %s' % vm_user.user_name)
            if not vm_is_l2domain:
                print('Domain: %s' % vm_domain.name)
                print('Zone: %s' % vm_zone.name)
                print('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
            else:
                print('Domain: %s - %s/%s' % (vm_domain.name, vm_domain.address, vm_domain.netmask))
            print(80 * '-')
            print('If you want a static IP, please enter it. Or press enter for a DHCP assigned IP.')
            while vm_ip is None:
                choice = raw_input('Please enter the IP or press enter for a DHCP assigned IP: ')
                if not choice or ipaddress.ip_address(choice) in ipaddress.ip_network('%s/%s' % (vm_subnet.address, vm_subnet.netmask)):
                    vm_ip = choice
                    break
                print('Invalid choice, please try again')

        # Verifying the Nuage policy group existence or selecting it
        if nuage_vm_policy_group:
            logging.debug('Finding Nuage policy group %s' % nuage_vm_policy_group)
            vm_policy_group = vm_domain.policy_groups.get_first(filter="name == '%s'" % nuage_vm_policy_group)
            if vm_policy_group is None:
                logging.error('Unable to find Nuage policy group {0:s}'.format(nuage_vm_policy_group))
                return 1
            logging.info('Nuage policy group %s found' % nuage_vm_policy_group)
        else:
            clear()
            print('VM: %s' % vcenter_vm.name)
            print('Enterprise: %s' % vm_enterprise.name)
            print('User: %s' % vm_user.user_name)
            if not vm_is_l2domain:
                print('Domain: %s' % vm_domain.name)
                print('Zone: %s' % vm_zone.name)
                print('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
            else:
                print('Domain: %s - %s/%s' % (vm_domain.name, vm_domain.address, vm_domain.netmask))
            if vm_ip:
                print('IP: {0:s}'.format(vm_ip))
            print(80 * '-')
            print('Please select your policy group:')
            index = 0
            all_pg = vm_domain.policy_groups.get()
            print('0. None')
            for cur_pg in all_pg:
                print('%s. %s' % (index + 1, cur_pg.name))
                index += 1
            while vm_policy_group is None:
                choice = raw_input('Please enter the number of the policy group [0-%s]: ' % len(all_pg))
                choice = int(choice)
                if choice == 0:
                    # Explicit "no policy group" choice.
                    vm_policy_group = None
                    break
                elif choice > 0 and choice - 1 < len(all_pg):
                    vm_policy_group = all_pg[choice - 1]
                    break
                print('Invalid choice, please try again')

        # Verifying the Nuage redirection target existence or selecting it
        if nuage_vm_redirection_target:
            logging.debug('Finding Nuage redirection target %s' % nuage_vm_redirection_target)
            vm_redirection_target = vm_domain.redirection_targets.get_first(filter="name == '%s'" % nuage_vm_redirection_target)
            if vm_redirection_target is None:
                logging.error('Unable to find Nuage redirection target {0:s}'.format(nuage_vm_redirection_target))
                return 1
            logging.info('Nuage redirection target %s found' % nuage_vm_redirection_target)
        else:
            clear()
            print('VM: %s' % vcenter_vm.name)
            print('Enterprise: %s' % vm_enterprise.name)
            print('User: %s' % vm_user.user_name)
            if not vm_is_l2domain:
                print('Domain: %s' % vm_domain.name)
                print('Zone: %s' % vm_zone.name)
                print('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
            else:
                print('Domain: %s - %s/%s' % (vm_domain.name, vm_domain.address, vm_domain.netmask))
            if vm_ip:
                print('IP: {0:s}'.format(vm_ip))
            if vm_policy_group:
                print('Policy group: {0:s}'.format(vm_policy_group.name))
            print(80 * '-')
            print('Please select your redirection target:')
            index = 0
            all_rt = vm_domain.redirection_targets.get()
            print('0. None')
            for cur_rt in all_rt:
                print('%s. %s' % (index + 1, cur_rt.name))
                index += 1
            while vm_redirection_target is None:
                choice = raw_input('Please enter the number of the redirection target [0-%s]: ' % len(all_rt))
                choice = int(choice)
                if choice == 0:
                    vm_redirection_target = None
                    break
                elif choice > 0 and choice - 1 < len(all_rt):
                    vm_redirection_target = all_rt[choice - 1]
                    break
                print('Invalid choice, please try again')

        # Summary of the resolved values
        logging.info('Using following Nuage values:')
        logging.info('Enterprise: %s' % vm_enterprise.name)
        logging.info('User: %s' % vm_user.user_name)
        if not vm_is_l2domain:
            logging.info('Domain: %s' % vm_domain.name)
            logging.info('Zone: %s' % vm_zone.name)
            logging.info('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
        else:
            logging.info('Domain: %s - %s/%s' % (vm_domain.name, vm_domain.address, vm_domain.netmask))
        if vm_ip:
            logging.info('Static IP: %s' % vm_ip)
        if vm_policy_group:
            logging.info('Policy group: {0:s}'.format(vm_policy_group.name))
        if vm_redirection_target:
            logging.info('Redirection target: {0:s}'.format(vm_redirection_target.name))

        clear()
        if mode == 'metadata':
            print('Setting Nuage Metadata on VM')
            # Setting Nuage metadata as advanced VM parameters
            logging.info('Setting Nuage Metadata')
            vm_option_values = []
            # Enterprise
            vm_option_values.append(vim.option.OptionValue(key='nuage.enterprise', value=vm_enterprise.name))
            if vm_is_l2domain:
                # L2 Domain
                vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.l2domain', value=vm_domain.name))
            else:
                # Domain
                vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.domain', value=vm_domain.name))
                # Zone
                vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.zone', value=vm_zone.name))
                # Subnet
                vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.network', value=vm_subnet.name))
            # Network type
            vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.networktype', value='ipv4'))
            # User
            vm_option_values.append(vim.option.OptionValue(key='nuage.user', value=vm_user.user_name))
            # IP
            if vm_ip:
                vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.ip', value=vm_ip))
            # Policy group
            if vm_policy_group:
                vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.policy-group', value=vm_policy_group.name))
            # Redirection target
            if vm_redirection_target:
                vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.redirection-target', value=vm_redirection_target.name))

            logging.debug('Creating of config spec for VM')
            config_spec = vim.vm.ConfigSpec(extraConfig=vm_option_values)
            logging.info('Applying advanced parameters. This might take a couple of seconds')
            config_task = vcenter_vm.ReconfigVM_Task(spec=config_spec)
            logging.debug('Waiting for the advanced paramerter to be applied')
            # Poll the reconfigure task until it finishes one way or the other.
            while True:
                info = config_task.info
                if info.state == vim.TaskInfo.State.success:
                    logging.debug('Advanced parameters applied')
                    break
                elif info.state == vim.TaskInfo.State.error:
                    if info.error.fault:
                        logging.info('Applying advanced parameters has quit with error: %s' % info.error.fault.faultMessage)
                    else:
                        logging.info('Applying advanced parameters has quit with cancelation')
                    break
                sleep(5)
        elif mode == 'split-activation':
            print('Creating vPort and VM in VSD for split activation')
            logging.debug('Starting split activation')
            # Getting VM UUID
            logging.debug('Getting VM UUID, MAC & IP')
            vcenter_vm_uuid = vcenter_vm.config.uuid
            logging.debug('Found UUID %s for VM %s' % (vcenter_vm_uuid, vcenter_vm.name))
            vcenter_vm_mac = None
            vcenter_vm_hw = vcenter_vm.config.hardware
            # Use the MAC of the first virtual NIC found.
            for dev in vcenter_vm_hw.device:
                if isinstance(dev, vim.vm.device.VirtualEthernetCard):
                    if dev.macAddress:
                        logging.debug('Found MAC {0:s} for VM {1:s}'.format(dev.macAddress, vcenter_vm.name))
                        vcenter_vm_mac = dev.macAddress
                        break
            if vcenter_vm_mac is None:
                logging.critical('Unable to find a valid mac address for VM')
                return 1

            # Creating vPort
            logging.debug('Creating vPort for VM %s' % vcenter_vm.name)
            nc_vport = vsdk.NUVPort(name='{0:s}-vport'.format(vcenter_vm.name),
                                    address_spoofing='INHERITED',
                                    type='VM',
                                    description='Automatically created, do not edit.')
            if vm_is_l2domain:
                vm_domain.create_child(nc_vport)
            else:
                vm_subnet.create_child(nc_vport)

            # Creating VM
            logging.debug('Creating a Nuage VM for VM %s' % vcenter_vm)
            nc_vm = vsdk.NUVM(name=vcenter_vm.name,
                              uuid=vcenter_vm_uuid,
                              interfaces=[{
                                  'name': vcenter_vm_mac,
                                  'VPortID': nc_vport.id,
                                  'MAC': vcenter_vm_mac
                              }])
            nc.user.create_child(nc_vm)
        else:
            logging.critical('Invalid mode')
            return 1
    except vmodl.MethodFault as e:
        logging.critical('Caught vmodl fault: {0:s}'.format(e.msg))
        return 1
# Command-line definitions: (flags, default, help text, key).
supportedArgs = [
    (["h:", "host="], "localhost", "Host name", "host"),
    # BUG FIX: this entry was corrupted in the source ('user="******"root');
    # restored to the conventional (flags, default, help, key) tuple.
    (["u:", "user="], "root", "User name", "user"),
    (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
    (["d:", "dsName="], "datastore-1", "Datastore Name", "dsName")]

supportedToggles = [
    (["usage", "help"], False, "Show usage information", "usage")
]

args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
if args.GetKeyValue("usage") == True:
    args.Usage()
    sys.exit(0)

# Connect and register cleanup of the session at interpreter exit.
si = SmartConnect(host=args.GetKeyValue("host"),
                  user=args.GetKeyValue("user"),
                  pwd=args.GetKeyValue("pwd"))
atexit.register(Disconnect, si)

dsName = args.GetKeyValue("dsName")
# First Class Disk operations require the vStorageObjectManager.
vsomgr = si.RetrieveContent().vStorageObjectManager
if not vsomgr:
    raise Exception("FCD feature is disabled")


def GetDatastore(si, dsName):
    """Return the datastore named *dsName* on the connected host, or None."""
    hs = host.GetHostSystem(si)
    datastores = hs.GetDatastore()
    for ds in datastores:
        if ds.name == dsName:
            return ds
    return None  # explicit: no datastore with that name
help="remote host to connect to") parser.add_option("-u", "--user", default="root", help="User name to use when connecting to hostd") parser.add_option("-p", "--password", default="", help="Password to use when connecting to hostd") parser.add_option('--list', dest='list', default=False, action='store_true', help='List the available services') parser.add_option("--start", default=None, help="Start the specified service") parser.add_option("--stop", default=None, help="Start the specified service") parser.add_option("--restart", default=None, help="Start the specified service") (options, args) = parser.parse_args() serviceInstance = SmartConnect(host=options.host, user=options.user, pwd=options.password, preferredApiVersions=[ 'vim.version.version9' ]) serviceSystem = GetHostConfigManager(serviceInstance).serviceSystem; def ListServices(serviceSystem): maxLabelWidth = max([len(s.label) for s in serviceSystem.serviceInfo.service]) maxKeyWidth = max([len(s.key) for s in serviceSystem.serviceInfo.service]) print('%-*s %-*s %s' % (maxLabelWidth, 'Label', maxKeyWidth, 'Key', ' Status ')) print('%-*s %-*s %s' % (maxLabelWidth, '-----', maxKeyWidth, '---', ' ------ ')) for service in serviceSystem.serviceInfo.service: print('%-*s %-*s %s' % (maxLabelWidth, service.label, maxKeyWidth, service.key, service.running and 'Running' or 'Stopped')) if options.list: ListServices(serviceSystem)
def connect(self, hyper, name):
    """
    Connects to a Hypervisor.

    Looks up the device/auth information for *hyper* in the global
    configuration, opens a pyVmomi connection plus an SSH session to the
    host, and registers both in ``self._channels`` under *name*.

    Returns the new channel id, or None when the access type is not
    'vmware' or a connection error occurred with 'ignore-dead-node' set.
    """
    ignore_dead_node = Common.get_config_value('ignore-dead-node')
    _hyper = Common.LOCAL['hypervisor'][hyper]['device']
    _ip = Common.GLOBAL['device'][_hyper]['ip']
    _type = Common.GLOBAL['device'][_hyper]['type']
    _access_tmpl = Common.GLOBAL['access-template'][_type]
    _access = _access_tmpl['access']
    _auth_type = _access_tmpl['auth']
    _profile = _access_tmpl['profile']
    _auth = Common.GLOBAL['auth'][_auth_type][_profile]
    _driver = None
    # BUG FIX: the original used the name `id`, which shadows the builtin;
    # when the 'vmware' branch was skipped, `return id` returned the
    # *builtin function* instead of a channel id. Pre-initialise to None.
    conn_id = None
    try:
        channel_info = {}
        if _access == 'vmware':
            # register information
            conn_id = self._max_id + 1
            conn = SmartConnect(host=_ip,
                                user=_auth['user'],
                                pwd=_auth['pass'],
                                sslContext=self._ssl_context)
            ssh_id = self._ssh_lib.open_connection(_ip,
                                                   alias=name + '_ssh',
                                                   term_type='vt100')
            output = self._ssh_lib.login(_auth['user'], _auth['pass'])
            self._ssh_lib.write('unalias -a')
            self._ssh_lib.read_until_regexp(self._ssh_prompt)
            # Log the SSH session output under the result folder.
            result_folder = Common.get_result_path()
            log_file = name + '_ssh.log'
            _logger = codecs.open(result_folder + "/" + log_file, 'w', 'utf-8')
            _logger.write(output)
            # atexit.register(Disconnect,conn)
            # channel_info['id'] = id
            channel_info['ip'] = _ip
            channel_info['type'] = _type
            # channel_info['access-type'] = 'vmware-esxi'
            channel_info['connection'] = conn
            channel_info['ssh'] = self._ssh_lib
            channel_info['ssh_logger'] = _logger
            channel_info['capture_counter'] = 0
            channel_info['capture_format'] = 'vmware_%010d'
            self._max_id = conn_id
            self._current_id = conn_id
            self._current_name = name
            self._channels[name] = channel_info
    except Exception as err:
        if ignore_dead_node:
            # BUG FIX: message said "bug ignored"; the error *is* ignored here.
            msg = 'WARN: Error when connecting to `%s(%s)` but ignored' % (name, _ip)
            BuiltIn().log(msg)
        else:
            # BUG FIX: the original message also claimed the error was
            # "ignored" although this path re-raises.
            msg = 'ERROR: Error when connecting to `%s(%s)`' % (name, _ip)
            BuiltIn().log(msg)
            raise
    BuiltIn().log('Connected to `%s(%s)`' % (name, _ip))
    return conn_id
def main():
    """
    vMotion error-injection test driver: creates (or reuses) a dummy VM,
    then runs a series of failure-injection vMotion tests against it.
    Sets the module-level `status` to "FAIL" on any exception.
    """
    global status
    supportedArgs = [
        (["H:", "host="], "localhost", "Host name", "host"),
        (["k:", "keep="], "0", "Keep configs", "keep"),
        (["e:", "useExisting="], False, "Use existing VM", "useExistingVm"),
        # BUG FIX: this entry was corrupted in the source ('user="******"root');
        # restored to the conventional (flags, default, help, key) tuple.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "", "Password", "pwd"),
        (["v:", "VM name="], "vmotionErrs", "Name of the virtual machine", "vmname"),
        (["d:", "dsName="], None, "Target datastore for storage VMotions", "dsName"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Process command line
    host = args.GetKeyValue("host")
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    keep = int(args.GetKeyValue("keep"))
    useExistingVm = bool(args.GetKeyValue("useExistingVm"))
    dsName = args.GetKeyValue("dsName")

    for i in range(numiter):
        si = None
        try:
            # Connect to host
            si = SmartConnect(host=host,
                              user=args.GetKeyValue("user"),
                              pwd=args.GetKeyValue("pwd"))
            Log("Connected to Host")

            dsName = GetDatastore(si, dsName)
            Log('Using datastore: %s' % dsName)

            # Cleanup from previous runs
            if not useExistingVm:
                CleanupVm(vmname)
                CleanDirs(si, dsName, vmname)

            # Create new VM
            connect.SetSi(si)
            theVm = None
            if useExistingVm:
                theVm = folder.Find(vmname)
                if theVm is None:
                    raise Exception("No VM with name %s found!" % vmname)
                Log("Using VM %s" % vmname)
            else:
                Log("Creating VM %s" % vmname)
                # Short delay to avoid colliding with a cleanup.
                time.sleep(1)
                # XXX numScsiDisks=0. vm.Migrate() doesn't know about
                # xvmotion, so it fails to set up the disk copy spec
                theVm = vm.CreateQuickDummy(vmname,
                                            guest="winXPProGuest",
                                            memory=4,
                                            cdrom=0,
                                            numScsiDisks=0,
                                            datastoreName=dsName)

            if theVm.GetRuntime().GetPowerState() == PowerState.poweredOff:
                Log("Powering on VM.")
                vm.PowerOn(theVm)
            else:
                Log("VM already powered on.")

            srcDir = os.path.dirname(theVm.config.files.vmPathName)
            tests = [
                TestVmxFailTo,
                TestVmxFailToEvent,
                TestVmxFailInit,
                TestVmxFailFrom,
                TestVmxFailPrepareDst,
                TestVmxFailStart,
                TestVmxSuccess,  # Must be last: does not switch back to source
            ]
            for testClass in tests:
                Log("Creating dummy dest")
                dstPath = os.path.join(srcDir, testClass.name, vmname + ".vmx")
                dummySpec = vm.CreateQuickDummySpec(vmname,
                                                    guest="winXPProGuest",
                                                    memory=4,
                                                    cdrom=0,
                                                    numScsiDisks=0,
                                                    datastoreName=dsName)
                dummySpec.GetFiles().SetVmPathName(dstPath)
                dstVm = vm.CreateFromSpec(dummySpec)

                # Core of running an individual test
                theTest = testClass(theVm, dstVm)
                theTest.setup()
                Log("Attempt to vMotion with test '%s'" % str(theTest))
                try:
                    vm.Migrate(theVm, si, si,
                               vmotionType='vmotion',
                               unsharedSwap=True,
                               dstPath=dstPath)
                except Exception as e:
                    theTest.gotFailure(e)
                else:
                    theTest.gotSuccess()
                theTest.cleanup()

            if not useExistingVm:
                CleanupVm(vmname)
        except Exception as e:
            Log("Caught exception : %s" % e)
            excType, excValue, excTB = sys.exc_info()
            stackTrace = " ".join(
                traceback.format_exception(excType, excValue, excTB))
            Log(stackTrace)
            status = "FAIL"
            Disconnect(si)
            if IsTestEsx():
                ReportFiles('/vmfs/volumes/%s/%s' % (dsName, vmname))
            return

        if status == "PASS" and IsTestEsx() and 'test-esx-vmotionErrs' in vmname:
            CleanDirs(si, dsName, vmname)
        Disconnect(si)
def _get_instances(self, inkwargs):
    ''' Make API calls '''
    instances = []
    # BUG FIX: `si` must be pre-initialised. If an ssl.SSLError was caught
    # but the CERTIFICATE_VERIFY_FAILED condition did not hold, `si` was
    # never bound and the `if not si` check below raised NameError.
    si = None
    try:
        si = SmartConnect(**inkwargs)
    except ssl.SSLError as connection_error:
        if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(
                connection_error) and self.validate_certs:
            sys.exit("Unable to connect to ESXi server due to %s, "
                     "please specify validate_certs=False and try again" %
                     connection_error)
    except Exception as exc:
        self.debugl("Unable to connect to ESXi server due to %s" % exc)
        sys.exit("Unable to connect to ESXi server due to %s" % exc)
    self.debugl('retrieving all instances')
    if not si:
        sys.exit("Could not connect to the specified host using specified "
                 "username and password")
    # Clean up the session at interpreter exit.
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()

    # Create a search container for virtualmachines
    self.debugl('creating containerview for virtualmachines')
    container = content.rootFolder
    viewType = [vim.VirtualMachine]
    recursive = True
    containerView = content.viewManager.CreateContainerView(
        container, viewType, recursive)
    children = containerView.view
    for child in children:
        # If requested, limit the total number of instances
        if self.args.max_instances:
            if len(instances) >= self.args.max_instances:
                break
        instances.append(child)
    self.debugl("%s total instances in container view" % len(instances))

    # Optionally restrict to a single VM by name.
    if self.args.host:
        instances = [x for x in instances if x.name == self.args.host]

    # Collect facts for each instance as (managed object, facts) pairs.
    instance_tuples = []
    for instance in sorted(instances):
        if self.guest_props:
            ifacts = self.facts_from_proplist(instance)
        else:
            ifacts = self.facts_from_vobj(instance)
        instance_tuples.append((instance, ifacts))
    self.debugl('facts collected for all instances')

    # Custom fields are best-effort: failures only produce debug output.
    try:
        cfm = content.customFieldsManager
        if cfm is not None and cfm.field:
            for f in cfm.field:
                if f.managedObjectType == vim.VirtualMachine:
                    self.custom_fields[f.key] = f.name
            self.debugl('%d custom fields collected' %
                        len(self.custom_fields))
    except vmodl.RuntimeFault as exc:
        self.debugl("Unable to gather custom fields due to %s" % exc.msg)
    except IndexError as exc:
        self.debugl("Unable to gather custom fields due to %s" % exc)
    return instance_tuples
# Script: report each VM name found on a vSphere host to Zabbix via
# zabbix_sender.
# NOTE(review): this block was reconstructed from a collapsed source line;
# the nesting below `datacenter = child` is inferred — TODO confirm.
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim
import ssl
import subprocess

# argv[1]: hypervisor name sent to Zabbix, argv[2]: vSphere host to query.
argvs = sys.argv
HV_NAME = argvs[1]
VSPHERE_HOST = argvs[2]
# Site-specific placeholders (values are Japanese placeholder text in the
# original; do not translate — they are runtime values to be replaced).
ZABBIX_SERVER = 'Zabbixサーバホスト名またはIPアドレス'
VSPHERE_ID = 'vSphereユーザ名'
VSPHERE_PASSWORD = '******'
ZABBIX_SENDER = 'zabbix_senderパス'

# Certificate verification is disabled (presumably self-signed hosts).
context = ssl._create_unverified_context()
si = SmartConnect(host=VSPHERE_HOST,
                  user=VSPHERE_ID,
                  pwd=VSPHERE_PASSWORD,
                  port=443,
                  sslContext=context)
content = si.RetrieveContent()
# Walk datacenters -> host folders -> hosts -> VMs.
for child in content.rootFolder.childEntity:
    if hasattr(child, 'vmFolder'):
        datacenter = child
        hostFolder = datacenter.hostFolder
        entityList = hostFolder.childEntity
        for entity in entityList:
            for host in entity.host:
                for vm in host.vm:
                    # NOTE(review): `cmd` is built but never executed, and
                    # `subprocess` is imported but unused — the execution
                    # call appears to have been lost; confirm against the
                    # original script.
                    cmd = "{} -z {} -s {} -k hv.hostname -o {} > /dev/null".format(
                        ZABBIX_SENDER, ZABBIX_SERVER, vm.name, HV_NAME)
def main():
    """
    Map vCenter VM guest-OS names to Nuage policy groups.

    Workflow:
      1. Parse CLI arguments and configure logging; prompt for any missing
         passwords.
      2. Connect to the Nuage VSD API and to vCenter (optionally without
         SSL verification).
      3. Read a CSV mapping file whose rows are: OS-name regex, policy group.
      4. For every non-template VM in the requested clusters that carries
         Nuage metadata in its extraConfig, resolve its Nuage vport via its
         first NIC's MAC address and hand the matching policy groups to
         update_nuage_policy_group().

    Returns 0 on success, 1 on a connection or configuration error.
    """
    # Handling arguments
    args = get_args()
    clusters = []
    if args.clusters:
        clusters = args.clusters
    debug = args.debug
    log_file = None
    if args.logfile:
        log_file = args.logfile
    mapping_file = args.mapping_file
    nuage_enterprise = args.nuage_enterprise
    nuage_host = args.nuage_host
    nuage_port = args.nuage_port
    nuage_password = None
    if args.nuage_password:
        nuage_password = args.nuage_password
    nuage_username = args.nuage_username
    remove_policygroups = args.remove_policygroups
    nosslcheck = args.nosslcheck
    verbose = args.verbose
    vcenter_host = args.vcenter_host
    vcenter_https_port = args.vcenter_https_port
    vcenter_password = None
    if args.vcenter_password:
        vcenter_password = args.vcenter_password
    vcenter_username = args.vcenter_username

    # Logging settings
    if debug:
        log_level = logging.DEBUG
    elif verbose:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING

    logging.basicConfig(filename=log_file,
                        format='%(asctime)s %(levelname)s %(message)s',
                        level=log_level)
    logger = logging.getLogger(__name__)

    # Getting user password for Nuage connection
    if nuage_password is None:
        logger.debug(
            'No command line Nuage password received, requesting Nuage password from user'
        )
        nuage_password = getpass.getpass(
            prompt='Enter password for Nuage host {0:s} for user {1:s}: '.
            format(nuage_host, nuage_username))

    # Getting user password for vCenter connection
    if vcenter_password is None:
        logger.debug(
            'No command line vCenter password received, requesting vCenter password from user'
        )
        vcenter_password = getpass.getpass(
            prompt='Enter password for vCenter host {0:s} for user {1:s}: '.
            format(vcenter_host, vcenter_username))

    try:
        vc = None
        nc = None

        # Connecting to Nuage
        try:
            logger.info(
                'Connecting to Nuage server {0:s}:{1:d} with username {2:s}'.
                format(nuage_host, nuage_port, nuage_username))
            nc = vsdk.NUVSDSession(username=nuage_username,
                                   password=nuage_password,
                                   enterprise=nuage_enterprise,
                                   api_url="https://{0:s}:{1:d}".format(
                                       nuage_host, nuage_port))
            nc.start()
        except IOError:
            pass

        if not nc or not nc.is_current_session():
            logger.error(
                'Could not connect to Nuage host {0:s} with user {1:s} and specified password'
                .format(nuage_host, nuage_username))
            return 1

        # Connecting to vCenter
        try:
            logger.info(
                'Connecting to vCenter server {0:s}:{1:d} with username {2:s}'.
                format(vcenter_host, vcenter_https_port, vcenter_username))
            if nosslcheck:
                vc = SmartConnectNoSSL(host=vcenter_host,
                                       user=vcenter_username,
                                       pwd=vcenter_password,
                                       port=int(vcenter_https_port))
            else:
                vc = SmartConnect(host=vcenter_host,
                                  user=vcenter_username,
                                  pwd=vcenter_password,
                                  port=int(vcenter_https_port))
        except IOError:
            pass

        if not vc:
            logger.error(
                'Could not connect to vCenter host {0:s} with user {1:s} and specified password'
                .format(vcenter_host, vcenter_username))
            return 1

        logger.debug('Registering vCenter disconnect at exit')
        atexit.register(Disconnect, vc)

        logger.info('Connected to both Nuage & vCenter servers')
    except vmodl.MethodFault as e:
        logger.critical('Caught vmodl fault: {0:s}'.format(e.msg))
        return 1

    # CSV Handling
    if not os.path.isfile(mapping_file):
        logger.critical(
            'Mapping file {0:s} does not exist, exiting'.format(mapping_file))
        return 1

    mapping_list = {}
    # CSV fields:
    # vCenter VM name regex, Policy group
    logger.debug('Parsing mapping file {0:s}'.format(mapping_file))
    # BUG FIX: the file was opened in binary mode ('rb'); Python 3's
    # csv.reader requires a text-mode file object, opened with newline=''
    # per the csv module documentation.
    with open(mapping_file, 'r', newline='') as maplist:
        mapping_list_raw = csv.reader(maplist, delimiter=',', quotechar='"')
        for row in mapping_list_raw:
            logger.debug('Found CSV row: {0:s}'.format(','.join(row)))
            mapping_list[row[0]] = row[1]

    # Getting clusters in the current vCenter
    logger.debug(
        'Gathering all Clusters from the vCenter {0:s}'.format(vcenter_host))
    content = vc.content
    obj_view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.ClusterComputeResource], True)
    vc_cl_list = obj_view.view
    obj_view.Destroy()

    for vc_cl in vc_cl_list:
        # NOTE(review): when no --clusters are given, `clusters` is empty and
        # every cluster is skipped — confirm this is the intended behavior.
        if vc_cl.name not in clusters:
            continue

        # Getting VMs in the current vCenter Cluster
        logger.debug('Gathering all VMs from the vCenter Cluster {0:s}'.format(
            vc_cl.name))
        obj_view = content.viewManager.CreateContainerView(
            vc_cl, [vim.VirtualMachine], True)
        vc_vm_list = obj_view.view
        obj_view.Destroy()

        for vc_vm in vc_vm_list:
            # Verifying if VM matches a regex in the list
            logger.debug('Found VM {0:s}, checking'.format(vc_vm.name))

            # If the VM is a template skip it
            if vc_vm.config.template:
                logger.debug('VM {0:s} is a template, skipping'.format(
                    vc_vm.name))
                continue

            # Getting VM info from the Nuage metadata stored in extraConfig
            nc_vm_properties = {}
            vc_vm_nuage_enterprise = next(
                (x for x in vc_vm.config.extraConfig
                 if x.key == 'nuage.enterprise'), None)
            vc_vm_nuage_domain = next(
                (x for x in vc_vm.config.extraConfig
                 if x.key == 'nuage.nic0.domain'), None)
            vc_vm_nuage_l2domain = next(
                (x for x in vc_vm.config.extraConfig
                 if x.key == 'nuage.nic0.l2domain'), None)
            vc_vm_nuage_zone = next(
                (x for x in vc_vm.config.extraConfig
                 if x.key == 'nuage.nic0.zone'), None)
            vc_vm_nuage_network = next(
                (x for x in vc_vm.config.extraConfig
                 if x.key == 'nuage.nic0.network'), None)

            # Check if all the settings for an L3 domain are present
            if vc_vm_nuage_enterprise is None or vc_vm_nuage_domain is None or vc_vm_nuage_zone is None or vc_vm_nuage_network is None:
                # Check if it is an L2 domain
                if vc_vm_nuage_enterprise is None or vc_vm_nuage_l2domain is None:
                    logger.info(
                        'VM {0:s} has no correct Nuage metadata set, assuming it is not a VM connected through Nuage and skipping it.'
                        .format(vc_vm.name))
                    continue

            nc_vm_properties['name'] = vc_vm.name
            nc_vm_properties['os'] = vc_vm.config.guestFullName
            nc_vm_properties['nuage.enterprise'] = vc_vm_nuage_enterprise.value
            # If domain is not set, it is an l2 domain
            if vc_vm_nuage_domain is not None:
                nc_vm_properties['nuage.domain'] = vc_vm_nuage_domain.value
                nc_vm_properties['nuage.l2domain'] = None
                nc_vm_domain_name = vc_vm_nuage_domain.value
            else:
                nc_vm_properties['nuage.domain'] = None
                nc_vm_properties['nuage.l2domain'] = vc_vm_nuage_l2domain.value
                nc_vm_domain_name = vc_vm_nuage_l2domain.value
            if vc_vm_nuage_zone is not None:
                nc_vm_properties['nuage.zone'] = vc_vm_nuage_zone.value
            else:
                nc_vm_properties['nuage.zone'] = None
            if vc_vm_nuage_network is not None:
                nc_vm_properties['nuage.network'] = vc_vm_nuage_network.value
            else:
                nc_vm_properties['nuage.network'] = None

            # BUG FIX: zone/network are legitimately None on the L2-domain
            # path; the original '{4:s}'/'{5:s}' format specs raise TypeError
            # for None, so use the '!s' conversion instead.
            logger.debug(
                'VM {0:s} with OS {1:s} has following Nuage settings: Enterprise {2:s}, Domain {3:s}, Zone {4!s}, Subnet {5!s}'
                .format(nc_vm_properties['name'], nc_vm_properties['os'],
                        nc_vm_properties['nuage.enterprise'],
                        nc_vm_domain_name, nc_vm_properties['nuage.zone'],
                        nc_vm_properties['nuage.network']))

            # Getting VM MAC (first virtual ethernet card)
            vc_vm_nic = next(
                (x for x in vc_vm.config.hardware.device
                 if isinstance(x, vim.vm.device.VirtualEthernetCard)), None)
            if vc_vm_nic is None:
                logger.error(
                    'VM {0:s} has no valid network interfaces, skipping it'.
                    format(nc_vm_properties['name']))
                continue

            nc_vm_properties['mac'] = vc_vm_nic.macAddress
            logger.debug('VM {0:s} has MAC {1:s}'.format(
                nc_vm_properties['name'], nc_vm_properties['mac']))

            # Getting Nuage vm_interface for this VM via its MAC
            nc_vm_properties['vm_interface'] = nc.user.vm_interfaces.get_first(
                filter="MAC == '{0:s}'".format(nc_vm_properties['mac']))
            if nc_vm_properties['vm_interface'] is None:
                logger.error(
                    'VM {0:s} with MAC address {1:s} is not known in Nuage, skipping it'
                    .format(nc_vm_properties['name'],
                            nc_vm_properties['mac']))
                continue

            # Getting Nuage vport for this VM
            nc_vm_properties['vport'] = vsdk.NUVPort(
                id=nc_vm_properties['vm_interface'].vport_id)
            try:
                nc_vm_properties['vport'].fetch()
            except BambouHTTPError:
                logger.error(
                    'VM {0:s} with MAC address {1:s} has a vm_interface but no vport in Nuage, this should not be possible... Skipping it'
                    .format(nc_vm_properties['name'],
                            nc_vm_properties['mac']))
                continue

            logger.debug(
                'Found vm_interface and vport for VM {0:s} with MAC address {1:s}'
                .format(nc_vm_properties['name'], nc_vm_properties['mac']))

            # Checking regex's on VMs
            nc_vm_pgs = []
            for regex in list(mapping_list.keys()):
                logger.debug(
                    'Checking regex "{0:s}" on VM {1:s} with OS {2:s}'.format(
                        regex, nc_vm_properties['name'],
                        nc_vm_properties['os']))
                pattern = re.compile(regex)
                if pattern.match(nc_vm_properties['os']):
                    logger.debug(
                        'Found match: regex "{0:s}" and VM OS "{1:s}", adding to the task list to hand over to Nuage.'
                        .format(regex, nc_vm_properties['os']))
                    nc_vm_pgs.append(mapping_list[regex])

            if len(nc_vm_pgs) > 0:
                logger.debug(
                    'Handing task over to Nuage part to set {0:d} Policy Groups on VM {1:s}'
                    .format(len(nc_vm_pgs), nc_vm_properties['name']))
                update_nuage_policy_group(
                    logger=logger,
                    nc=nc,
                    nc_vm_properties=nc_vm_properties,
                    nc_vm_pgs=nc_vm_pgs,
                    remove_policygroups=remove_policygroups)

    logger.info('All done!')
    return 0