def test_retrieve_various(self):
    """Check which service calls succeed on a disconnected (anonymous) session.

    RetrieveInternalContent()/RetrieveContent() are expected to work even
    without authentication; RetrieveHardwareUptime() is expected to be
    rejected with NotAuthenticated.
    """
    conn = SmartConnect(host=self.options.host,
                        user=self.options.user,
                        pwd=self.options.pwd,
                        port=self.options.port)
    Disconnect(conn)
    # Anonymous-allowable calls: any exception here is a test failure.
    try:
        conn.RetrieveInternalContent()
        conn.RetrieveContent()
    except Exception as err:
        self.fail("Unexpected exception for anon-allowable calls: %s" % err)
    # This call requires an authenticated session; NotAuthenticated is the
    # expected outcome and is merely logged.
    try:
        self.hostSystem.RetrieveHardwareUptime()
    except Vim.Fault.NotAuthenticated as err:
        Log("Caught NotAuthenticated exception: %s" % err)
def init(hostname, user, passwd, vmname, vmxpath, guestuser, guestpwd,
         guestrootuser, guestrootpassword, powerOn=True, getIntCont=False):
    """Connect to a host and set up the global test objects.

    Connects via SmartConnect, locates (registering if needed) the VM named
    *vmname*, builds guest authentication objects, and optionally powers the
    VM on.

    Returns a list:
      [svcInst, virtualMachine, guestAdminAuth, guestAuth, guestAuthBad]
    or, when getIntCont is True:
      [svcInst, svcInstIntCont, virtualMachine, guestAdminAuth, guestAuth,
       guestAuthBad]

    Note: svcInstIntCont and guestAdminAuth use "" as a "not set" sentinel,
    matching historical caller expectations.
    """
    # Connect and get the Service Instance.
    # Make sure we get the proper version (dev).
    svcInst = SmartConnect(host=hostname, user=user, pwd=passwd)
    svcInstIntCont = ""
    if getIntCont:
        svcInstIntCont = svcInst.RetrieveInternalContent()
    # Find the vm if it's there.
    virtualMachine = folder.Find(vmname)
    # If it's not there, maybe we just rebooted and it lost its config,
    # so try to register and re-find.
    if virtualMachine is None:
        Log("Registering " + vmxpath)
        folder.Register(vmxpath)
        virtualMachine = folder.Find(vmname)
    # Set up a guest auth object with root privs (only when a root user
    # was supplied; "" otherwise).
    guestAdminAuth = ""
    if guestrootuser != "":
        guestAdminAuth = npAuth(username=guestrootuser,
                                password=guestrootpassword,
                                interactiveSession=False)
    # Set up guest auth objects (good and bad).
    guestAuth = npAuth(username=guestuser, password=guestpwd,
                       interactiveSession=False)
    guestAuthBad = npAuth(username="******", password="******",
                          interactiveSession=False)
    # Power on the VM if needed.
    if powerOn and virtualMachine.GetRuntime().GetPowerState() != \
            Vim.VirtualMachine.PowerState.poweredOn:
        Log("Powering on")
        vm.PowerOn(virtualMachine)
    if not getIntCont:
        globs = [svcInst, virtualMachine, guestAdminAuth, guestAuth,
                 guestAuthBad]
    else:
        globs = [svcInst, svcInstIntCont, virtualMachine, guestAdminAuth,
                 guestAuth, guestAuthBad]
    return globs
class TestHostProfileEngine(unittest.TestCase):
    """Tests most of the interesting commands for the host profile engine.
    """

    def setOptions(self, options):
        """ Command line options """
        self.options = options

    def cleanupTestDatastore(self):
        """Remove the test NAS datastore from the (single) host, if present."""
        hostView = self.si.content.viewManager.CreateContainerView(
            self.si.content.rootFolder, [Vim.HostSystem], True)
        if len(hostView.view) != 1:
            raise Exception('Unable to find host to clean up NAS datastores')
        datastoreSystem = hostView.view[0].configManager.datastoreSystem
        for ds in datastoreSystem.datastore:
            if ds.summary.name == TEST_DATASTORE_NAME:
                datastoreSystem.RemoveDatastore(ds)
                break

    def setUp(self):
        """ Setting test suite """
        # BUGFIX: the original fetched options and discarded them, then read
        # self.options unconditionally — an AttributeError if setOptions()
        # was never called. Use the fetched options as a fallback.
        options = get_options()
        if not hasattr(self, 'options'):
            self.options = options
        self.si = SmartConnect(host=self.options.host,
                               user=self.options.user,
                               preferredApiVersions=self.options.vimversion,
                               pwd=self.options.password)
        # Make sure that the test datastore is not on the system in case a
        # previous test run had failed and left the test datastore on the
        # system.
        self.cleanupTestDatastore()
        internalContent = self.si.RetrieveInternalContent()
        hostProfileEngine = internalContent.hostProfileEngine
        self.hostProfileManager = hostProfileEngine.hostProfileManager
        self.hostComplianceManager = hostProfileEngine.hostComplianceManager
        self.answerFileData = None
        # Make sure that hostd's cache is valid. This is needed in cases
        # where a user has made changes using localcli, esxcfg, or DCUI that
        # have not propagated to hostd.
        self.refreshHostdCache()

    def tearDown(self):
        """ Reset test suite """
        # The test datastore may not have been deleted if there was an error
        # in the middle of the test. Make sure it gets cleaned up.
        self.cleanupTestDatastore()
        Disconnect(self.si)

    def refreshHostdCache(self):
        """Refresh hostd's cache by doing a no-op Apply operation.
        """
        # Build an empty config spec and apply
        configSpec = Vim.Host.ConfigSpec()
        applyTask = self.hostProfileManager.ApplyHostConfig(configSpec)
        taskTimeout = 2.5 * 60   # 2.5 mins
        waitPeriod = 1
        # Poll until the task reports 100% progress or the timeout elapses.
        # BUGFIX: compare with > 0 instead of != 0 so the loop terminates
        # even if the countdown ever skips exactly zero.
        while applyTask.info.progress != 100 and taskTimeout > 0:
            time.sleep(waitPeriod)
            taskTimeout -= waitPeriod

    def runTest(self):
        """Tests most of the interesting commands for the host profile
        engine, in dependency order.
        """
        self.test_extractProfile()
        self.test_createDefaultProfile()
        self.test_bookKeep()
        self.test_generateAnswerFile()
        self.test_checkCompliance()
        self.test_generateConfigTasks()
        self.test_applyConfigTasks()
        self.restoreOriginalConfig()

    def _copyHostProfile(self, hostProfile):
        """Helper function that makes a copy of the host profile document.
        """
        # TBD: Is there a better way than serializing/deserializing?
        serializedProf = SoapAdapter.Serialize(
            hostProfile, version=newestVersions.get('vim'))
        deserializedProf = SoapAdapter.Deserialize(serializedProf)
        return deserializedProf

    def test_extractProfile(self):
        """Performs a RetrieveProfile() operation and saves the results.
        """
        # Let's do it twice: once to capture the original configuration and
        # once to create a new profile that can be modified and applied.
        self.origConfigProfile = self.hostProfileManager.RetrieveProfile()
        self.testProfile = self._copyHostProfile(self.origConfigProfile)
        # Make sure that the basic components are there
        self.failIf(self.testProfile is None or
                    not hasattr(self.testProfile, 'applyProfile') or
                    self.testProfile.applyProfile is None or
                    not hasattr(self.testProfile, 'defaultComplyProfile') or
                    self.testProfile.defaultComplyProfile is None,
                    'Invalid host profile extracted from host')
        applyProfile = self.testProfile.applyProfile
        # Now make sure that at least the Storage and NAS profiles are there.
        self.failIf(not hasattr(applyProfile, 'storage') or
                    applyProfile.storage is None,
                    'Extracted host profile missing Storage profile')
        storageProfile = applyProfile.storage
        self.failIf(not hasattr(storageProfile, 'nasStorage') or
                    storageProfile.nasStorage is None,
                    'Extracted host profile missing NAS Storage profile')

    def test_createDefaultProfile(self):
        """Performs a CreateDefaultProfile() operation to create a NFS
        datastore in the test profile.
        """
        newNasDatastore = self.hostProfileManager.CreateDefaultProfile(
            profileType=Vim.Profile.Host.NasStorageProfile,
            profileTypeName=None)
        self.failIf(newNasDatastore is None,
                    'Failed to create new NAS datastore profile instance')
        # Set the parameters for the Nas datastore. Pick a NFS host that
        # everyone should have access to, but a remotePath that probably
        # no one has mounted.
        for param in newNasDatastore.policy[0].policyOption.parameter:
            if param.key == 'localPath':
                param.value = TEST_DATASTORE_NAME
            elif param.key == 'remoteHost':
                param.value = 'build-toolchain.eng.vmware.com'
            elif param.key == 'remotePath':
                param.value = '/toolchain/lin32/python-2.5/man'
        # Save it in the testProfile
        self.testProfile.applyProfile.storage.nasStorage.append(
            newNasDatastore)

    def test_bookKeep(self):
        """Performs a BookKeep() operation on the test profile.
        """
        verifiedProfile = self.hostProfileManager.BookKeep(self.testProfile)
        defComplyProf = self.hostComplianceManager.GetDefaultCompliance(
            verifiedProfile.applyProfile)
        defComplyProf.applyProfile = verifiedProfile.applyProfile
        self.testProfile = defComplyProf

    def test_checkCompliance(self, expectedNonCompliant=True):
        """Checks compliance with the test profile.
        """
        complyResult = self.hostComplianceManager.CheckHostCompliance(
            self.testProfile, deferredParam=self.answerFileData)
        if expectedNonCompliant:
            self.failUnless(
                complyResult.complianceStatus == 'nonCompliant',
                'Unexpected compliance result: ' + str(complyResult))
        else:
            self.failUnless(
                complyResult.complianceStatus == 'compliant',
                'Unexpected compliance result: ' + str(complyResult))

    def test_generateAnswerFile(self):
        """Generates a default answer file to be used with the
        checkCompliance and generateConfigTasks tests.
        """
        execRes = self.hostProfileManager.Execute(
            self.testProfile.applyProfile)
        if execRes.status == 'needInput':
            self.answerFileData = execRes.requireInput
        else:
            self.failIf(
                execRes.status != 'success',
                'Unexpected Execute result in generateAnswerFile: ' +
                str(execRes))

    def test_generateConfigTasks(self):
        """Generates the config spec for applying the test profile.
        """
        execRes = self.hostProfileManager.Execute(
            self.testProfile.applyProfile, deferredParam=self.answerFileData)
        self.failIf(
            execRes.status != 'success',
            'Unexpected Execute result in generateConfigTasks: ' +
            str(execRes))
        updatedConfigSpec = self.hostProfileManager.UpdateTaskConfigSpec(
            configSpec=execRes.configSpec)
        foundNfsTask = False
        for task in updatedConfigSpec.taskDescription:
            if 'hpTestNfsDatastore' in task.message:
                foundNfsTask = True
                break
        self.failIf(
            foundNfsTask == False,
            'Failed to find task data and message for new NAS datastore')
        # Save the part needed for ApplyConfigTasks
        self.configSpec = updatedConfigSpec.configSpec

    def _waitForTask(self, task):
        """Helper method that waits for a task to complete.
        """
        while task.info.state == 'running':
            time.sleep(1)

    def test_applyConfigTasks(self):
        """Applies the config spec created for the test profile.
        """
        task = self.hostProfileManager.ApplyHostConfig(
            configSpec=self.configSpec)
        # CONSISTENCY: use the shared wait helper instead of duplicating
        # the polling loop inline.
        self._waitForTask(task)
        self.failIf(task.info.state != 'success',
                    'Apply task failed:\n' + str(task.info))
        # Check compliance again. Should be compliant this time.
        self.test_checkCompliance(expectedNonCompliant=False)

    def restoreOriginalConfig(self):
        """Restores the original configuration according to the host profile
        document collected at the beginning of the test.
        """
        # The simplest way to implement this is to replace the testProfile
        # with the original profile and then re-run the
        # test_generateConfigTasks() and test_applyConfigTasks.
        self.testProfile = self.origConfigProfile
        self.test_generateConfigTasks()
        self.test_applyConfigTasks()
class TestAnon(unittest.TestCase):
    """Tests behavior of anonymous (unauthenticated) sessions."""

    def setOptions(self, options):
        """ Command line options """
        self.options = options

    def setUp(self):
        """ Setting test suite """
        self.si = SmartConnect(host=self.options.host,
                               user=self.options.user,
                               pwd=self.options.pwd,
                               port=self.options.port)
        self.authenticated = 1
        print("Retriving some MOs while authenticated:")
        self.content = self.si.RetrieveContent()
        rootFolder = self.content.GetRootFolder()
        dataCenter = rootFolder.GetChildEntity()[0]
        hostFolder = dataCenter.hostFolder
        host = hostFolder.childEntity[0]
        self.hostSystem = host.host[0]
        self.vmFolder = dataCenter.vmFolder
        self.configManager = self.hostSystem.GetConfigManager()
        internalContent = self.si.RetrieveInternalContent()
        hostProfileEngine = internalContent.hostProfileEngine
        self.hostProfileManager = hostProfileEngine.hostProfileManager
        self.storageResourceManager = self.content.storageResourceManager
        # BUGFIX(perf): reuse the internalContent already fetched above
        # instead of issuing a second RetrieveInternalContent() round-trip.
        self.dvsManager = internalContent.hostDistributedVirtualSwitchManager
        print(repr(self.hostProfileManager))
        print(repr(self.storageResourceManager))
        print(repr(self.dvsManager))
        print(repr(self.vmFolder))
        # Drop the authenticated session; the tests below operate anonymously.
        Disconnect(self.si)
        self.authenticated = 0
        self.si = None

    def tearDown(self):
        # Only disconnect if setUp left an authenticated session behind.
        if self.authenticated:
            Disconnect(self.si)

    def test_task_methods_with_null_arguments(self):
        # todo : test non anonymous methods with null arguments
        pass

    def test_retrieve_various(self):
        """Verify anon-allowable calls succeed and privileged calls are
        rejected on a disconnected session.
        """
        si = SmartConnect(host=self.options.host,
                          user=self.options.user,
                          pwd=self.options.pwd,
                          port=self.options.port)
        Disconnect(si)
        # These should succeed even without authentication.
        try:
            internalContent = si.RetrieveInternalContent()
            content = si.RetrieveContent()
        except Exception as e:
            self.fail("Unexpected exception for anon-allowable calls: %s" % e)
        # This requires authentication; NotAuthenticated is expected.
        try:
            uptime = self.hostSystem.RetrieveHardwareUptime()
        except Vim.Fault.NotAuthenticated as e:
            Log("Caught NotAuthenticated exception: %s" % e)

    def test_invalid_login(self):
        """Repeated logins with a bad password must raise InvalidLogin."""
        print("test_invalid_login")
        # BUGFIX(perf): bad_pwd is loop-invariant; build it once.
        bad_pwd = self.options.pwd + "JUNK"
        for i in xrange(self.options.num_bad_logins):
            try:
                print("iteration: " + repr(i))
                si = SmartConnect(host=self.options.host,
                                  user=self.options.user,
                                  pwd=bad_pwd,
                                  port=self.options.port)
                Disconnect(si)
            except Vim.Fault.InvalidLogin as e:
                Log("Caught InvalidLogin exception")
            else:
                self.fail('InvalidLogin not thrown')

    def runTest(self):
        self.test_invalid_login()
        self.test_retrieve_various()
class VvolDs:
    """Exercises VVOL datastore create/remove/update operations against a VC,
    its hosts, and an SMS storage manager / VASA provider.
    """

    def __init__(self, args):
        # Connect
        self._si = SmartConnect(host=args.GetKeyValue("host"),
                                user=args.GetKeyValue("user"),
                                pwd=args.GetKeyValue("pwd"))
        self._vc = args.GetKeyValue("host")
        self._user = args.GetKeyValue("user")
        self._pwd = args.GetKeyValue("pwd")
        self._vpuser = args.GetKeyValue("vpuser")
        self._vppwd = args.GetKeyValue("vppwd")
        self._nfs = args.GetKeyValue("nfs")
        self._path = args.GetKeyValue("mount")
        self._pe = args.GetKeyValue("pe")
        self._vasaMgr = self._si.RetrieveInternalContent().GetVasaManager()
        # BUGFIX: _bulkDs was only ever assigned inside TestBulkCreateVvolDs;
        # running TestBulkRemoveVvolDs first raised AttributeError. Initialize
        # it here so the "skipping" path works.
        self._bulkDs = None
        self.PopulateHosts()
        self.PopulateSmsInfo()

    def __del__(self):
        Disconnect(self._si)

    def banner(self, fn):
        """Log a banner line naming the test function being run."""
        VerboseLog(logInfo, " " + fn.__name__ + " ", msgend='')

    def PrintExistingDs(self):
        """Log the names of all datastores currently on the primary host."""
        for ds in self._host.GetConfigManager().GetDatastoreSystem().datastore:
            VerboseLog(logTrivia, ds.name)

    # Create a datastore on a host
    def CreateDs(self, spec):
        try:
            VerboseLog(logTrivia, "{ Creating: " + spec.GetScId())
            dsId = self._host.GetConfigManager().GetDatastoreSystem() \
                       .CreateVvolDatastore(spec)
            return dsId
        except:
            # Propagate to the caller; individual tests decide whether the
            # fault was expected.
            raise
        finally:
            VerboseLog(logTrivia, "}")

    # Remove a datastore from host
    def removeDs(self, ds):
        try:
            VerboseLog(logTrivia, "{ Removing: " + ds.name)
            self._host.GetConfigManager().GetDatastoreSystem() \
                .RemoveDatastore(ds)
        except:
            raise
        finally:
            VerboseLog(logTrivia, "}")

    # Add NFS PE to all the hosts in the VC
    def addPE(self):
        try:
            VerboseLog(logTrivia, "{ Adding PE: " + self._nfs)
            spec = Vim.Host.NasVolume.Specification()
            spec.SetRemoteHost(self._nfs)
            spec.SetRemotePath(self._path)
            spec.SetLocalPath(self._pe)
            spec.SetAccessMode("readWrite")
            for host in self._hosts:
                VerboseLog(logTrivia, host)
                try:
                    host.GetConfigManager().GetDatastoreSystem() \
                        .CreateNasDatastore(spec)
                # Already mounted on this host: not an error.
                # BUGFIX: the original listed each of these handlers twice;
                # the duplicates were dead code and have been removed.
                except Vim.Fault.DuplicateName:
                    pass
                except Vim.Fault.AlreadyExists:
                    pass
                except:
                    traceback.print_exc(file=sys.stdout)
                    VerboseLog(logTrivia, "failed")
        finally:
            VerboseLog(logTrivia, "}")

    # Populate host list by iterating the first dc
    def PopulateHosts(self):
        VerboseLog(logTrivia, "{ Getting hosts: ")
        self._dc = invt.GetRootFolder().GetChildEntity()[0]
        hostFolder = self._dc.hostFolder
        self._hosts = []
        if len(hostFolder.childEntity) > 0:
            # One host per compute resource; the first becomes the primary
            # host used by the single-host tests.
            for computeResource in hostFolder.childEntity:
                self._hosts.append(computeResource.host[0])
            self._host = self._hosts[0]
            VerboseLog(logTrivia, ", ".join(map(str, self._hosts)))
        else:
            raise RuntimeError('no hosts found')
        VerboseLog(logTrivia, "}")

    # Populate sms storagemgr and a storage container id to test
    def PopulateSmsInfo(self):
        VerboseLog(logTrivia, "{ Getting SMS info: ")
        self._smsStorageMgr = GetStorageManager(self._vc)
        scResult = self._smsStorageMgr.QueryStorageContainer(None)
        if scResult is not None:
            print(scResult)
            if len(scResult.storageContainer) <= 0:
                raise RuntimeError('no storage containers found')
        provInfo = FindProvider(self._smsStorageMgr, self._nfs)
        if provInfo is None:
            raise RuntimeError('no provider found')
        # Find a storage container backed by our provider.
        self._sc = None
        for sc in scResult.storageContainer:
            for prov in sc.providerId:
                if prov == provInfo.uid:
                    self._sc = sc.uuid
                    self._arrayIds = sc.arrayId
        if self._sc is None:
            raise RuntimeError('no storage containers found')
        # Find the array (and its priority) the provider reports for that
        # container.
        arrayFound = False
        for id in self._arrayIds:
            for relatedArray in provInfo.relatedStorageArray:
                if id == relatedArray.arrayId:
                    self._arrayId = id
                    self._arrayPriority = relatedArray.priority
                    self._provUrl = provInfo.url
                    arrayFound = True
                    break
            if arrayFound:
                break
        if not arrayFound:
            raise Exception('array not found')

    # Test create vvol by specifying a dummy storage container id
    # and expects NotFound exception
    # 1. Attemtps create dummy datastore and expects excpetion
    def TestDummyCreateVvolDs(self):
        self.banner(self.TestDummyCreateVvolDs)
        VerboseLog(logTrivia, self._host)
        spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
        spec.SetScId('dummy')
        spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
        try:
            self.CreateDs(spec)
        except Vim.Fault.NotFound:
            VerboseLog(logInfo, "passed")
        else:
            # BUGFIX: the original logged nothing when the expected fault
            # did not fire, silently hiding a test failure.
            VerboseLog(logInfo, "failed")

    # Test create vvol datastore by specifying a valid storage container id.
    # Once created, also removes the datastore from the host
    # 1. create datastore on host
    # 2. remove datastore from host
    def TestCreateVvolDs(self):
        self.banner(self.TestCreateVvolDs)
        VerboseLog(logTrivia, self._host)
        scId = self._sc
        spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
        spec.SetScId(scId)
        spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
        ret = True
        try:
            ds = self.CreateDs(spec)
            self.removeDs(ds)
        except:
            VerboseLog(logInfo, traceback.format_exc())
            ret = False
        VerboseLog(logInfo, "passed" if ret else "failed")

    # Test destroy vvol datastore
    # 1. create datastore on host
    # 2. destroy datastore from host
    def TestDestroyVvolDs(self):
        self.banner(self.TestDestroyVvolDs)
        scId = self._sc
        spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
        spec.SetScId(scId)
        spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
        ret = True
        try:
            VerboseLog(logTrivia, "{ Creating bulk: ")
            create_task = self._vasaMgr.CreateVVolDatastore(spec, self._hosts)
            task.WaitForTask(create_task)
            VerboseLog(logVerbose, create_task.info.result)
            for result in create_task.info.result:
                if result.result == 'fail':
                    raise Exception("create failed for host " + result.hostKey)
            # Destroy (rather than remove) the first matching test datastore.
            for ds in self._host.GetConfigManager().GetDatastoreSystem() \
                          .datastore:
                if ds.name.startswith('vvol-test-ds:'):
                    vimutil.InvokeAndTrack(ds.Destroy)
                    break
        except:
            VerboseLog(logTrivia, traceback.format_exc())
            ret = False
        finally:
            VerboseLog(logTrivia, "}")
        VerboseLog(logInfo, "passed" if ret else "failed")

    # Test create vvol datastore on a set of hosts.
    # 1. invokes batch create vvol api on vasamanager
    # 2. wait for the task to complete
    # 3. search the result if there is a failure on any of the host
    def TestBulkCreateVvolDs(self):
        self.banner(self.TestBulkCreateVvolDs)
        scId = self._sc
        spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
        spec.SetScId(scId)
        spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
        ret = True
        try:
            VerboseLog(logTrivia, "{ Creating bulk: ")
            create_task = self._vasaMgr.CreateVVolDatastore(spec, self._hosts)
            task.WaitForTask(create_task)
            VerboseLog(logVerbose, create_task.info.result)
            for result in create_task.info.result:
                if result.result == 'fail':
                    VerboseLog(logInfo,
                               "create failed for host " + result.hostKey)
                    ret = False
            # store the ds ref for TestBulkRemoveVvolDs
            self._bulkDs = create_task.info.result[0].ds
        except:
            VerboseLog(logTrivia, traceback.format_exc())
            ret = False
        finally:
            VerboseLog(logTrivia, "}")
        VerboseLog(logInfo, "passed" if ret else "failed")

    # Test remove non-vvol datastore from a set of hosts.
    # 1. invokes batch remove vvol api on vasamanager
    # 2. Expect the result to be invalid datastore
    def TestBulkRemoveNonVvolDs(self):
        self.banner(self.TestBulkRemoveNonVvolDs)
        ret = True
        try:
            VerboseLog(logTrivia, "{ Removing bulk: ")
            dc = invt.GetRootFolder().GetChildEntity()[0]
            for ids in dc.GetDatastore():
                if ids.GetName().startswith(self._pe):
                    VerboseLog(logTrivia, "Removing " + ids.GetName())
                    self._vasaMgr.RemoveVVolDatastore(ids, self._hosts)
        except Vim.Fault.InvalidDatastore:
            # Expected: the PE-backed datastore is not a VVOL datastore.
            pass
        except:
            VerboseLog(logTrivia, traceback.format_exc())
            ret = False
        finally:
            VerboseLog(logTrivia, "}")
        VerboseLog(logInfo, "passed" if ret else "failed")

    # Test remove vvol datastore from a set of hosts.
    # 1. invokes batch remove vvol api on vasamanager
    # 2. wait for the task to complete
    # 3. search the result if there is a failure on any of the host
    def TestBulkRemoveVvolDs(self):
        self.banner(self.TestBulkRemoveVvolDs)
        if self._bulkDs is None:
            # TestBulkCreateVvolDs did not run (or failed); nothing to remove.
            VerboseLog(logInfo, "skipping")
            return
        ret = True
        try:
            VerboseLog(logTrivia, "{ Removing bulk: ")
            create_task = self._vasaMgr.RemoveVVolDatastore(self._bulkDs,
                                                            self._hosts)
            task.WaitForTask(create_task)
            VerboseLog(logVerbose, create_task.info.result)
            for result in create_task.info.result:
                if result.result == 'fail':
                    VerboseLog(logInfo,
                               "remove failed for host " + result.hostKey)
                    ret = False
        except:
            VerboseLog(logTrivia, traceback.format_exc() + "}")
            ret = False
        finally:
            VerboseLog(logTrivia, "}")
        VerboseLog(logInfo, "passed" if ret else "failed")

    # Test remove vvol datastore with an existing VM having its disk on this
    # datastore. Expects Resource in use exception when there is a VM already.
    # 1. cleanup any test datastore
    # 2. cleanup any vvoldummyvms
    # 3. create vvol datastore on a host
    # 4. create vm with a disk on that datastore
    # 5. try remove datastore, and expects resource in use exception
    # 6. now remove the vm
    # 7. remove the datastore
    def TestRemoveVvolDsWithVms(self):
        self.banner(self.TestRemoveVvolDsWithVms)
        VerboseLog(logTrivia, self._host)
        scId = self._sc
        vmname = "vvoldummy"
        self.CleanupExistingTestDatastores()
        self.CleanupVm(vmname)
        spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
        spec.SetScId(scId)
        spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
        testvm = None
        ds = None
        # BUGFIX: the original printed "failed" and then fell through to an
        # unconditional "passed"; use the ret-flag pattern of the sibling
        # tests so only one verdict is logged.
        ret = True
        try:
            ds = self.CreateDs(spec)
            testvm = vm.CreateQuickDummy(vmname, host=self._host,
                                         datastoreName=ds.name,
                                         dc=self._dc.name,
                                         numScsiDisks=1, memory=10)
            self.removeDs(ds)
        except Vim.Fault.ResourceInUse:
            # Expected: the VM's disk pins the datastore. Clean up.
            if testvm is not None:
                vm.Destroy(testvm)
            if ds is not None:
                self.removeDs(ds)
        except:
            VerboseLog(logInfo, traceback.format_exc())
            ret = False
        VerboseLog(logInfo, "passed" if ret else "failed")

    # Test update vvol datastore. Following steps are executed inorder
    # 1. create datastore on host
    # 2. bump the VP priority via SCST backend
    # 3. sleep for a while and check the datastore info for updated
    #    information
    def TestUpdateVvolDs(self):
        self.banner(self.TestUpdateVvolDs)
        VerboseLog(logTrivia, self._host)
        scId = self._sc
        spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
        spec.SetScId(scId)
        spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
        ret = True
        try:
            vvolds = self.CreateDs(spec)
            parseResult = urlparse.urlparse(self._provUrl)
            path = parseResult.path.lstrip('/')
            # get provider name from the path
            provName = re.sub(r'/.*$', "", path)
            # strip off the sms prepended namespace
            arrayId = re.sub(r'.*:', "", self._arrayId)
            newPriority = self._arrayPriority + 10
            # bump priority of VP
            cmd = ("perl -I/usr/local/scst/scst_scripts/vasa_scripts/ "
                   "/usr/local/scst/scst_scripts/vasa_scripts/"
                   "updateVPArrayPriority.pl " + arrayId + " " + provName +
                   " " + str(newPriority))
            RunCmd(cmd, self._nfs, self._vpuser, self._vppwd)
            # Allow time for the priority change to propagate to the host.
            time.sleep(60)
            updated = False
            for vp in vvolds.info.vvolDS.vasaProviderInfo:
                if vp.provider.url == self._provUrl:
                    for arrayState in vp.arrayState:
                        if arrayState.arrayId == self._arrayId:
                            if arrayState.priority != newPriority:
                                raise Exception("priority not updated")
                            else:
                                updated = True
                                break
                if updated:
                    break
            if not updated:
                raise Exception("no vp found")
            self.removeDs(vvolds)
        except:
            ret = False
            VerboseLog(logInfo, traceback.format_exc())
        VerboseLog(logInfo, "passed" if ret else "failed")

    # Helper function to stop vpxa daemon on host
    # 1. stop vpxa on host by ssh
    # 2. wait till the host status reflects that it is not connected
    def StopVpxa(self):
        cmd = "/etc/init.d/vpxa stop>/dev/null 2>&1"
        RunCmd(cmd, self._host.GetName(), "root", "")
        count = 0
        while count < 20:
            time.sleep(20)
            count += 1
            if self._host.GetRuntime().GetConnectionState() != "connected":
                break
        if count == 20:
            raise Exception("host not disconnected")

    # Helper function to start vpxa daemon on host
    # 1. start vpxa on host by ssh
    # 2. wait till the host status reflects that it is connected
    def StartVpxa(self):
        cmd = "/etc/init.d/vpxa start>/dev/null 2>&1"
        RunCmd(cmd, self._host.GetName(), "root", "")
        count = 0
        while count < 20:
            time.sleep(20)
            count += 1
            if self._host.GetRuntime().GetConnectionState() == "connected":
                break
        if count == 20:
            raise Exception("host not connected")

    # Test create apis on disconnected host and expect that HostNotConnected
    # fault
    # 1. Stop vpxa on a host say AAA
    #    invoke create vvol datastore
    #    Expect the HostNotConnected fault
    # 2. Invoke bulk api
    #    Expect HostNotConnected fault for host AAA and rest of the hosts
    #    succeed
    # 3. Start vpxa
    #    Create vvol datastore on host AAA
    #    Stop vpxa
    #    Invoke bulk remove method
    #    Expect HostNotConnected fault for host AAA and rest of the hosts
    #    succeed
    # 4. Do cleanup by starting vpxa and removing vvol datastore
    def TestDisconnectedHost(self):
        self.banner(self.TestDisconnectedHost)
        VerboseLog(logTrivia, self._host)
        scId = self._sc
        spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
        spec.SetScId(scId)
        spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
        ret = True
        try:
            self.StopVpxa()
            try:
                VerboseLog(logInfo, "{Testing simple create")
                ds = self.CreateDs(spec)
            except vmodl.fault.HostNotConnected:
                pass
            except:
                raise
            finally:
                VerboseLog(logInfo, "}")
            try:
                VerboseLog(logInfo, "{Testing bulk create")
                create_task = self._vasaMgr.CreateVVolDatastore(spec,
                                                                self._hosts)
                task.WaitForTask(create_task)
                VerboseLog(logVerbose, create_task.info.result)
                for result in create_task.info.result:
                    if result.result == 'fail':
                        hostid = self._host.__class__.__name__ + ":" + \
                            self._host._moId
                        if result.hostKey == hostid:
                            # Only a HostNotConnected fault on the stopped
                            # host is acceptable.
                            if isinstance(result.fault,
                                          vmodl.fault.HostNotConnected) \
                                    == False:
                                VerboseLog(logInfo,
                                           "failed for host " +
                                           result.hostKey)
                                raise Exception("unexpected exception")
                        else:
                            raise Exception("unexpected failure")
            finally:
                VerboseLog(logInfo, "}")
            self.StartVpxa()
            ds = self.CreateDs(spec)
            self.StopVpxa()
            try:
                VerboseLog(logInfo, "{Testing bulk remove")
                delete_task = self._vasaMgr.RemoveVVolDatastore(ds,
                                                                self._hosts)
                task.WaitForTask(delete_task)
                VerboseLog(logVerbose, delete_task.info.result)
                for result in delete_task.info.result:
                    if result.result == 'fail':
                        hostid = self._host.__class__.__name__ + ":" + \
                            self._host._moId
                        if result.hostKey == hostid:
                            if isinstance(result.fault,
                                          vmodl.fault.HostNotConnected) \
                                    == False:
                                VerboseLog(logInfo,
                                           "failed for host " +
                                           result.hostKey)
                                raise Exception("unexpected exception")
                        else:
                            raise Exception("unexpected failure")
            finally:
                VerboseLog(logInfo, "}")
            self.StartVpxa()
            self.removeDs(ds)
        except:
            VerboseLog(logInfo, traceback.format_exc())
            ret = False
        VerboseLog(logInfo, "passed" if ret else "failed")

    # Test create directory on DatastoreNamespaceManager via hostd
    # 1. create vvol datastore on host
    # 2. create a directory on the vvol datastore
    # 3. query for the directory
    # 4. remove the directory
    # 5. remove the datastore
    def TestCreateDir(self):
        self.banner(self.TestCreateDir)
        VerboseLog(logTrivia, self._host)
        scId = self._sc
        spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
        spec.SetScId(scId)
        spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
        ret = True
        try:
            vvolds = self.CreateDs(spec)
            # Talk to hostd directly for the namespace-manager operations.
            session = vimsupport.CreateSession(
                self._host.GetName(), 443, 'root', '',
                newestVersions.GetNamespace('vim'))
            stub = session.GetStub()
            si = Vim.ServiceInstance('ServiceInstance', stub)
            isc = si.RetrieveContent()
            dnm = isc.GetDatastoreNamespaceManager()
            ds = Vim.Datastore(scId, stub)
            stableName = dnm.CreateDirectory(ds, ".vSphere-HA")
            browser = ds.GetBrowser()
            path = "[" + ds.GetName() + "].vSphere-HA"
            task = browser.Search(path, None)
            session.WaitForTask(task)
            VerboseLog(logVerbose, task.GetInfo())
            dnm.DeleteDirectory(stableName)
            self.removeDs(vvolds)
        except:
            VerboseLog(logInfo, traceback.format_exc())
            ret = False
        VerboseLog(logInfo, "passed" if ret else "failed")

    # Test VM migration on a vvol datastore
    # 1. create vvol datastore on set of hosts
    # 2. create a vm on src host
    # 3. power on vm
    # 4. migrate vm to dest host
    # 5. destroy vm
    # 6. remove datastore from the set of hosts
    def TestVmMigrate(self):
        self.banner(self.TestVmMigrate)
        if len(self._hosts) <= 1:
            VerboseLog(logInfo, "not enough hosts..skipping")
            # BUGFIX: the original fell through and indexed self._hosts[1],
            # crashing on single-host setups. Bail out as the log implies.
            return
        vmname = "test_migrate_vvol_vm"
        self.CleanupVm(vmname)
        host1 = self._hosts[0]
        host2 = self._hosts[1]
        scId = self._sc
        spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
        spec.SetScId(scId)
        spec.SetName("vvol-test-ds:%s" % random.randint(1, 1000))
        ret = True
        try:
            VerboseLog(logTrivia, "{ Creating bulk: ")
            create_task = self._vasaMgr.CreateVVolDatastore(spec, self._hosts)
            task.WaitForTask(create_task)
            VerboseLog(logVerbose, create_task.info.result)
            for result in create_task.info.result:
                if result.result == 'fail':
                    VerboseLog(logInfo,
                               "create failed for host " + result.hostKey)
                    raise Exception("unexpected failure")
            ds = create_task.info.result[0].ds
            testvm = vm.CreateQuickDummy(vmname, host=host1,
                                         datastoreName=ds.name,
                                         dc=self._dc.name,
                                         numScsiDisks=1, memory=12)
            vm.PowerOn(testvm)
            migrate_task = testvm.Migrate(
                host2.parent.resourcePool, host2,
                Vim.VirtualMachine.MovePriority.highPriority, None)
            task.WaitForTask(migrate_task)
            vm.PowerOff(testvm)
            vm.Destroy(testvm)
            VerboseLog(logTrivia, "{ Removing bulk: ")
            delete_task = self._vasaMgr.RemoveVVolDatastore(ds, self._hosts)
            task.WaitForTask(delete_task)
            VerboseLog(logVerbose, delete_task.info.result)
            for result in delete_task.info.result:
                if result.result == 'fail':
                    VerboseLog(logInfo,
                               "remove failed for host " + result.hostKey)
                    raise Exception("unexpected failure in bulk remove")
        except:
            VerboseLog(logTrivia, traceback.format_exc())
            ret = False
        VerboseLog(logInfo, "passed" if ret else "failed")

    # utility function to delete any existing vvol-test datastores
    def CleanupExistingTestDatastores(self):
        for host in self._hosts:
            VerboseLog(logTrivia, host)
            for ds in host.GetConfigManager().GetDatastoreSystem().datastore:
                if ds.name.startswith('vvol-test-ds:'):
                    host.GetConfigManager().GetDatastoreSystem() \
                        .RemoveDatastore(ds)

    # utility function to cleanup VMs with the given name
    def CleanupVm(self, vmname):
        oldVms = folder.FindPrefix(vmname)
        for oldVm in oldVms:
            if oldVm.GetRuntime().GetPowerState() == \
                    Vim.VirtualMachine.PowerState.poweredOn:
                vm.PowerOff(oldVm)
            vm.Destroy(oldVm)
def main():
    """Drive the VMCI access manager test: grant/revoke/query service tags
    on two dummy VMs for the configured number of iterations and log a final
    PASS/FAIL verdict.
    """
    options, remainingOptions = ParseArgs(sys.argv[1:])
    # Connect
    si = SmartConnect(host=options.host, user=options.user, pwd=options.pwd)
    atexit.register(Disconnect, si)
    if options.verbose:
        logger.setLevel(logging.DEBUG)
    status = "PASS"
    # Get hold of the VMCI access manager through the host system
    # config manager
    rootFolder = si.content.GetRootFolder()
    dataCenter = rootFolder.GetChildEntity()[0]
    hostFolder = dataCenter.hostFolder
    host = hostFolder.childEntity[0]
    hostSystem = host.host[0]
    configManager = hostSystem.GetConfigManager()
    vmciAccessManager = si.RetrieveInternalContent().vmciAccessManager
    for iteration in range(options.iter):
        try:
            logger.info("Starting iteration %d." % (iteration + 1))
            # Fresh dummy VM #1; invalid tags must be rejected, then a grant
            # (with a duplicate entry) must come back deduplicated.
            vm.Delete("VmciAccessMgrTest1", True)
            vm1 = vm.CreateQuickDummy("VmciAccessMgrTest1",
                                      vmxVersion="vmx-07",
                                      memory=4, guest="rhel5Guest")
            TestInvalidServiceTags(vm1, ["foo:", "", "foo:bar:", ":foo"],
                                   vmciAccessManager)
            GrantVmService(vmciAccessManager, vm1,
                           ["foo:bar.foo", "foo:bar.bar", "foo:bar.bar",
                            "bar:bar.bar", "foo"])
            granted = vmciAccessManager.RetrieveGrantedServices(vm1)
            if sorted(granted) != ["bar:bar.bar", "foo", "foo:bar.bar",
                                   "foo:bar.foo"]:
                status = "FAIL"
                raise Exception("Mismatch in services granted to vm1")
            # Fresh dummy VM #2 with its own grants.
            vm.Delete("VmciAccessMgrTest2", True)
            vm2 = vm.CreateQuickDummy("VmciAccessMgrTest2",
                                      vmxVersion="vmx-07",
                                      memory=4, guest="rhel5Guest")
            GrantVmService(vmciAccessManager, vm2,
                           ["foo:bar.foo", "foo:bar.bar", "bar:bar.bar"])
            granted = vmciAccessManager.RetrieveGrantedServices(vm2)
            if sorted(granted) != ["bar:bar.bar", "foo:bar.bar",
                                   "foo:bar.foo"]:
                status = "FAIL"
                raise Exception("Mismatch in services granted to vm2")
            # Multi-VM revoke must strip the tag from both VMs.
            RevokeMultiVmService(vmciAccessManager, [vm1, vm2],
                                 ["foo:bar.foo"])
            granted = vmciAccessManager.RetrieveGrantedServices(vm1)
            granted2 = vmciAccessManager.RetrieveGrantedServices(vm2)
            if "foo:bar.foo" in granted or "foo:bar.foo" in granted2:
                status = "FAIL"
                raise Exception("Services foo:bar.foo still granted to a VM")
            # Both VMs should still be grantees of foo:bar.bar.
            grantees = vmciAccessManager.QueryAccessToService(hostSystem,
                                                              "foo:bar.bar")
            if not VmInVmList(vm1, grantees) or not VmInVmList(vm2, grantees):
                status = "FAIL"
                raise Exception(
                    "Not all VMs reported as beeing a grantee of foo:bar.bar")
            # Revoking with an empty list clears all of vm1's grants.
            RevokeVmService(vmciAccessManager, vm1, [])
            grantees = vmciAccessManager.QueryAccessToService(hostSystem,
                                                              "foo:bar.bar")
            if VmInVmList(vm1, grantees):
                status = "FAIL"
                raise Exception(
                    "vm1 still reported as beeing a grantee of foo:bar.bar")
            if not VmInVmList(vm2, grantees):
                status = "FAIL"
                raise Exception(
                    "vm2 not reported as beeing a grantee of foo:bar.bar")
            granted = vmciAccessManager.RetrieveGrantedServices(vm1)
            if granted != []:
                status = "FAIL"
                raise Exception("vm1 is still granted services")
            # Deleting the VMs must also drop their grants.
            vm.Delete("VmciAccessMgrTest1", True)
            vm.Delete("VmciAccessMgrTest2", True)
            granted = vmciAccessManager.RetrieveGrantedServices(vm2)
            if granted != []:
                status = "FAIL"
                raise Exception("vm2 is still granted services after deletion")
            logger.info("End of iteration %d." % (iteration + 1))
        except Exception as e:
            logger.error("Caught exception : " + str(e))
            status = "FAIL"
    logger.info("TEST RUN COMPLETE: " + status)