Esempio n. 1
0
def RetrieveTests(host):
    """Run RetrieveVmRecoveryInfo and RetrieveLastVmMigrationStatus on every
    .vmx file found (one folder deep) on the configured datastore, logging
    each task's result."""
    hostSystem = GetHostSystem(host)
    llpm = GetLLPM(hostSystem)

    # Collect datastore paths of all .vmx files, one directory level down.
    vmxPaths = []
    for entry in BrowseDir(hostSystem, "[%s]" % options.datastore):
        matches = BrowseDir(hostSystem,
                            "[%s] %s" % (options.datastore, entry.path),
                            "*.vmx")
        for vmx in matches:
            vmxPaths.append(
                "[%s] %s/%s" % (options.datastore, entry.path, vmx.path))

    # Exercise both low-level recovery calls against each VM config file.
    for path in vmxPaths:
        Log("Invoking RetrieveVmRecoveryInfo on %s" % path)
        task = llpm.RetrieveVmRecoveryInfo(path)
        WaitForTask(task)
        Log(str(task.info.result))
        Log("Invoking RetrieveLastVmMigrationStatus on %s" % path)
        task = llpm.RetrieveLastVmMigrationStatus(path)
        WaitForTask(task)
        Log(str(task.info.result))
Esempio n. 2
0
File: vcFT.py Progetto: free-Zen/pvc
def CheckFTState(vm, state, si=None, isPrimary=True, checkRRState=True):
    """Verify a VM's runtime fault-tolerance state and, optionally, its
    record/replay state.

    Args:
        vm: virtual machine whose runtime state is checked.
        state: expected vim.VirtualMachine.FaultToleranceState value.
        si: unused; kept for call-site compatibility.
        isPrimary: True when vm is the FT primary (expected recording),
            False for the secondary (expected replaying).
        checkRRState: when False, skip the record/replay verification.
            NOTE: this parameter is new (with a compatible default) — the
            original body read 'checkRRState' at the guard below without it
            being defined anywhere in scope, which raised NameError.

    Raises:
        Exception: if the FT state or record/replay state does not match.
    """
    if isPrimary:
        expRRState = vim.VirtualMachine.RecordReplayState.recording
    else:
        expRRState = vim.VirtualMachine.RecordReplayState.replaying
    ftState = vm.GetRuntime().GetFaultToleranceState()
    rrState = vm.GetRuntime().GetRecordReplayState()
    if ftState != state:
        raise Exception("Runtime FT state %s not set to %s" % (ftState, state))
    Log("Verified runtime fault tolerance state as " + str(state))

    if not checkRRState:
        return

    # Check record/replay state: the active state only applies while FT is
    # actually running; otherwise it must be inactive.
    if ftState == FTState.running:
        if rrState != expRRState:
            raise Exception("Runtime recordReplay state %s not set to %s" %
                            (rrState, expRRState))
    elif rrState != vim.VirtualMachine.RecordReplayState.inactive:
        raise Exception("Runtime recordReplay state %s not set to inactive" %
                        rrState)
    Log("Verified runtime record/replay state as %s" % rrState)
Esempio n. 3
0
File: vcFT.py Progetto: free-Zen/pvc
def MountNas(host, localPath, remotePath, accessMode="readWrite"):
    """Mount an NFS volume on a host.

    remotePath must be of the form "serverName:path". Returns the newly
    created NAS datastore, or None (after logging) when remotePath does not
    match that format.
    """
    m = re.match(r"^(.*?):(.*)$", remotePath)
    if m:
        # m.group(n) is the idiomatic equivalent of the original
        # m.string[m.start(n):m.end(n)] slicing.
        remoteHost = m.group(1)
        remotePath = m.group(2)

        spec = vim.host.NasVolume.Specification(accessMode=accessMode,
                                                localPath=localPath,
                                                remoteHost=remoteHost,
                                                remotePath=remotePath)
        datastoreSystem = host.configManager.datastoreSystem
        return datastoreSystem.CreateNasDatastore(spec)

    Log("remotePath %s did not contain serverName:path format" % remotePath)
    return None
Esempio n. 4
0
def testDeadProc():
   """Start a short-lived program (/bin/ls) in the guest, then verify that
   ListProcesses eventually reports results for the finished process."""
   Log("Testing StartProgram")
   progSpec = procDef.ProgramSpec(programPath="/bin/ls", arguments="/tmp")
   pid = procMgr.StartProgram(virtualMachine, guestAuth, progSpec)
   Log("Pid %s" % pid)
   pidList = [pid]
   procInfo = procMgr.ListProcesses(virtualMachine, guestAuth, pidList)
   Log("Expected process not to show completion (no endTime or exitCode) yet")
   Log("Process info %s" % procInfo)
   Log("Sleeping 3 seconds")
   time.sleep(3)
   procInfo = procMgr.ListProcesses(virtualMachine, guestAuth, pidList)
   Log("Expected process to show full results now")
   Log("Process info %s" % procInfo)
Esempio n. 5
0
def FindHostByName(name):
    """Find a host by IP or DNS name via the search index; fall back to a
    reverse-DNS lookup of the address. Returns None when nothing matches."""
    global si
    searchIndex = si.content.searchIndex
    host = (searchIndex.FindByIp(None, name, False)
            or searchIndex.FindByDnsName(None, name, False))
    if host:
        return host

    # Direct lookups failed: try resolving the address to a host name first.
    from socket import gethostbyaddr, herror
    try:
        resolved = gethostbyaddr(name)[0]
        return searchIndex.FindByDnsName(None, resolved, False)
    except herror:
        Log("Cannot find host %s" % name)
        return None
Esempio n. 6
0
   def test_invalid_login(self):
       """Attempt several connections with a corrupted password and verify
       each one raises Vim.Fault.InvalidLogin.

       Uses range() rather than xrange(): behavior-identical on Python 2 for
       this loop, and xrange does not exist on Python 3.
       """
       print("test_invalid_login")
       for i in range(self.options.num_bad_logins):
           try:
               print("iteration: " + repr(i))
               bad_pwd = self.options.pwd + "JUNK"
               si = SmartConnect(host=self.options.host,
                                 user=self.options.user,
                                 pwd=bad_pwd,
                                 port=self.options.port)

               Disconnect(si)
           except Vim.Fault.InvalidLogin:
               Log("Caught InvalidLogin exception")
           else:
               self.fail('InvalidLogin not thrown')
Esempio n. 7
0
def GetFeatureReqs(reqs, count=1, val=1):
    """Collect up to 'count' feature requirements whose parsed value equals
    'val', skipping any key listed in excludeKeys. Returns a dict keyed by
    requirement key, with {"operation": ..., "value": ...} entries."""
    masks = {}

    for req in reqs:
        # Stop as soon as enough entries have been collected.
        if len(masks) >= count:
            break
        if req.key in excludeKeys:
            continue
        match = re.match("(?P<type>)?:?(?P<operation>.*):(?P<value>.*)",
                         req.value)
        if not match:
            continue
        operation = match.group('operation')
        value = match.group('value')
        if int(value) == val:
            masks[req.key] = {"operation": operation, "value": value}
            Log("Mask %s=%s" % (req.key, masks[req.key]))
    return masks
Esempio n. 8
0
   def test_retrieve_various(self):
       """Exercise retrieve calls against an unauthenticated session:
       anon-allowable calls must succeed, authenticated-only calls must
       raise NotAuthenticated."""
       si = SmartConnect(host=self.options.host,
                         user=self.options.user,
                         pwd=self.options.pwd,
                         port=self.options.port)
       # NOTE(review): the session is dropped BEFORE the calls below —
       # presumably intentional, to test anonymous access; confirm.
       Disconnect(si)

       try:
           internalContent = si.RetrieveInternalContent()
           content = si.RetrieveContent()
       except Exception as e:
           self.fail("Unexpected exception for anon-allowable calls: %s" % e)

       try:
           uptime = self.hostSystem.RetrieveHardwareUptime()
       except Vim.Fault.NotAuthenticated as e:
           Log("Caught NotAuthenticated exception: %s" % e)
Esempio n. 9
0
def main():
   """Parse command-line options and drive the VVOL datastore test suite.

   NOTE(review): the "user" and "vpuser" argument tuples had been mangled by
   a redaction pass ('"user="******"root"' is not valid Python); they are
   restored here to the (flags, default, help, key) shape every other entry
   uses.
   """
   supportedArgs = [ (["h:", "host="], "10.20.109.41", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "vmware", "Password", "pwd"),
                     (["vpuser="], "root", "VP User name", "vpuser"),
                     (["vppwd="], "ca$hc0w", " VP Password", "vppwd"),
                     (["n:", "nfs="], "10.20.108.115", "nfs host name", "nfs"),
                     (["m:", "mount="], "/mnt/pes/pepsi_nfs_pe", "Nfs server mount point", "mount"),
                     (["d:", "pe="], "pepsi_nfs_pe", "PE name", "pe"),
                     (["v:", "verbose="], "3", "log level(1-3), the lesser the more verbose", "verbose")]

   supportedToggles = [(["usage", "help"], False, "Show usage information", "usage")]

   args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
   if args.GetKeyValue("usage") == True:
      args.Usage()
      print('Prerequisite:\n\
               1. VC with minimum of one host configured\n\
               2. VP is registered with SMS\n\
               3. VP is configured with NFS pe')
      sys.exit(0)

   global verbose
   verbose = int(args.GetKeyValue("verbose"))

   # NOTE(review): no connect call is visible before this log line — the
   # connection is presumably established inside VvolDs; confirm.
   Log("Connected to vc " + args.GetKeyValue("host"))

   ds = VvolDs(args)

   # Run the individual datastore tests in order.
   ds.CleanupExistingTestDatastores()
   ds.addPE()
   ds.TestDummyCreateVvolDs()
   ds.TestCreateVvolDs()
   ds.TestDestroyVvolDs()
   ds.TestBulkCreateVvolDs()
   ds.TestBulkRemoveVvolDs()
   ds.TestBulkRemoveNonVvolDs()
   ds.TestRemoveVvolDsWithVms()
   ds.TestCreateDir()
   ds.TestVmMigrate()
   ds.TestDisconnectedHost()
   ds.TestUpdateVvolDs()
Esempio n. 10
0
def TestDatastores(repManager, datastoreConf, dsMgrList):
    """Verify that creating a group per datastore manager surfaces the
    expected datastores, then verify datastore removal when all groups are
    removed.

    Raises TestFailedExc when the datastore counts do not match.
    """

    storageManager = repManager.GetStorageManager()

    Log("Test configured datastores")
    # Create a group on each datastore
    for datastoreMgr in dsMgrList:
        gspec = CreateRandomizedGroupSpec(datastoreMgr=datastoreMgr)
        newGroup = repManager.CreateGroup(gspec)

    dsList = storageManager.GetExpectedDatastores()

    # There should be at least one datastore for each CreateGroup above
    if len(dsList) != len(dsMgrList):
        Log("Expected datastores present...")
        PrintDatastores(dsList)
        Log("Test provided datastores...")
        PrintDatastores(dsMgrList)
        raise TestFailedExc("Expected " + str(len(dsMgrList)) +
                            " datastores. " + "Found " + str(len(dsList)) +
                            ".")
    PrintDatastores(dsList)

    numDs = len(dsList)

    # Test datastore removal (triggered by removal of all groups)
    Log("Test datastore removal...")
    for group in repManager.groups:
        group.Remove()
        dsList = storageManager.GetExpectedDatastores()
        newNumDs = len(dsList)
        if numDs != newNumDs:
            print("Remaining datastores...")
            PrintDatastores(dsList)
        # NOTE(review): this assignment has no effect (newNumDs is
        # overwritten next iteration); it looks like it was meant to be
        # 'numDs = newNumDs' to track the shrinking count — confirm intent.
        newNumDs = numDs

    dsList = storageManager.GetExpectedDatastores(
    )  # Should have no datastores...
    numDs = len(dsList)

    if numDs != 0:
        Log("Groups that still exist")
        for group in repManager.groups:
            print(group)
        Log("Datastores that weren't removed...")
        PrintDatastores(dsList)
        raise TestFailedExc("Expected zero datastores. Found " + str(numDs) +
                            ".")

    return
Esempio n. 11
0
def main():
    """Resolve the vvol datastore named on the command line, publish it via
    module globals, then run the test suite."""
    global vvolDsName, vvolDs, vvolId, vmRefs

    vvolDsName = args.GetKeyValue("dsName")
    vvolDs = GetDatastore(si, vvolDsName)
    # Storage-container id of the vvol datastore under test.
    vvolId = vvolDs.info.vvolDS.scId

    if not isinstance(vvolDs.info, Vim.Host.VvolDatastoreInfo):
        raise Exception(
            "Datastore must be vvol datastore: %s" % (vvolDsName))

    vmRefs = []

    runTest()

    Log("Tests passed")
Esempio n. 12
0
def TestAddDevice(vm1, device, allowVMotion, ctlr):
    """Hot-add a USB device to vm1 and verify it is attached to the
    expected controller."""
    Log("Testing adding of device '" + device.description + "' for VM " +
        vm1.GetConfig().GetName() + " allowVmotion:" + str(allowVMotion))

    spec = vmconfig.AddUSBDev(Vim.Vm.ConfigSpec(),
                              cfgInfo=vm1.GetConfig(),
                              devName=device.name,
                              allowVMotion=allowVMotion,
                              ctlr=ctlr)
    # Hot-add the devices
    vm.Reconfigure(vm1, spec)

    # Check for device presence in VM's config
    usbDev = CheckDevice(vm1, Vim.Vm.Device.VirtualUSB, "USB device")
    ctlrDev = vmconfig.GetControllers(vmconfig.GetCfgOption(None), ctlr,
                                      vm1.GetConfig(), Vim.Vm.ConfigSpec())[0]
    if usbDev.controllerKey != ctlrDev.key:
        raise Exception("Wrong controller for USB device:" +
                        str(usbDev.controllerKey))
Esempio n. 13
0
def addUnmappedAliases():
    """Register several unmapped aliases (two named subjects plus one ANY
    subject) so that alias listing has entries to show; one alias is also
    added under a second certificate."""
    Log("adding some unmapped aliases for listing")

    named1 = aliasDef.GuestAuthNamedSubject(name="unMappedSubjectName")
    info1 = aliasDef.GuestAuthAliasInfo(
        subject=named1, comment="This is a unmapped test comment")
    named2 = aliasDef.GuestAuthNamedSubject(name="unMappedSubjectName2")
    info2 = aliasDef.GuestAuthAliasInfo(
        subject=named2, comment="This is a unmapped test comment2")
    anySubj = aliasDef.GuestAuthAnySubject()
    info3 = aliasDef.GuestAuthAliasInfo(
        subject=anySubj, comment="This is a unmapped test comment3 for an ANY")

    # Add all three aliases under the CA certificate...
    for info in (info1, info2, info3):
        aliasMgr.AddAlias(virtualMachine, guestAuth, gUser, False, caCert,
                          info)

    # ...and the second one again under a different certificate.
    aliasMgr.AddAlias(virtualMachine, guestAuth, gUser, False, cert2,
                      info2)
Esempio n. 14
0
   def RunTests(self):
      """Run the VirtualDiskManager test battery, resolving a datastore
      name first when none was supplied.

      Uses 'is None' / 'is not None' identity tests instead of the original
      '== None' / '!= None' comparisons (the Python idiom; behavior
      identical here).
      """
      Log("VirtualDiskManager: Run tests")
      if self._dsName is None:
         self._dsName = self.FindDatastoreName()

      self.TestCreateFileBacked()
      self.TestCreateFileBackedWeirdPaths()
      self.TestMove()
      self.TestUuid()
      #self.TestExtend()
      self.TestCopy()
      self.TestCopySelf()
      self.TestCopySpecless()
      self.TestEagerZero()
      self.TestOptimizeEagerZero()
      self.TestInflate()
      # The device-backed test requires an RDM device to have been supplied.
      if self._rdm is not None:
         self.TestCreateDeviceBacked()
      ##self.TestDelete()
      self.TestQueryDiskInfo()
      self.TestInsufficientCapacity()
Esempio n. 15
0
def addRdmDisk(options, machine, shared):
    """Reconfigure 'machine' to add a raw-device-mapping (RDM) disk backed
    by options.disk in physical compatibility mode; when 'shared' is True
    the disk is marked multi-writer."""
    cspec = Vim.Vm.ConfigSpec()
    diskDev = VirtualDisk()
    diskDev.key = vmconfig.GetFreeKey(cspec)
    diskDev.controllerKey = 1000
    # Plain int instead of long(): long() does not exist on Python 3 and an
    # int of this size is accepted identically on Python 2.
    diskDev.capacityInKB = 1024
    diskDev.unitNumber = -1

    diskBacking = VirtualDisk.RawDiskMappingVer1BackingInfo()
    diskBacking.fileName = ""
    diskBacking.diskMode = VirtualDiskOption.DiskMode.persistent
    diskBacking.deviceName = options.disk
    if shared:
        diskBacking.sharing = VirtualDisk.Sharing.sharingMultiWriter
    diskBacking.compatibilityMode = VirtualDiskOption.CompatibilityMode.physicalMode
    diskDev.backing = diskBacking

    vmconfig.AddDeviceToSpec(cspec, diskDev, VirtualDeviceSpec.Operation.add,
                             VirtualDeviceSpec.FileOperation.create)

    vm.Reconfigure(machine, cspec)
    Log("Reconfigure(%s) - add RDM disk" % machine.name)
Esempio n. 16
0
def addFlatDisk(options, machine, shared):
    """Reconfigure 'machine' to add an eagerly-scrubbed flat (thick) disk on
    options.datastore; when 'shared' is True the disk is marked
    multi-writer."""
    cspec = Vim.Vm.ConfigSpec()
    diskDev = VirtualDisk()
    diskDev.key = vmconfig.GetFreeKey(cspec)
    diskDev.controllerKey = 1000
    # Plain int instead of long(): long() does not exist on Python 3 and an
    # int of this size is accepted identically on Python 2.
    diskDev.capacityInKB = 1024
    diskDev.unitNumber = -1

    diskBacking = VirtualDisk.FlatVer2BackingInfo()
    diskBacking.fileName = "[" + options.datastore + "]"
    diskBacking.diskMode = VirtualDiskOption.DiskMode.persistent
    if shared:
        diskBacking.sharing = VirtualDisk.Sharing.sharingMultiWriter
    diskBacking.thinProvisioned = False
    diskBacking.eagerlyScrub = True
    diskDev.backing = diskBacking

    vmconfig.AddDeviceToSpec(cspec, diskDev, VirtualDeviceSpec.Operation.add,
                             VirtualDeviceSpec.FileOperation.create)

    vm.Reconfigure(machine, cspec)
    Log("Reconfigure(%s) -> add flat disk" % machine.name)
Esempio n. 17
0
def TestEditSataDisk(vm1):
    """
    Test reconfigures of SATA disks: add/extend/snapshot a SATA disk, move
    it between SATA and SCSI controllers, and hot-add/hot-remove it.
    """
    cspec = Vim.Vm.ConfigSpec()
    cspec = vmconfig.AddSataCtlr(cspec)
    cspec = vmconfig.AddScsiCtlr(cspec)
    vm.Reconfigure(vm1, cspec)

    Log("Add SATA disk.")
    AddSataDisk(vm1)

    Log("Reconfigure disk capacity.")
    TestExtendDisk(vm1)

    Log("Snapshot and reconfigure delta disk.")
    TestReconfigDeltaDisk(vm1)

    Log("Move SATA disk to SCSI controller.")
    scsiCtlrs = vmconfig.CheckDevice(vm1.config,
                                     Vim.Vm.Device.VirtualSCSIController)
    if not scsiCtlrs:
        raise Exception("Failed to find SCSI controller!")
    disk = vmconfig.CheckDevice(vm1.config, Vim.Vm.Device.VirtualDisk)[0]
    TestMoveDevice(vm1, disk, scsiCtlrs[0])

    Log("Move SCSI disk to SATA controller.")
    sataCtlrs = vmconfig.CheckDevice(vm1.config,
                                     Vim.Vm.Device.VirtualSATAController)
    disk = vmconfig.CheckDevice(vm1.config, Vim.Vm.Device.VirtualDisk)[0]
    TestMoveDevice(vm1, disk, sataCtlrs[0])
    vm.RemoveDevice(vm1, scsiCtlrs[0])

    Log("Remove SATA disk.")
    RemoveSataDisk(vm1)

    Log("Testing hot-add and hot-remove of SATA disk.")
    vm.PowerOn(vm1)
    AddSataDisk(vm1)
    RemoveSataDisk(vm1)
    vm.PowerOff(vm1)

    vm.RemoveDevice(vm1, sataCtlrs[0])
Esempio n. 18
0
def main():
    """Connect to the host given on the command line and run the VMFS5
    create-and-upgrade datastore tests.

    NOTE(review): the "user" argument tuple had been mangled by a redaction
    pass ('"user="******"root"' is not valid Python); restored here to the
    (flags, default, help, key) shape the other entries use.
    """
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd")]

    supportedToggles = [(["usage",
                          "help"], False, "Show usage information", "usage")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Connect
    si = SmartConnect(host=args.GetKeyValue("host"),
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    # Ensure the session is torn down however the test exits.
    atexit.register(Disconnect, si)

    Log("Connected to host " + args.GetKeyValue("host"))

    vmfs5 = Vmfs5(si)
    vmfs5.TestCreateAndUpgradeVmfsDatastores()
Esempio n. 19
0
def CreateTestVm(si, dsName):
    """Create a small test VM (one nic, 32MB) on datastore dsName, destroy
    any leftover VM of the same name first, connect the nic, power the VM
    on and return it."""
    vmName = "TestStatsVm1"

    # Destroy old VMs
    for existing in si.content.rootFolder.childEntity[0].vmFolder.childEntity:
        if existing.name != vmName:
            continue
        if existing.runtime.powerState != vim.VirtualMachine.PowerState.poweredOff:
            vm.PowerOff(existing)
        existing.Destroy()

    spec = vm.CreateQuickDummySpec(vmName,
                                   nic=1,
                                   memory=32,
                                   datastoreName=dsName)
    resPool = invt.GetResourcePool(si=si)
    vmFolder = invt.GetVmFolder(si=si)
    createTask = vmFolder.CreateVm(spec, pool=resPool)
    WaitForTask(createTask)
    newVm = createTask.info.result

    Log("Created VM %s on %s" % (newVm.name, resPool.owner.host[0].name))

    nics = vmconfig.CheckDevice(newVm.GetConfig(),
                                vim.vm.Device.VirtualEthernetCard)
    if not nics:
        raise Exception("Failed to find nic")

    # Reconfigure to add network
    cspec = vim.vm.ConfigSpec()
    nic = nics[0]
    nic.GetConnectable().SetStartConnected(True)
    nic.GetConnectable().SetConnected(True)
    vmconfig.AddDeviceToSpec(cspec, nic,
                             vim.vm.Device.VirtualDeviceSpec.Operation.edit)
    vm.Reconfigure(newVm, cspec)
    vm.PowerOn(newVm)
    return newVm
Esempio n. 20
0
def waitForLazySnapshot(virtualMachine, snapTask):
    """Poll snapTask until the snapshot either completes or enters its lazy
    (background) phase, logging progress every three seconds."""
    terminal = ('success', 'error')
    while snapTask.info.progress != 100 and snapTask.info.state not in terminal:
        Log(">>> Current snapshot: state='%s', progress='%s'" %
            (snapTask.info.state, snapTask.info.progress))

        # Once the VM reports the snapshot running in the background, the
        # eager phase is over and there is nothing left to wait for here.
        if virtualMachine.GetRuntime().GetSnapshotInBackground() is True:
            Log(">>> Entered lazy phase for snapshot")
            Log("-----------------------------------")
            break
        Log(">>> Snapshot still in eager phase...")
        Log("-----------------------------------")
        time.sleep(3)

    # check task status
    Log(">>> Current snapshot: state='%s', progress='%s'" %
        (snapTask.info.state, snapTask.info.progress))
Esempio n. 21
0
def CollectStats(statData):
    """Collect depot (and optionally registry) performance stats for a host
    and a freshly created test VM, recording them on statData.

    Side effects: populates the shared counterInfoDict (guarded by
    counterSem), creates a test VM, optionally enables replication on it,
    and powers off / destroys the VM at the end.
    """
    si, perfManager, hostSystem, hbrManager = GetParams(
        statData.host, statData.user, statData.password)
    # counterInfoDict maps counter key -> "group.name.rollup" and is shared
    # across threads, hence the semaphore around every update.
    global counterInfoDict
    global counterSem
    # Add depot stats counter / name pairs
    counterSem.acquire()
    for info in perfManager.perfCounter:
        counterInfoDict[info.key] = "%s.%s.%s" % (
            info.groupInfo.key, info.nameInfo.key, info.rollupType)
    counterSem.release()

    statData.hostDepotStats = GetDepotStats(perfManager, hostSystem)

    statData.vm = CreateTestVm(si, statData.datastore)

    # Enable hbr replication for real host, don't need to do this for simulator
    if hbrManager is not None and not statData.sim:
        EnableReplication(hbrManager, statData.vm)
    elif not statData.sim:
        Log("Warning: hbrManager is None!")

    if statData.enableReg:
        # Add registry stats counter / name pairs
        counterInfo = perfManager.QueryPerfCounterInt()
        counterSem.acquire()
        for info in counterInfo:
            counterInfoDict[info.key] = "%s.%s.%s" % (
                info.groupInfo.key, info.nameInfo.key, info.rollupType)
        counterSem.release()
        statData.regStats = GetRegistryStats(perfManager, hostSystem)

    time.sleep(40)  # Sleep to let stats populate

    statData.vmDepotStats = GetDepotStats(perfManager, statData.vm)
    vm.PowerOff(statData.vm)
    WaitForTask(statData.vm.Destroy())
Esempio n. 22
0
def AddDisks(vm1, num, ctlrKeys, startingOffset=-1, datastore=None):
    """Add 'num' flat disks to each controller in ctlrKeys via a single
    Reconfigure call.

    With startingOffset < 0 the unit number is left to auto-assignment
    (-1); otherwise disks get consecutive unit numbers from startingOffset,
    skipping unit 7 (presumably reserved for the SCSI controller itself —
    TODO confirm).
    """
    cspec = Vim.Vm.ConfigSpec()
    dchange = cspec.GetDeviceChange()
    cspec.SetDeviceChange(dchange)
    for z in ctlrKeys:
        ctlrKey = z
        # Each controller restarts numbering from the caller's offset.
        thisOffset = startingOffset
        for i in range(num):
            devSpec = Vim.Vm.Device.VirtualDeviceSpec()
            diskDev = Vim.Vm.Device.VirtualDisk()
            # Negative keys mark devices that are new in this spec.
            diskDev.SetKey(-i)
            diskDev.SetControllerKey(ctlrKey)
            if thisOffset < 0:
                diskDev.SetUnitNumber(-1)
            else:
                # Bump the offset past the reserved slot 7.
                if thisOffset + i == 7:
                    thisOffset += 1
                diskDev.SetUnitNumber(thisOffset + i)

            diskDev.capacityInKB = long(1024)
            devSpec.SetDevice(diskDev)
            devSpec.SetOperation("add")
            devSpec.SetFileOperation("create")
            dchange.append(devSpec)
            diskBacking = Vim.Vm.Device.VirtualDisk.FlatVer2BackingInfo()
            diskDev.SetBacking(diskBacking)
            if datastore != None:
                diskBacking.SetFileName("[" + datastore + "]")
            else:
                diskBacking.SetFileName("")
            diskBacking.SetDiskMode("persistent")
            Log("Adding disk " + str(i + 1) + " to spec " +
                str(diskDev.unitNumber))

    task = vm1.Reconfigure(cspec)
    WaitForTask(task)
Esempio n. 23
0
def testExplicitListProc():
   """List a few well-known system pids explicitly and verify that no
   unrequested pid is returned.

   Raises AssertionError when ListProcesses returns a pid that was not
   asked for.
   """
   # explicit pid test -- try some system processes that should be there
   pids = [0, 1, 2]
   Log("Testing explicit ListProcesses pid %s" % pids)
   result = procMgr.ListProcesses(virtualMachine, guestAuth, pids)
   Log("Note that this may have 0 results on Windows")
   numResults = len(result)
   Log("Found %s results" % numResults)
   if numResults > 0:
      Log("Explicit processes: %s" % result)
      for process in result:
         # A membership test replaces the original hand-rolled inner
         # search loop; same logging, same failure behavior.
         if process.pid in pids:
            Log("Found pid %s" % process.pid)
         else:
            Log("Unwanted pid %s" % process.pid)
            raise AssertionError("Received unwanted pid %s" % process.pid)
Esempio n. 24
0
def mainTestFirmware():
    """Create a HWv8 Mac OS dummy VM, switch its firmware between efi and
    bios verifying each change sticks, then delete the VM. Returns "PASS"
    or "FAIL"."""
    Log("---[ TEST " + testName + " ]---")

    vmname = "HwV8_Firmware"
    status = "PASS"

    bigClock = StopWatch()
    vm1 = None
    try:
        macosVmName = vmname + "_MacOS"
        Log("Cleaning up VMs from previous runs...")
        vm.Delete(macosVmName, True)

        Log("Creating Mac OS VM..")
        vm1 = vm.CreateQuickDummy(macosVmName,
                                  vmxVersion="vmx-08",
                                  memory=4,
                                  guest="darwin11Guest")

        # Flip between both firmware values; each must be reflected in
        # the VM's config after the change.
        for firmware in ("efi", "bios"):
            ChangeFirmware(vm1, firmware, True)
            if firmware != vm1.config.firmware:
                raise Exception("Firmware don't match set value")

        Log("Deleting VM " + macosVmName)
        vm.Delete(macosVmName, True)

        bigClock.finish(testName)
    except Exception as e:
        status = "FAIL"
        Log("Caught exception : " + str(e))

    Log("TEST [" + testName + "] COMPLETE: " + status)
    return status
Esempio n. 25
0
    try:
        # Connect to hosts.
        si = connect.Connect(host=host,
                             user=args.GetKeyValue("user"),
                             pwd=args.GetKeyValue("pwd"),
                             version="vim.version.version9")

        atexit.register(Disconnect, si)

        id = args.GetKeyValue("id")
        if args.GetKeyValue("flag"):
            ToggleDispatcherFlag(si, args.GetKeyValue("flag"))

        Log("TEST RUN %s COMPLETE: %s" % (id, status))
    except Exception:
        (excType, excVal, excTB) = sys.exc_info()
        Log("Caught exception: ")
        traceback.print_exception(excType, excVal, excTB)
        status = "FAIL"


# Start program
if __name__ == "__main__":
    try:
        main()
    except:
        # Catch-all boundary: log any unhandled exception with its traceback
        # and exit with a distinctive code so the harness can tell a crash
        # from an ordinary test failure.
        (excType, excVal, excTB) = sys.exc_info()
        Log("Test exiting on unhandled exception: ")
        traceback.print_exception(excType, excVal, excTB)
        sys.exit(42)
Esempio n. 26
0
def main():
    supportedArgs = [
        (["P:",
          "primary host="], "localhost", "Primary host name", "primaryHost"),
        (["S:", "secondary host="], "localhost", "Secondary host name",
         "secondaryHost"),
        (["d:", "shared datastore name="], "storage1", "shared datastore name",
         "dsName"),
        (["k:", "keep="], "0", "Keep configs", "keep"),
        (["u:", "user="******"root", "User name", "user"),
        (["p:", "pwd="], "", "Password", "pwd"),
        (["v:", "vmname="], "vmFT", "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
        (["t:",
          "FT type="], "up", "Type of fault tolerance [up|smp]", "ftType"),
    ]
    supportedToggles = [(["usage",
                          "help"], False, "Show usage information", "usage")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    keep = int(args.GetKeyValue("keep"))
    dsName = args.GetKeyValue("dsName")
    primaryHost = args.GetKeyValue("primaryHost")
    secondaryHost = args.GetKeyValue("secondaryHost")
    ftType = args.GetKeyValue("ftType")

    numCPU = 2 if ftType == "smp" else 1
    memSize = 64

    for i in range(numiter):
        primaryVm = None
        primarySi = None
        secondarySi = None
        try:
            # Connect to primary host
            primarySi = SmartConnect(host=primaryHost,
                                     user=args.GetKeyValue("user"),
                                     pwd=args.GetKeyValue("pwd"))
            Log("Connected to Primary host")

            # Cleanup from previous runs
            try:
                CleanupVm(vmname)
            except vim.fault.InvalidOperationOnSecondaryVm:
                pass

# Connect to secondary host
            secondarySi = SmartConnect(host=secondaryHost,
                                       user=args.GetKeyValue("user"),
                                       pwd=args.GetKeyValue("pwd"))
            Log("Connected to Secondary host")

            for si in [primarySi, secondarySi]:
                if len(FindNicType(si, ftLoggingNicType)) == 0:
                    SelectVnic(si, "vmk0", ftLoggingNicType)
                if len(FindNicType(si, vmotionNicType)) == 0:
                    SelectVnic(si, "vmk0", vmotionNicType)

            ftMgrDst = host.GetFaultToleranceMgr(secondarySi)

            # Cleanup from previous runs
            CleanupVm(vmname)
            CleanupVm(vmname, True)

            connect.SetSi(primarySi)
            CleanupDir(dsName, vmname)
            if ftType == "smp":
                CleanupDir(dsName, "%s_shared" % vmname)

# Create new VM
            Log("Creating primary VM " + vmname)
            primaryVm = vm.CreateQuickDummy(vmname,
                                            guest="winNetEnterpriseGuest",
                                            numScsiDisks=2,
                                            scrubDisks=True,
                                            memory=memSize,
                                            datastoreName=dsName)
            primaryUuid = primaryVm.GetConfig().GetInstanceUuid()
            primaryCfgPath = primaryVm.GetConfig().GetFiles().GetVmPathName()
            primaryDir = primaryCfgPath[:primaryCfgPath.rfind("/")]

            ftMetadataDir = GetSharedPath(primarySi, primaryVm)

            Log("Using VM : " + primaryVm.GetName() + " with instanceUuid " +
                primaryUuid)

            ftMetadataDir = GetSharedPath(primarySi, primaryVm)
            cSpec = vim.vm.ConfigSpec()
            if ftType != "smp":
                # Enable record/replay for the primaryVm
                # See PR 200254
                flags = vim.vm.FlagInfo(recordReplayEnabled=True)
                cSpec.SetFlags(flags)
                task = primaryVm.Reconfigure(cSpec)
                WaitForTask(task)
                Log("Enabled record/replay for Primary VM.")
                CheckFTState(
                    primaryVm,
                    vim.VirtualMachine.FaultToleranceState.notConfigured)
            else:
                cSpec.files = vim.vm.FileInfo(
                    ftMetadataDirectory=ftMetadataDir)
                cSpec.numCPUs = numCPU
                task = primaryVm.Reconfigure(cSpec)
                WaitForTask(task)

            # Create secondary VM
            connect.SetSi(secondarySi)
            Log("Creating secondary VM " + vmname)
            secondaryVm = vm.CreateQuickSecondary(vmname,
                                                  primaryVm,
                                                  ftType=ftType,
                                                  scrubDisks=True,
                                                  numScsiDisks=2,
                                                  datastoreName=dsName,
                                                  ftMetadataDir=ftMetadataDir)
            if secondaryVm == None:
                raise "Secondary VM creation failed"
            secondaryUuid = secondaryVm.GetConfig().GetInstanceUuid()
            secondaryCfgPath = secondaryVm.GetConfig().GetFiles(
            ).GetVmPathName()
            Log("Created secondary VM " + secondaryVm.GetName())
            Log("Secondry VM: instanceUuid " + secondaryUuid)
            Log("Secondary cfg path: " + secondaryCfgPath)

            ##  Configure some additional config variables needed for FT
            ##  This should eventually be done automatically at FT Vmotion time
            Log("Setting up extra config settings for the primary VM...")

            cSpec = vim.Vm.ConfigSpec()
            extraCfgs = []
            if ftType == "smp":  # some of these options are temporary
                cSpec.flags = vim.vm.FlagInfo(
                    faultToleranceType=FTType.checkpointing)
                AddExtraConfig(extraCfgs, "ftcpt.maxDiskBufferPages", "0")
                AddExtraConfig(extraCfgs, "sched.mem.pshare.enable", "FALSE")
                AddExtraConfig(extraCfgs, "sched.mem.fullreservation", "TRUE")
                AddExtraConfig(extraCfgs,
                               "monitor_control.disable_mmu_largepages",
                               "TRUE")
                AddExtraConfig(extraCfgs, "sched.mem.min", memSize)
                AddExtraConfig(extraCfgs, "migration.dataTimeout", "2000")
                cSpec.files = vim.vm.FileInfo(
                    ftMetadataDirectory=ftMetadataDir)
            else:
                cSpec.flags = vim.vm.FlagInfo(
                    faultToleranceType=FTType.recordReplay)
                AddExtraConfig(extraCfgs, "replay.allowBTOnly", "TRUE")

            cSpec.SetExtraConfig(extraCfgs)
            WaitForTask(primaryVm.Reconfigure(cSpec))

            # Register secondary VM
            Log("Register secondary VM with the primary")
            ftMgr = host.GetFaultToleranceMgr(primarySi)
            connect.SetSi(primarySi)
            task = ftMgr.RegisterSecondary(primaryVm, secondaryUuid,
                                           secondaryCfgPath)
            WaitForTask(task)
            Log("Secondary VM registered successfully")

            # Verify FT role & state
            CheckFTRole(primaryVm, 1)
            CheckFTState(primaryVm,
                         vim.VirtualMachine.FaultToleranceState.enabled)

            Log("FT configured successfully.")

            # PowerOn FT VM
            Log("Powering on Primary VM")
            vm.PowerOn(primaryVm)
            if ftType == "smp":  # some of these options are temporary
                task = primaryVm.CreateSnapshot("snap-early",
                                                "before secondary starts",
                                                memory=False,
                                                quiesce=True)
                WaitForTask(task)

# Perform the FT VMotion
            Log("Calling StartSecondary on remote host...")
            primaryThumbprint = GetHostThumbprint(primaryHost)
            secondaryThumbprint = GetHostThumbprint(secondaryHost)
            Log("Primary thumbprint: %s" % primaryThumbprint)
            Log("Secondary thumbprint: %s" % secondaryThumbprint)

            secondaryHostSystem = secondarySi.content.rootFolder.childEntity[
                0].hostFolder.childEntity[0].host[0]
            sslThumbprintInfo = vim.host.SslThumbprintInfo(
                ownerTag='hostd-test', principal='vpxuser')
            sslThumbprintInfo.sslThumbprints = [primaryThumbprint]
            secondaryHostSystem.UpdateSslThumbprintInfo(
                sslThumbprintInfo, "add")

            sslThumbprintInfo.sslThumbprints = [secondaryThumbprint]
            primaryHostSystem = primarySi.content.rootFolder.childEntity[
                0].hostFolder.childEntity[0].host[0]
            primaryHostSystem.UpdateSslThumbprintInfo(sslThumbprintInfo, "add")

            task = ftMgr.StartSecondaryOnRemoteHost(primaryVm,
                                                    secondaryCfgPath,
                                                    secondaryHost, 80,
                                                    secondaryThumbprint)
            WaitForTask(task)
            Log("Start secondary done.")

            if ftType == "smp":
                # Verify snapshot is gone
                if primaryVm.snapshot is not None:
                    raise Exception("Snapshot still exists on primary")

                task = primaryVm.CreateSnapshot("snap",
                                                "without memory snapshot",
                                                memory=False,
                                                quiesce=True)
                WaitForTask(task)

                if not primaryVm.snapshot or not primaryVm.snapshot.currentSnapshot:
                    raise Exception("Snapshot was not created")
                else:
                    Log("Snapshot %s exists as expected" %
                        primaryVm.snapshot.currentSnapshot)

            # Retrieve reference to new secondary VM
            connect.SetSi(secondarySi)
            secondaryVm = folder.FindCfg(secondaryCfgPath)
            connect.SetSi(primarySi)

            # FT state check
            CheckFTState(primaryVm,
                         vim.VirtualMachine.FaultToleranceState.running)
            CheckFTState(secondaryVm,
                         vim.VirtualMachine.FaultToleranceState.running)

            Log("Start secondary done.")

            # allows some time for FT to run and checkpoint before failing
            # over. This seems more necessary on nested VM environments
            # than physical
            time.sleep(20)

            Log("Failing over to the secondary.")
            WaitForTask(ftMgr.MakePrimary(primaryVm, secondaryUuid))
            WaitForPowerState(primaryVm, primarySi,
                              vim.VirtualMachine.PowerState.poweredOff)
            Log("Verified primary power state is off.")
            WaitForFTState(secondaryVm, FTState.needSecondary)

            Log("Starting secondary.")
            task = ftMgrDst.StartSecondaryOnRemoteHost(secondaryVm,
                                                       primaryCfgPath,
                                                       primaryHost, 80,
                                                       primaryThumbprint)
            WaitForTask(task)

            # Verify snapshot is gone
            if primaryVm.snapshot is not None:
                raise Exception("Snapshot still exists on old primary")

            Log("Failing over to the old-primary.")
            WaitForTask(ftMgrDst.MakePrimary(secondaryVm, secondaryUuid))
            WaitForPowerState(secondaryVm, secondarySi,
                              vim.VirtualMachine.PowerState.poweredOff)
            Log("Verified primary power state is off.")
            WaitForFTState(primaryVm, FTState.needSecondary)

            task = ftMgr.StartSecondaryOnRemoteHost(primaryVm,
                                                    secondaryCfgPath,
                                                    secondaryHost, 80,
                                                    secondaryThumbprint)
            WaitForTask(task)

            # PowerOff FT VMs
            Log("Power off Primary VM")
            vm.PowerOff(primaryVm)
            connect.SetSi(secondarySi)
            for i in range(10):
                if secondaryVm.GetRuntime().GetPowerState(
                ) == vim.VirtualMachine.PowerState.poweredOn:
                    time.sleep(1)
            if secondaryVm.GetRuntime().GetPowerState(
            ) == vim.VirtualMachine.PowerState.poweredOn:
                raise Exception("Secondary VM is still powered on!")
            Log("Verified secondary power state.")

            Log("Unregistering secondary VM " + vmname)
            ftMgrDst.Unregister(secondaryVm)

            # Cleanup
            if not keep:
                connect.SetSi(primarySi)
                CleanupVm(vmname)
                CleanupDir(dsName, vmname)
                if ftType == "smp":
                    CleanupDir(dsName, "%s_shared" % vmname)

                connect.SetSi(secondarySi)
                CleanupVm(vmname, True)
        except Exception as e:
            Log("Caught exception : %s" % e)
            stackTrace = " ".join(
                traceback.format_exception(sys.exc_info()[0],
                                           sys.exc_info()[1],
                                           sys.exc_info()[2]))
            Log(stackTrace)
            global status
            status = "FAIL"
            Disconnect(primarySi)
            Disconnect(secondarySi)
            return

        Disconnect(primarySi)
        Disconnect(secondarySi)
Esempio n. 27
0
                CleanupDir(dsName, vmname)
                if ftType == "smp":
                    CleanupDir(dsName, "%s_shared" % vmname)

                connect.SetSi(secondarySi)
                CleanupVm(vmname, True)
        except Exception as e:
            Log("Caught exception : %s" % e)
            stackTrace = " ".join(
                traceback.format_exception(sys.exc_info()[0],
                                           sys.exc_info()[1],
                                           sys.exc_info()[2]))
            Log(stackTrace)
            global status
            status = "FAIL"
            Disconnect(primarySi)
            Disconnect(secondarySi)
            return

        Disconnect(primarySi)
        Disconnect(secondarySi)


# Script entry point: run the FT test suite, report the final status,
# and exit non-zero so automation can detect a failed run.
if __name__ == "__main__":
    main()
    for message in ("Test status: " + status, "FT Tests completed"):
        Log(message)
    if status != "PASS":
        sys.exit(1)
Esempio n. 28
0
def _AddRdmDiskToSpec(cspec, cfgOption, vmCfgInfo, fileName, deviceName,
                      diskMode):
    """Append a physical-compatibility-mode RDM virtual disk to *cspec*.

    The disk is attached to the first SCSI controller in the spec that has
    a free slot.  `vmCfgInfo` is None when the VM does not exist yet.
    `deviceName` is the raw device path, or None to omit it (used when
    hot-adding a disk backed by an existing RDM descriptor file).
    """
    rdmBacking = Vim.Vm.Device.VirtualDisk.RawDiskMappingVer1BackingInfo()
    rdmBacking.SetFileName(fileName)
    if deviceName is not None:
        rdmBacking.SetDeviceName(deviceName)
    rdmBacking.SetCompatibilityMode("physicalMode")
    rdmBacking.SetDiskMode(diskMode)
    rdmBacking.SetParent(None)

    diskDev = Vim.Vm.Device.VirtualDisk()
    diskDev.SetKey(vmconfig.GetFreeKey(cspec))
    diskDev.SetBacking(rdmBacking)

    ctlrs = vmconfig.GetControllers(cfgOption,
                                    Vim.Vm.Device.VirtualSCSIController,
                                    vmCfgInfo, cspec)
    for ctlr in ctlrs:
        if vmconfig.GetFreeSlot(cspec, vmCfgInfo, cfgOption, ctlr) >= 0:
            diskDev.SetControllerKey(ctlr.GetKey())
            diskDev.SetUnitNumber(-1)   # -1: let the server pick a unit
            diskDev.SetCapacityInKB(4096)
            break

    vmconfig.AddDeviceToSpec(
        cspec, diskDev,
        Vim.Vm.Device.VirtualDeviceSpec.Operation.add,
        Vim.Vm.Device.VirtualDeviceSpec.FileOperation.create)


def main():
    """Create a VM with a physical-mode RDM disk, then hot-add a second
    RDM disk backed by an existing descriptor file via reconfigure.

    Command-line driven; logs PASS/FAIL per iteration and a final
    "TEST RUN COMPLETE" status line.
    """
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        # Restored from a redaction artifact ("user="******"root") that
        # made the original line a syntax error.
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["d:", "disk="], "/vmfs/devices/", "Disk", "disk"),
        (["s:", "ds="], "storage1", "Datastore 1", "ds"),
        (["f:", "file="], "[datastore1] rdm/rdm.vmdk", "Virtual Disk", "file"),
        (["v:", "vmname="], "RdmVM", "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter")]

    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Connect; make sure we disconnect however the script exits.
    si = SmartConnect(host=args.GetKeyValue("host"),
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    atexit.register(Disconnect, si)

    # Process command line.
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")      # NOTE(review): currently unused
    noDelete = args.GetKeyValue("nodelete")  # NOTE(review): currently unused
    disk = args.GetKeyValue("disk")
    ds = args.GetKeyValue("ds")
    rdmDiskFile = args.GetKeyValue("file")

    status = "PASS"

    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = None
        try:
            # Clean up any VM left behind by a previous run.
            vm1 = folder.Find(vmname)
            if vm1 is not None:
                vm1.Destroy()

            Log("Creating VM: " + str(vmname))

            # Build a spec for a new VM with a SCSI controller plus a raw
            # RDM disk pointing at the physical device.
            Log("Adding a new rdm disk to VM: " + str(vmname))
            cspec = vmconfig.CreateDefaultSpec(name=vmname, datastoreName=ds)
            cspec = vmconfig.AddScsiCtlr(cspec)

            # Config options for device placement (no VM exists yet).
            cfgOption = vmconfig.GetCfgOption(None)

            _AddRdmDiskToSpec(cspec, cfgOption, vmCfgInfo=None,
                              fileName="", deviceName=disk, diskMode="")

            Log("create VM: " + str(vmname) + " with the RDM disk")
            vmFolder = vm.GetVmFolder()
            resPool = vm.GetResourcePool()

            task = vmFolder.CreateVm(cspec, resPool)
            WaitForTask(task)
            vm1 = task.info.result
            Log("Finished creating VM: " + str(vmname))

            # Hot-add a second RDM disk backed by an existing descriptor.
            Log("Now reconfiguring VM: " + str(vmname))
            cspec = Vim.Vm.ConfigSpec()
            _AddRdmDiskToSpec(cspec, cfgOption, vmCfgInfo=vm1.GetConfig(),
                              fileName=rdmDiskFile, deviceName=None,
                              diskMode="persistent")
            # vm.Reconfigure issues the reconfigure task and waits for it.
            # (The original additionally called the nonexistent
            # Folder.ReconfigVm, which raised and failed every iteration.)
            vm.Reconfigure(vm1, cspec)
            Log("Finished Reconfiguring VM: " + str(vmname))

        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))
        finally:
            # Report per-iteration timing, matching the other test mains
            # in this file.
            bigClock.finish("iteration " + str(i))
    Log("TEST RUN COMPLETE: " + status)
Esempio n. 29
0
# Session-cloning test (Python 2 syntax): create a fresh, unauthenticated
# stub to the local host, verify privileged calls fail before a session is
# cloned, then exercise VM power operations through the cloned session.
host = 'localhost'
stub = SoapStubAdapter(host=host, version="vim.version.version10")
newsi = Vim.ServiceInstance("ServiceInstance", stub)

# Try to acquire a clone ticket on an unauthenticated session. Should fail.
try:
    newsi.GetContent().GetSessionManager().AcquireCloneTicket()
except:
    pass

# NOTE(review): `ticket` is defined earlier in the file (not visible here);
# presumably a clone ticket from an authenticated session — confirm.
newsm = newsi.GetContent().GetSessionManager().CloneSession(ticket)
# The loop variable `vm1` deliberately leaks out of the loop and is reused
# below, so the power ops target the LAST VM listed — confirm intended.
for vm1 in folder.GetVmAll(si=newsi):
    print vm1

# Power off via the cloned (authenticated) session; expected to succeed.
try:
    Log("Power Off (should pass)")
    vm1.PowerOff()
    time.sleep(5)
    Log("pass\n")
except Exception, e:
    print Exception, e
    Log("fail\n")
    pass

# Power the same VM back on; expected to succeed.
try:
    Log("Power On (should pass)")
    vm1.PowerOn()
    time.sleep(5)
    Log("pass\n")
except Exception, e:
    print Exception, e
Esempio n. 30
0
def main():
   """Drive the VirtualDiskManager tests against a single host.

   Parses command-line options, connects to the host, locates the
   VirtualDiskManager and DatastoreSystem, then runs
   VirtualDiskManagerTest for the requested number of iterations,
   recording PASS/FAIL per iteration.
   """
   supportedArgs = [
      (["h:", "host="], "localhost", "Host name", "host"),
      # NOTE(review): the `******` below looks like a redaction artifact —
      # likely meant to read (["u:", "user="], "root", ...); confirm
      # against the upstream source before running.
      (["u:", "user="******"root", "User name", "user"),
      (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
      (["d:", "ds="], None, "Datastore name", "ds"),
      (["r:", "rdm="], None, "Device path used in rdm creation", "rdm"),
      (["n:", "nas="], None,
              "Nas datastore creation info format:'host:share:dsname'", "nas"),
#     (["s:", "subdir="], "testvdm/", "Subdirectory in selected datastore as "
#                         "possible destination for disks'", "subdir"),
      (["i:", "numiter="], "1", "Number of iterations", "iter") ]

   supportedToggles = [
          (["usage", "help"], False, "Show usage information", "usage"),
          (["cleanup", "c"], True, "Try to cleanup test vms from previous runs",
                                   "cleanup")]

   args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
   if args.GetKeyValue("usage") == True:
      args.Usage()
      sys.exit(0)

   # Connect
   si = SmartConnect(host=args.GetKeyValue("host"),
                     user=args.GetKeyValue("user"),
                     pwd=args.GetKeyValue("pwd"))

   Log("Connected to host " + args.GetKeyValue("host"))

   # Process command line
   numiter = int(args.GetKeyValue("iter"))
   doCleanup = args.GetKeyValue("cleanup")
   status = "PASS"

   resultsArray = []

   serviceInstanceContent = si.RetrieveContent()
   vdiskMgr = serviceInstanceContent.GetVirtualDiskManager()

   # Publish the host's DatastoreSystem as a module-level global so the
   # test helpers in this file can reach it.
   hostSystem = host.GetHostSystem(si)
   hostConfigManager = hostSystem.GetConfigManager()
   global datastoreSystem
   datastoreSystem = hostConfigManager.GetDatastoreSystem()

   # Bail out cleanly (exit 0) if the host exposes no VirtualDiskManager.
   if vdiskMgr == None:
      Log("Virtual Disk Manager not found")
      sys.exit(0)

   for i in range(numiter):
      bigClock = StopWatch()
      try:
         # Inner try/finally guarantees the per-iteration timing is
         # reported even when the tests raise.
         try:
            ph = Phase()

            vdiskMgrTest = VirtualDiskManagerTest(vdiskMgr, args)
            vdiskMgrTest.RunTests()

            ph.SetPhase("Virtual Disk Manager Tests")
            status = "PASS"

         finally:
            bigClock.finish("iteration " + str(i))

      # While debugging, uncomment the line below to see backtraces
      # when an exception occurs.
      except Exception as e:
         Log("Caught exception : " + str(e))
         status = "FAIL"

      Log("TEST RUN COMPLETE: " + status)
      resultsArray.append(status)

   # Summarize the per-iteration outcomes at the end of the run.
   Log("Results for each iteration: ")
   for i in range(len(resultsArray)):
      Log("Iteration " + str(i) + ": " + resultsArray[i])