Example #1
def cleanup(dvsUuidList):
    """
    Remove the dvs created as part of the setup phase. Assumes no clients are connected.
    """
    if options.nocleanup:
        print("Not doing cleanup as requested")
        return
    vm1 = folder.Find(options.vmName)
    if vm1 != None:
        try:
            vm.PowerOff(vm1)
        except Exception as e:
            pass
        vm.Destroy(vm1)

    dvsManager = si.RetrieveInternalContent(
    ).hostDistributedVirtualSwitchManager
    # Guard against a None list: iterating None would raise a TypeError here
    if dvsUuidList != None:
        for dvsUuid in dvsUuidList:
            try:
                dvsManager.RemoveDistributedVirtualSwitch(dvsUuid)
            except Exception as e:
                print(e)
        del dvsUuidList[:]

    try:
        dvsManager.RemoveDistributedVirtualSwitch(options.uuid)
    except Exception as e:
        print(e)
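
A minimal calling sketch (an assumption, not part of the original script): the surrounding harness is expected to set up the global `options` and `si` used above, and passing an empty list rather than None keeps the guard around the per-switch loop trivially satisfied.

# Hedged usage sketch: `options` and `si` are assumed to be initialized by the
# test harness; `createdDvsUuids` is a hypothetical name for the list the
# setup phase fills with the UUIDs of the switches it creates.
createdDvsUuids = []
# ... setup code would append each created DVS UUID here ...
cleanup(createdDvsUuids)   # safe even when setup created nothing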
Example #2
def main():
    supportedArgs = [
        (["h:", "host="], "localhost", "Host name", "host"),
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], None, "Password", "pwd"),
        (["v:", "vmname="], None, "Name of the virtual machine", "vmname"),
    ]
    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
    ]
    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Connect
    connection = Connection(host=args.GetKeyValue("host"),
                            user=args.GetKeyValue("user"),
                            pwd=args.GetKeyValue("pwd"),
                            namespace="vim25/5.5")
    with connection as si:
        # Process command line
        vmname = args.GetKeyValue("vmname")
        vm = folder.Find(vmname)
        if vm == None:
            print("Could not find VM", vmname)
            sys.exit(1)
        print(SerializeToConfig(vm.config, tag="configInfo"))
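
A hedged invocation sketch for the example above, assuming arguments.Arguments maps the getopt-style strings ("h:", "u:", "p:", "v:") to the short options -h, -u, -p and -v as the declarations suggest; the script name, host, password and VM name are placeholders.

python <script>.py -h esx-host.example.com -u root -p '<password>' -v MyTestVM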
Example #3
def main():
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "", "Password", "pwd"),
                     (["v:", "vmname="], "", "Name of the virtual machine",
                      "vmname")]

    supportedToggles = [(["usage",
                          "help"], False, "Show usage information", "usage")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")

    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")

    vm1 = None
    try:
        # Find the VM
        vm1 = folder.Find(vmname)
        if vm1 == None:
            raise Exception("VM with name " + vmname + " cannot be found!")

        instanceId = "0"
        # Inspired by InternalCommand() in vim/py/tests/vmTestHbr.py!!
        vprobesMgr = Hostd.VprobesManager("ha-internalsvc-vprobesmgr",
                                          si._stub)
        bad_script = '''0
         (vprobe VMXLoad (printf \"Test script loaded\\n\"))
         (vprobe VMM1Hz (printf \"Hello World\\n\"))
         (vprobe badProbeName (printf \"Test script unloaded\\n\"))
         '''
        task = vprobesMgr.LoadVprobes(vm1, bad_script)
        WaitForTask(task)

        Log("FAILURE: Failed to catch exception")

    except Vmodl.Fault.SystemError as e:
        Log("EXPECTED FAILURE: Load failed: " + e.reason)
        Log("SUCCESS: VProbes tests completed")
        return
    except Exception as e:
        Log("FAILURE: Caught exception : " + str(e))
        raise e

    return
Example #4
 def destroyTestVM(self, vmname):
     """
     Powers down the VM if needed and then destroys it.
     """
     existing_vm = folder.Find(vmname)
     if existing_vm is not None:
         if vm.VM(existing_vm, None, None).IsRunning():
             vm.PowerOff(existing_vm)
         existing_vm.Destroy()
         self._vm = None
Example #5
    def tearDown(self):
        """
        Destroys test VM.
        """
        self._si = None

        self.destroyTestVM(self._vmname)
        existing_vm = folder.Find(self._vmname)
        if (existing_vm is not None):
            raise self.failureException("Test VM should have been destroyed.")
        print("INFO: destroyed vm %s " % (self._vmname))
Example #6
def ReloadSecondary(si, vm1):
    with LogSelfOp() as logOp:
        curSi = connect.GetSi()
        connect.SetSi(si)
        vmname = vm1.GetConfig().GetName()
        Log("Reloading secondary VM")
        vm1.Reload()
        vm2 = folder.Find(vmname)
        if vm2 == None:
            raise Exception("Reload caused the VM to go invalid")
        connect.SetSi(curSi)
Example #7
def init(hostname, user, passwd, vmname, vmxpath, guestuser, guestpwd,
         guestrootuser, guestrootpassword, powerOn=True, getIntCont=False):
   # Connect and get the Service Instance.
   # Make sure we get the proper version (dev).
   svcInst = SmartConnect(host=hostname, user=user, pwd=passwd)
   svcInstIntCont = ""
   if getIntCont:
      svcInstIntCont = svcInst.RetrieveInternalContent()

   # Find the vm if it's there.
   virtualMachine = folder.Find(vmname)

   # if it's not there, maybe we just rebooted and it lost its config,
   # so try to register and re-find.
   if virtualMachine == None:
      Log("Registering " + vmxpath)
      folder.Register(vmxpath)
      virtualMachine = folder.Find(vmname)

   # set up a guest auth object with root privs
   guestAdminAuth = ""
   if guestrootuser != "":
      guestAdminAuth = npAuth(username=guestrootuser, password=guestrootpassword,
                              interactiveSession=False)

   # set up a guest auth object (good and bad)
   guestAuth = npAuth(username=guestuser, password=guestpwd, interactiveSession=False)

   guestAuthBad = npAuth(username="******", password="******", interactiveSession=False)

   # power on the VM if needed
   if powerOn and virtualMachine.GetRuntime().GetPowerState() != Vim.VirtualMachine.PowerState.poweredOn:
      Log("Powering on")
      vm.PowerOn(virtualMachine)

   if not getIntCont:
      globs = [svcInst, virtualMachine, guestAdminAuth, guestAuth, guestAuthBad]
   else:
      globs = [svcInst, svcInstIntCont, virtualMachine, guestAdminAuth, guestAuth, guestAuthBad]

   return globs
Example #8
def cleanupvm(vmName):
   print("cleaning up vm:'" + vmName + "'")
   vm1 = folder.Find(vmName)
   if vm1 is not None:
       try:
          vm.PowerOff(vm1)
       except Exception as e:
          print(e)
       try:
          vm.Destroy(vm1)
       except Exception as e:
          print(e)
Example #9
def testEditDisk(options):
    name = getUniqueVmName()
    machine = folder.Find(name)
    if machine: vm.Destroy(machine)
    machine = vm.CreateQuickDummy(name,
                                  datastoreName=options.datastore,
                                  scsiCtlrs=1)
    Log("CreateVM(%s, %s)" % (name, options.datastore))

    addFlatDisk(options, machine, shared=False)
    diskDev = vmconfig.CheckDevice(machine.config, VirtualDisk)[0]
    editDisk(options, machine, diskDev, shared=True)

    vm.Destroy(machine)
Example #10
def main():
    # Process command line
    host = "jairam-esx"
    if len(sys.argv) > 1:
        host = sys.argv[1]

    try:
        si = Connect(host)
        atexit.register(Disconnect, si)
        vm.CreateQuickDummy("CpuIdTest")
        v1 = folder.Find("CpuIdTest")
        print("Created a dummy")

        # Print current.
        print(v1.GetConfig().GetCpuFeatureMask())

        # Change level 0 and level 1
        config = Vim.Vm.ConfigSpec()

        lvl0 = Vim.Vm.ConfigSpec.CpuIdInfoSpec()
        info = Vim.Host.CpuIdInfo()
        info.SetLevel(0)
        info.SetEax("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info.SetEbx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info.SetEcx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info.SetEdx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        lvl0.SetOperation("add")
        lvl0.SetInfo(info)

        lvl1 = Vim.Vm.ConfigSpec.CpuIdInfoSpec()
        info2 = Vim.Host.CpuIdInfo()
        info2.SetLevel(1)
        info2.SetVendor("amd")
        info2.SetEax("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        info2.SetEdx("XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
        lvl1.SetOperation("add")
        lvl1.SetInfo(info2)

        config.SetCpuFeatureMask([lvl0, lvl1])
        print("Assigned features")

        task = v1.Reconfigure(config)
        if WaitForTask(task) == "error":
            raise task.GetInfo().GetError()
        vm.Destroy(v1)
    except Exception as e:
        print("Failed test due to exception: %s" % e)
        raise
Example #11
def main():
    # command line
    options = get_options()

    si = connect.SmartConnect(host=options.host, pwd=options.password)
    print("Connected %s" % options.host)

    vmHandle = folder.Find(options.vmname)

    cspec = Vim.Vm.ConfigSpec()
    cspec.messageBusTunnelEnabled = (options.task.lower() == 'enable')
    task = vmHandle.Reconfigure(cspec)
    WaitForTask(task)

    # util.interrogate(vmHandle.config)

    print("messageBusTunnelEnabled: %s" %
          vmHandle.config.messageBusTunnelEnabled)
Example #12
def testAddDisk(options, online, shared):
    name = getUniqueVmName()
    machine = folder.Find(name)
    if machine: vm.Destroy(machine)
    machine = vm.CreateQuickDummy(name,
                                  datastoreName=options.datastore,
                                  scsiCtlrs=1)
    Log("CreateVM(%s, %s)" % (name, options.datastore))

    if online:
        vm.PowerOn(machine)
        Log("PowerOn(%s)" % machine.name)

    addFlatDisk(options, machine, shared)
    addRdmDisk(options, machine, shared)

    if online:
        vm.PowerOff(machine)
        Log("PowerOff(%s)" % machine.name)

    vm.Destroy(machine)
Example #13
def TestIllegalOptions(vm1):
    bldType = os.environ.get('VMBLD', '') or os.environ.get('BLDTYPE', '')
    if bldType != 'obj':
        Log('Test requires obj build')
        return vm1
    vmname = vm1.GetSummary().GetConfig().GetName()
    cfg = vm1.GetSummary().GetConfig().GetVmPathName()
    vm.CreateSnapshot(vm1, "backup", "backup", False, False)
    for v in ['', ' with reload']:
        Log('Testing illegal config file modification%s' % v)
        for i in range(3):
            illegalOpt = Vim.Option.OptionValue(
                key='vmx.test.sandbox.illegalOption', value=str(i))
            nthWriteOpt = Vim.Option.OptionValue(
                key='vmx.test.sandbox.nthWrite', value='%d' % (2 + i))
            cspec = Vim.Vm.ConfigSpec(extraConfig=[nthWriteOpt, illegalOpt])
            task = vm.Reconfigure(vm1, cspec)
            vm.PowerOn(vm1)
            if v == ' with reload':
                try:
                    vm1.Reload()
                except:
                    Log('Illegal options detected before Reload')
                    pass
            time.sleep(10)
            if vm1.runtime.powerState != Vim.VirtualMachine.PowerState.poweredOff:
                raise Exception(
                    'VM unexpectedly still powered on (option %d)' % i)
            try:
                vm.PowerOn(vm1)
                raise Exception('PowerOn is allowed unexpectedly (option %d)' %
                                i)
            except:
                pass
            vm1.Unregister()
            folder.Register(cfg)
            vm1 = folder.Find(vmname)
            vm.RevertToCurrentSnapshot(vm1)
    return vm1
Example #14
def cleanup(si, options, force=False):
    '''
    Try to remove everything possible.
    General cleanup that can be called after every test.
    This should not throw or fail.
    '''

    if options.nocleanup and not force:
        print("Not doing cleanup as requested")
        return

    vm1 = folder.Find(options.vmName)
    if vm1 != None:
        try:
            vm.PowerOff(vm1)
        except Exception as e:
            pass
        vm.Destroy(vm1)

    networkSystem = host.GetHostSystem(si).GetConfigManager().networkSystem
    try:
        networkSystem.RemoveVirtualSwitch(vswitchName=options.vsName)
    except Exception as e:
        pass
Example #15
def main():
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
                     (["v:", "vmname="], "HotPlugTest",
                      "Name of the virtual machine", "vmname"),
                     (["i:", "numiter="], "1", "Number of iterations", "iter")]

    supportedToggles = [
        (["usage", "help"], False, "Show usage information", "usage"),
        (["runall", "r"], True, "Run all the tests", "runall"),
        (["nodelete"], False, "Dont delete vm on completion", "nodelete")
    ]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    runall = args.GetKeyValue("runall")
    noDelete = args.GetKeyValue("nodelete")
    status = "PASS"

    for i in range(numiter):
        bigClock = StopWatch()
        vm1 = folder.Find(vmname)
        try:
            if vm1:
                Log("Powering on VM " + vm1.GetConfig().GetName())
                if vm1.GetRuntime().GetPowerState(
                ) == Vim.VirtualMachine.PowerState.poweredOff:
                    vm.PowerOn(vm1)

                ## Positive test for the vm
                TestDeviceHotPlugForVm(vm1, True)

            else:
                Log("Did not specify a vmname or the VM was not found. Using the default name HotPlugTest"
                    )

                posVmName = vmname + "_Pos_" + str(i)
                negVmName = vmname + "_Neg_" + str(i)
                Log("Cleaning up VMs from previous runs...")

                vm.Delete(posVmName, True)
                vm.Delete(negVmName, True)

                ## Positive tests on a hwVersion 8 VM
                Log("Creating Hw8 VM..")
                vm1 = vm.CreateQuickDummy(posVmName,
                                          vmxVersion="vmx-08",
                                          memory="1024",
                                          guest="rhel5Guest")
                Log("Powering on VM " + vm1.GetConfig().GetName())
                vm.PowerOn(vm1)

                # Positive tests for hw8 VM
                TestDeviceHotPlugForVm(vm1, True)
                Log("Powering off and deleting VM " + vm1.GetName())
                vm.Delete(posVmName, True)

                ## Positive tests on a hwVersion 7 VM
                Log("Creating Hw7 VM..")
                vm1 = vm.CreateQuickDummy(posVmName,
                                          vmxVersion="vmx-07",
                                          memory="1024",
                                          guest="rhel5Guest")
                Log("Powering on VM " + vm1.GetConfig().GetName())
                vm.PowerOn(vm1)

                # Positive tests for hw7 VM
                TestDeviceHotPlugForVm(vm1, True)
                Log("Powering off and deleting VM " + vm1.GetName())
                vm.Delete(posVmName, True)

                Log("Creating Hw4 VM..")
                vm2 = vm.CreateQuickDummy(negVmName, 1, vmxVersion="vmx-04")
                Log("Powering on VM " + negVmName)
                vm.PowerOn(vm2)

                # Negative tests for hw4 VM
                TestDeviceHotPlugForVm(vm2, False)
                Log("Powering off and deleting VM " + vm2.GetName())
                vm.Delete(negVmName, True)

            Log("Tests completed.")
            bigClock.finish("iteration " + str(i))
        except Exception as e:
            status = "FAIL"
            Log("Caught exception : " + str(e))

    if testFailedCount == 0:
        Log("TEST RUN COMPLETE: " + status)
    else:
        Log("TEST RUN COMPLETE: FAIL")
        Log("Number of total tests failed : " + str(testFailedCount))
Example #16
def TestEphemeral(vmName, uuid):
    """
    Test ephemeral portgroups.
    - Create a VM and configure it to connect to an ephemeral portgroup.
    - Power on the VM and validate that backing is valid.
    - Hot add a nic to connect to an ephemeral portgroup and validate backing.
    - Poweroff and destroy the VM
    """
    print("Testing Ephemeral portgroup behaviour")
    cleanupvm(vmName)
    envBrowser = invt.GetEnv()
    config = vm.CreateQuickDummySpec(vmName)
    cfgOption = envBrowser.QueryConfigOption(None, None)
    # Add an ephemeral dvPortgroup backed nic.
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption, "pg1")
    try:
        vmFolder = invt.GetVmFolder()
        vimutil.InvokeAndTrack(vmFolder.CreateVm, config, invt.GetResourcePool(), None)
    except Exception as e:
        raise
    myVm = folder.Find(vmName)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if len(devices) < 1:
        raise Exception("Failed to add nic")
    if not IsBackingPortNotAllocated(devices):
        print(devices)
        raise Exception ("Nic has a dvPort assigned to it or nic add failed")
    print("Test 1: Create a vm with an ephemeral portgroup backing: PASS")
    vm.PowerOn(myVm)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if len(devices) < 1:
        raise Exception("Failed to add nic")
    if not IsBackingValid(devices):
        raise Exception("Invalid backing allocated")
    print("Test 2: powerOn VM with an ephemeral backing: PASS")
    # Remove and add hot add a nic device to a powered on VM.
    vm.PowerOff(myVm)
    for device in devices:
        if isinstance(device.GetBacking(),\
            Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
            cspec = Vim.Vm.ConfigSpec()
            vmconfig.AddDeviceToSpec(cspec, device, Vim.Vm.Device.VirtualDeviceSpec.Operation.remove)
            break
    task = myVm.Reconfigure(cspec)
    WaitForTask(task)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if IsBackingValid(devices):
        print(devices)
        raise Exception("Remove of device failed.")
    # powerOn the VM and hot add the nic.
    vm.PowerOn(myVm)
    config = Vim.Vm.ConfigSpec()
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption, "pg1")
    task = myVm.Reconfigure(config)
    WaitForTask(task)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if len(devices) < 1:
        raise Exception("Failed to add nic")
    if not IsBackingValid(devices):
        raise Exception("Invalid backing allocated")
    print("Test 3: remove and hot add nic to VM with an ephemeral backing: PASS")
    # Foundry issue wait for fix and then uncomment.
    time.sleep(10)
    for device in devices:
        if isinstance(device.GetBacking(),\
            Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
            device.GetBacking().GetPort().SetPortgroupKey("pg3")
            device.GetBacking().GetPort().SetPortKey(None)
            device.GetBacking().GetPort().SetConnectionCookie(None)
            device.GetConnectable().SetConnected(True)
            cspec = Vim.Vm.ConfigSpec()
            vmconfig.AddDeviceToSpec(cspec, device, Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)
            break
    #task = myVm.Reconfigure(cspec)
    #WaitForTask(task)
    #devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    #if len(devices) < 1:
     #   raise Exception("Failed to edit nic")
    #if not IsBackingValid(devices):
     #   raise Exception("Invalid backing allocated")
    #print("Test4: Reconfig poweredon with a ephemeral backing: PASS")
    print("Ephemeral portgroup tests complete")
Example #17
def TestSimulatedVcClone(vmName, uuid):
    """
    Test the code paths that VC exercises during cloning a VM with
    a dvs backing.
    """
    print("Testing hostd code corresponding to clone")
    cleanupvm(vmName)
    envBrowser = invt.GetEnv()
    config = vm.CreateQuickDummySpec(vmName)
    cfgOption = envBrowser.QueryConfigOption(None, None)
    # Add a nic backed by a dvs portgroup pair.
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption, "invalidPg")
    try:
        vmFolder = invt.GetVmFolder()
        vimutil.InvokeAndTrack(vmFolder.CreateVm, config, invt.GetResourcePool(), None)
    except Vim.Fault.InvalidDeviceSpec:
        print("Test1: Caught invalid device spec as expected")
    else:
        raise Exception("Test1: Create vm with invalid dvPortgroup backing didn't fail as expected")
    print("Test1: Create vm with invalid dvPortgroup backing failed as expected: PASS")

    config = vm.CreateQuickDummySpec(vmName)
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption, "pg1")
    try:
        vmFolder = invt.GetVmFolder()
        vimutil.InvokeAndTrack(vmFolder.CreateVm, config, invt.GetResourcePool(), None)
    except Exception:
        print("Failed to clone a VM to connect to a dvPortgroup")
        raise
    print("Test2: Create vm with valid dvPort backing: PASS")

    # Create a VM only specifying the dvs uuid in its backing.
    vm1 = folder.Find(vmName)
    vm.Destroy(vm1)
    config = vm.CreateQuickDummySpec(vmName)
    config = vmconfig.AddDvPortBacking(config, "", uuid, None, cfgOption, "")
    try:
        vmFolder = invt.GetVmFolder()
        vimutil.InvokeAndTrack(vmFolder.CreateVm, config, invt.GetResourcePool(), None)
    except Exception:
        print("Failed to clone a VM connected to a standalone port")
        raise
    myVm = folder.Find(vmName)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if not IsBackingPortNotAllocated(devices):
        print(devices)
        raise Exception ("Nic has a dvPort assigned to it or nic add failed")
    print("Test3: Create vm with valid dvs uuid specified in the dvsbacking (standalone): PASS")

    # Reconfigure a VM only specifying a dvs uuid in its backing
    for device in devices:
        if isinstance(device.GetBacking(),\
            Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
            cspec = Vim.Vm.ConfigSpec()
            device.GetConnectable().SetConnected(True)
            device.SetUnitNumber(9)
            vmconfig.AddDeviceToSpec(cspec, device, Vim.Vm.Device.VirtualDeviceSpec.Operation.add)
            break
    try:
        task = myVm.Reconfigure(cspec)
        WaitForTask(task)
    except Exception:
        print("Test4: failed to add a device with only dvs backing specified")
    print("Test4: Reconfig VM specifying only the dvsUuid in backing: PASS")

    print("Testing simulate vc clone done")
Example #18
def TestEarlyBinding(vmName, uuid):
    """
    Test early binding portgroup behaviour
    - Creating a VM with a nic connecting to an early binding portgroup with startConnected = true
      fails.
    - Creating a VM with a nic connecting to an early binding portgroup with startConnected = false
      succeeds.
    - Powering on the created VM succeeds.
    - Reconfiguring the VM to connect to an invalid port fails.
    - Hot add of a device connected to an early binding portgroup fails.
    - Reconfiguring the VM to connect to an early binding portgroup when the device is not connected succeeds.
    - Reconfiguring a powered-on VM to connect to an early binding portgroup when the device is connected fails.
    """

    print("Testing early binding portgroup behaviour")
    cleanupvm(vmName)
    envBrowser = invt.GetEnv()
    config = vm.CreateQuickDummySpec(vmName)
    cfgOption = envBrowser.QueryConfigOption(None, None)
    # Add a earlybinding dvPortgroup backed nic.
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption, "pg2")
    try:
        vmFolder = invt.GetVmFolder()
        vimutil.InvokeAndTrack(vmFolder.CreateVm, config, invt.GetResourcePool(), None)
    except Vim.Fault.InvalidDeviceSpec:
        print("Caught invalid device backing as expected")
    print("Test 1: Creating a device backed by an early binding portgroup with "
          "startConnected = true fails as expected: PASS")
    config = vm.CreateQuickDummySpec(vmName)
    cfgOption = envBrowser.QueryConfigOption(None, None)
    # Add an earlybinding dvPortgroup backed nic.
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption, "pg2", False)
    vmFolder = invt.GetVmFolder()
    vimutil.InvokeAndTrack(vmFolder.CreateVm, config, invt.GetResourcePool(), None)
    myVm = folder.Find(vmName)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if not IsBackingPortNotAllocated(devices):
        print(devices)
        raise Exception ("Nic has a dvPort assigned to it or nic add failed")
    print("Test 2: Creating a device backed by an early binding portgroup with "
          "startConnected = false succeeds: PASS")
    myVm = folder.Find(vmName)
    vm.PowerOn(myVm)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if len(devices) < 1:
        raise Exception("nic not present")
    if not IsBackingPortNotAllocated(devices):
        print(devices)
        raise Exception ("Nic has a dvPort assigned to it or nic add failed")
    print("Test 3: Power on VM succeeds: PASS")
    # Reconfigure the VM to connect to an invalid port.
    for device in devices:
        if isinstance(device.GetBacking(),\
            Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
            device.GetBacking().GetPort().SetPortKey("100")
            cspec = Vim.Vm.ConfigSpec()
            vmconfig.AddDeviceToSpec(cspec, device, Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)
            break
    try:
        task = myVm.Reconfigure(cspec)
        WaitForTask(task)
    except Vim.Fault.InvalidDeviceSpec:
        print("Caught invalid device backing")
    print("Test 4: Reconfig a VM to connect to an invalid dvPort fails as expected: PASS")

    # Add a device to connect to an early binding portgroup with no dvPort specified.
    config = Vim.Vm.ConfigSpec()
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption, "pg2")
    try:
        task = myVm.Reconfigure(config)
        WaitForTask(task)
    except Vim.Fault.InvalidDeviceSpec:
        print("Caught invalid device backing")
    print("Test 5: Hot add of a device to connect to an early binding portgroup fails as expected: PASS")

    # Reconfigure device to connect to an early binding portgroup.
    for device in devices:
        if isinstance(device.GetBacking(),\
            Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
            device.GetBacking().GetPort().SetPortgroupKey("pg2")
            device.GetBacking().GetPort().SetPortKey(None)
            device.GetBacking().GetPort().SetConnectionCookie(None)
            device.GetConnectable().SetConnected(True)
            cspec = Vim.Vm.ConfigSpec()
            vmconfig.AddDeviceToSpec(cspec, device, Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)
            break
    try:
        task = myVm.Reconfigure(cspec)
        WaitForTask(task)
    except Vim.Fault.InvalidDeviceSpec:
        print("Caught invalid device backing")
    print("Test 6: Reconfig a VM to connect to an early binding portgroup fails as expected: PASS")

    # Reconfigure a device to disconnected state and connect to an early binding dvPortgroup.
    for device in devices:
        if isinstance(device.GetBacking(),\
            Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
            device.GetBacking().GetPort().SetPortgroupKey("pg2")
            device.GetBacking().GetPort().SetPortKey(None)
            device.GetBacking().GetPort().SetConnectionCookie(None)
            device.GetConnectable().SetConnected(False)
            cspec = Vim.Vm.ConfigSpec()
            vmconfig.AddDeviceToSpec(cspec, device, Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)
            break
    task = myVm.Reconfigure(cspec)
    WaitForTask(task)
    if not IsBackingPortNotAllocated(devices):
        print(devices)
        raise Exception ("Nic has a dvPort assigned to it or nic add failed")
    print("Test 7: Reconfig powered on VM to connect to an early binding backing with device disconnected: PASS")
    print("EarlyBinding tests complete")
Example #19
def TestLateBinding(vmName, uuid):
    """
     Create a VM and connect it to a latebinding portgroup.
     Power on the VM. Validate that a dvPort has not been allocated for the VM.
     Reconfigure the VM to connect to an ephemeral dvPortgroup and validate that
     the VM has a valid backing.
     Reconfigure the VM to connect back to the latebinding portgroup; the
     reconfigure should fail.
     Reconfigure the VM to connect back to the latebinding portgroup with the
     device disconnected; the connect should succeed.
    """
    cleanupvm(vmName)
    print("Testing latebinding portgroup behaviour")
    envBrowser = invt.GetEnv()
    config = vm.CreateQuickDummySpec(vmName)
    cfgOption = envBrowser.QueryConfigOption(None, None)
    # Add a latebinding dvPortgroup backed nic.
    config = vmconfig.AddDvPortBacking(config, "", uuid, 0, cfgOption, "pg4",
                                       type = 'vmxnet3')
    try:
        vmFolder = invt.GetVmFolder()
        vimutil.InvokeAndTrack(vmFolder.CreateVm, config, invt.GetResourcePool(), None)
    except Exception as e:
        raise
    myVm = folder.Find(vmName)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if len(devices) < 1:
        raise Exception("Failed to add nic")
    if not IsBackingPortNotAllocated(devices):
        raise Exception("dvPort allocated for a latebinding portgroup")
    print("Test1: Create VM with a latebinding portgroup backing: PASS")
    # power on the VM.
    vm.PowerOn(myVm)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if len(devices) < 1:
        raise Exception("Nic seems to be missing")
    if not IsBackingPortNotAllocated(devices):
        raise Exception("dvPort allocated for a latebinding portgroup after powerOn")
    print("Test2: Powering on a VM with a latebinding portgroup backing: PASS")

    # Reconfigure the VM to connect to an ephemeral backing.
    for device in devices:
        if isinstance(device.GetBacking(),\
            Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
            device.GetBacking().GetPort().SetPortgroupKey("pg3")
            device.GetBacking().GetPort().SetPortKey(None)
            device.GetBacking().GetPort().SetConnectionCookie(None)
            device.GetConnectable().SetConnected(True)
            cspec = Vim.Vm.ConfigSpec()
            vmconfig.AddDeviceToSpec(cspec, device, Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)
            break
    task = myVm.Reconfigure(cspec)
    WaitForTask(task)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if len(devices) < 1:
        raise Exception("Failed to edit nic")
    if not IsBackingValid(devices):
        raise Exception("Invalid backing allocated")
    print("Test3: Reconfig powered-on VM with an ephemeral backing from a latebinding backing allocates a port: PASS")

    #Reconfig the VM to connect to a latebinding backing.
    for device in devices:
        if isinstance(device.GetBacking(),\
            Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
            device.GetBacking().GetPort().SetPortgroupKey("pg4")
            device.GetBacking().GetPort().SetPortKey(None)
            device.GetBacking().GetPort().SetConnectionCookie(None)
            cspec = Vim.Vm.ConfigSpec()
            vmconfig.AddDeviceToSpec(cspec, device, Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)
            break
    try:
        task = myVm.Reconfigure(cspec)
        WaitForTask(task)
    except Vim.Fault.InvalidDeviceSpec:
        print("Caught invalid device backing")
    print("Test4: Reconfig powered on VM to connect to a latebinding backing fails as expected: PASS")

    # reconfigure the VM to connect to a latebinding portgroup and disconnect the device.
    for device in devices:
        if isinstance(device.GetBacking(),\
            Vim.Vm.Device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
            device.GetBacking().GetPort().SetPortgroupKey("pg4")
            device.GetBacking().GetPort().SetPortKey(None)
            device.GetBacking().GetPort().SetConnectionCookie(None)
            device.GetConnectable().SetConnected(False)
            cspec = Vim.Vm.ConfigSpec()
            vmconfig.AddDeviceToSpec(cspec, device, Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)
            break
    task = myVm.Reconfigure(cspec)
    WaitForTask(task)
    devices = vmconfig.CheckDevice(myVm.GetConfig(), Vim.Vm.Device.VirtualEthernetCard)
    if not IsBackingPortNotAllocated(devices):
        print(devices)
        raise Exception ("Nic has a dvPort assigned to it or nic add failed")
    print("Test5: Reconfig powered on VM to connect to a latebinding backing with device disconnected: PASS")
    print("Late binding tests complete")
Example #20
def main():
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
                     (["v:", "vmname="], "CreateScreenshot-VM",
                      "Name of the virtual machine", "vmname"),
                     (["i:", "numiter="], "1", "Number of iterations", "iter")]

    supportedToggles = [(["usage",
                          "help"], False, "Show usage information", "usage")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    status = "PASS"

    for i in range(numiter):
        vm1 = None
        # Cleanup from previous runs
        vm1 = folder.Find(vmname)
        if vm1 != None:
            vm1.Destroy()

        # Create new VM
        vm1 = vm.CreateQuickDummy(vmname, guest="winXPProGuest")
        print("Using VM : " + vm1.GetName())

        try:
            # CreateScreenshot when VM is powered off
            print("Attempt to CreateScreenshot for a powered-off VM")
            try:
                vm.CreateScreenshot(vm1)
                status = "FAIL"
                return
            except Exception as e:
                print("Verified negative test case and got an exception")
                print("Caught exception : " + str(e))

            print("Powering on the VM...")
            vm.PowerOn(vm1)

            # CreateScreenshot when VM is powered on
            print("Attempt to CreateScreenshot for a powered-on VM")

            for i in range(10):
                task = vm1.CreateScreenshot()
                WaitForTask(task)
                screenshotPath = task.GetInfo().GetResult()
                print("The datastore path of the screenshot is: " +
                      screenshotPath)

            print("Suspending the VM...")
            vm.Suspend(vm1)

            # CreateScreenshot when VM is suspended
            print("Attempt to CreateScreenshot for a suspended VM")
            try:
                vm.CreateScreenshot(vm1)
                status = "FAIL"
                return
            except Exception as e:
                print("Verified negative test case and got an exception")
                print("Caught exception : " + str(e))

            # Delete the VM and check whether the screenshot files are deleted
            print("Deleting the VM...")
            delTask = vm1.Destroy()
            WaitForTask(delTask)

        except Exception as e:
            print("Caught exception : " + str(e))
            status = "FAIL"

        if status == "FAIL":
            break

    print("Test status : " + str(status))
    return
Example #21
def main():
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "", "Password", "pwd"),
                     (["v:", "vmname="], "t1", "Name of the virtual machine",
                      "vmname"),
                     (["i:", "iter="], "1", "Num of iterations", "iter")]
    supportedToggles = [(["usage",
                          "help"], False, "Show usage information", "usage")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Connect
    si = SmartConnect(host=args.GetKeyValue("host"),
                      port=443,
                      user=args.GetKeyValue("user"),
                      pwd=args.GetKeyValue("pwd"))
    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))

    # Cleanup from previous runs.
    vm1 = folder.Find(vmname)
    if vm1 == None:
        print("VM not found.")
        sys.exit(0)

    for i in range(numiter):
        try:
            Log("Create initial root snapshot")
            vm.CreateSnapshot(vm1, "gg", "old root", False, False)
            gg = vm1.snapshot.currentSnapshot

            Log("Create future root snapshot R1")
            vm.CreateSnapshot(vm1, "R1", "new root 1", False, False)
            s = vm1.snapshot.currentSnapshot

            task = s.Revert()
            WaitForTask(task)

            Log("Create future root snapshot R2")
            vm.CreateSnapshot(vm1, "R2", "new root 2", False, False)
            r2 = vm1.snapshot.currentSnapshot

            Log("Power On")
            vm.PowerOn(vm1)

            Log("Remove initial root snapshot and consolidate")
            vimutil.InvokeAndTrack(gg.Remove, False)
            gg = None

            if vm1.runtime.consolidationNeeded:
                raise Exception("Consolidation failed while removing gg.")

            Log("Remove the next root snapshot without consolidation")
            vimutil.InvokeAndTrack(s.Remove, False, False)
            s = None

            if vm1.runtime.consolidationNeeded == False:
                raise Exception("Consolidation flag not raised correctly at 2nd root.")

            Log("Consolidate snapshot")
            vimutil.InvokeAndTrack(vm1.ConsolidateDisks)

            if vm1.runtime.consolidationNeeded:
                raise Exception("Consolidation flag not cleared after consolidate.")

            # time.sleep(5)

            Log("Remove all snapshots without consolidation")
            vimutil.InvokeAndTrack(vm1.RemoveAllSnapshots, False)

            if vm1.runtime.consolidationNeeded == False:
                raise Exception("Consolidation flag not raised correctly at removeall.")

            Log("Create online snapshot after removeall")
            vm.CreateSnapshot(vm1, "R3", "new root 3", False, False)

            Log("Power off")
            vm.PowerOff(vm1)

            Log("Remove all snapshots and consolidate")
            vm.RemoveAllSnapshots(vm1)

            if vm1.runtime.consolidationNeeded:
                raise Exception("Consolidation flag not cleared after removeall.")

            Log("Success: iter " + str(i))

        except Exception as e:
            Log("Caught exception at iter " + str(i) + ": " + str(e))
Example #22
def main():
    supportedArgs = [(["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="], "root", "User name", "user"),
                     (["p:", "pwd="], "", "Password", "pwd"),
                     (["v:", "vmname="], "", "Name of the virtual machine",
                      "vmname")]

    supportedToggles = [(["usage",
                          "help"], False, "Show usage information", "usage")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Connect
    si = Connect(host=args.GetKeyValue("host"),
                 user=args.GetKeyValue("user"),
                 pwd=args.GetKeyValue("pwd"),
                 version="vim.version.version9")

    atexit.register(Disconnect, si)

    # Process command line
    vmname = args.GetKeyValue("vmname")

    vm1 = None
    try:
        # Find the VM
        vm1 = folder.Find(vmname)
        if vm1 == None:
            raise Exception("VM with name " + vmname + " cannot be found!")

        instanceId = "0"
        # Inspired by InternalCommand() in vim/py/tests/vmTestHbr.py!!
        vprobesMgr = Hostd.VprobesManager("ha-internalsvc-vprobesmgr",
                                          si._stub)

        # Print out the VProbes version in the VM domain
        task = vprobesMgr.GetVprobesVersion(vm1)
        WaitForTask(task)
        Log(str(task.info.result))

        # Print out the static probes in the VM domain
        task = vprobesMgr.ListVprobesStaticProbes(vm1)
        WaitForTask(task)
        Log("List of static probes:\n" + str(task.info.result))

        # Print out the global variables in the VM domain
        task = vprobesMgr.ListVprobesGlobals(vm1)
        WaitForTask(task)
        Log("List of globals:\n" + str(task.info.result))

        script = '''0
         (vprobe VMXLoad (printf \"Test script loaded\\n\"))
         (vprobe VMM1Hz (printf \"Hello World\\n\"))
         (vprobe VMMUnload (printf \"Test script unloaded\\n\"))
         '''
        task = vprobesMgr.LoadVprobes(vm1, script)
        WaitForTask(task)
        Log("Load instance id: " + str(task.info.result))

        task = vprobesMgr.ResetVprobes(vm1, instanceId)
        WaitForTask(task)
        Log("VProbes instance reset successfully")

        Log("SUCCESS: VProbes tests completed")

    except Vmodl.Fault.SystemError as e:
        Log("FAILURE: Failed: " + e.reason)
    except Vim.Fault.InvalidState as e:
        Log("FAILURE: VM in an invalid state")
    except Exception as e:
        Log("FAILURE: Caught exception : " + str(e))
Example #23
def testLinkedClone(si, numiter, deltaDiskFormat, backingType, vmxVersion, ds1, ds2, status, resultsArray):
   for i in range(numiter):
      bigClock = StopWatch()
      try:
         try:
            vm1Name = "LinkedParent_" + str(i)
            vm1 = folder.Find(vm1Name)
            if vm1 != None:
               Log("Cleaning up old vm with name: " + vm1Name)
               vm1.Destroy()

            # Create a simple vm with nothing but two disk on ds1
            vm1 = vm.CreateQuickDummy(vm1Name, numScsiDisks=2, \
                                      datastoreName=ds1, diskSizeInMB=1, \
                                      vmxVersion=vmxVersion, \
                                      backingType=backingType)
            Log("Created parent VM1 --" + vm1Name + " with Native snapshotting"
              + " capability set to " + str(vm1.IsNativeSnapshotCapable()))

            vm1DirName = vm1.config.files.snapshotDirectory

            # Create snapshots

            # S1, S1C1, S1C1C1 and S1C2
            vm.CreateSnapshot(vm1, "S1", "S1 is the first snapshot", \
                              False, False)
            snapshotInfo = vm1.GetSnapshot()
            S1Snapshot = snapshotInfo.GetCurrentSnapshot()
            Log("Create Snapshot S1 for VM1")

            vm.CreateSnapshot(vm1, "S1-C1", "S1-C1 is the first child of S1",\
                              False, False)

            snapshotInfo = vm1.GetSnapshot()
            S1C1Snapshot = snapshotInfo.GetCurrentSnapshot()
            Log("Create Snapshot S1C1 for VM1")

            vm.CreateSnapshot(vm1, "S1-C1-C1", \
                              "S1-C1-C1 is the grand child of S1", \
                              False, False)
            snapshotInfo = vm1.GetSnapshot()
            S1C1C1Snapshot = snapshotInfo.GetCurrentSnapshot()
            Log("Create Snapshot S1C1C1 for VM1")

            # revert to S1
            vimutil.InvokeAndTrack(S1Snapshot.Revert)
            Log("Reverted VM1 to Snapshot S1")

            vm.CreateSnapshot(vm1, "S1-C2", \
                              "S1-C2 is the second child of S1", False, False)

            snapshotInfo = vm1.GetSnapshot()
            S1C2Snapshot = snapshotInfo.GetCurrentSnapshot()
            Log("Create Snapshot S1C2 for VM1")

            # revert to S1C1C1, so it is the current snapshot
            vimutil.InvokeAndTrack(S1C1C1Snapshot.Revert)
            Log("Reverted VM1 to Snapshot S1C1C1")

            # Get the name of the parent disks
            disks = vmconfig.CheckDevice(S1C2Snapshot.GetConfig(), \
                                         Vim.Vm.Device.VirtualDisk)

            if len(disks) != 2:
               raise Exception("Failed to find parent disk1")

            parentDisk1 = disks[0].GetBacking().GetFileName()

            disks = vmconfig.CheckDevice(S1C1C1Snapshot.GetConfig(), Vim.Vm.Device.VirtualDisk)

            if len(disks) != 2:
               raise Exception("Failed to find parent disk2")

            parentDisk2 = disks[1].GetBacking().GetFileName()

            # Create a VM2 on ds2 that is linked off S1C2
            vm2Name = "LinkedChild1_" + str(i)
            configSpec = vmconfig.CreateDefaultSpec(name = vm2Name, datastoreName = ds2)
            configSpec = vmconfig.AddScsiCtlr(configSpec)
            configSpec = vmconfig.AddScsiDisk(configSpec, datastorename = ds2, capacity = 1024, backingType = backingType)
            configSpec.SetVersion(vmxVersion)
            childDiskBacking = configSpec.GetDeviceChange()[1].GetDevice().GetBacking()
            parentBacking = GetBackingInfo(backingType)
            parentBacking.SetFileName(parentDisk1)
            childDiskBacking.SetParent(parentBacking)
            childDiskBacking.SetDeltaDiskFormat(deltaDiskFormat)

            resPool = invt.GetResourcePool()
            vmFolder = invt.GetVmFolder()
            vimutil.InvokeAndTrack(vmFolder.CreateVm, configSpec, resPool)

            vm2 = folder.Find(vm2Name)
            Log("Created child VM2 --" + vm2Name)

            vm2DirName = vm2.config.files.snapshotDirectory

            # Create a VM3 on ds2 that is linked off S1C1C1
            vm3Name = "LinkedChild2_" + str(i)
            configSpec.SetName(vm3Name)
            parentBacking.SetFileName(parentDisk2)

            vimutil.InvokeAndTrack(vmFolder.CreateVm, configSpec, resPool)
            vm3 = folder.Find(vm3Name)
            Log("Created child VM3 --" + vm3Name)

            vm3DirName = vm3.config.files.snapshotDirectory

            # Create snapshot VM3S1 for VM3
            vm.CreateSnapshot(vm3, "VM3S1", "VM3S1 is VM3 snapshot", False, False)
            Log("Create Snapshot VM3S1 for VM3")

            # Create snapshot VM3S2 for VM3
            vm.CreateSnapshot(vm3, "VM3S2", "VM3S2 is VM3 snapshot", False, False)
            Log("Create Snapshot VM3S2 for VM3")
            snapshotInfo = vm3.GetSnapshot()
            VM3S2Snapshot = snapshotInfo.GetCurrentSnapshot()

            # get the disk name of VM3S2 so it can be configured as a
            # parent disk for VM2
            disks = vmconfig.CheckDevice(VM3S2Snapshot.GetConfig(), Vim.Vm.Device.VirtualDisk)

            if len(disks) != 1:
               raise Exception("Failed to find parent disk2")

            parentDisk3 = disks[0].GetBacking().GetFileName()

            # create a delta disk off VM3S2 on VM2
            Log("Adding delta disk off VM3S2 to VM2")
            configSpec = Vim.Vm.ConfigSpec()
            configSpec = vmconfig.AddScsiDisk(configSpec, \
                                              datastorename = ds2, \
                                              cfgInfo = vm2.GetConfig(), \
                                              backingType = backingType)
            childDiskBacking = configSpec.GetDeviceChange()[0].GetDevice().GetBacking()
            parentBacking = GetBackingInfo(backingType)
            parentBacking.SetFileName(parentDisk3)
            childDiskBacking.SetParent(parentBacking)
            childDiskBacking.SetDeltaDiskFormat(deltaDiskFormat)

            vimutil.InvokeAndTrack(vm2.Reconfigure, configSpec)

            Log("Power cycle VM1...")
            PowerCycle(vm1)
            Log("Power cycle VM2...")
            PowerCycle(vm2)
            Log("Power cycle VM3...")
            PowerCycle(vm3)

            Log("OP1: delete VM1.S1C2, then power cycle VM2")
            vimutil.InvokeAndTrack(S1C2Snapshot.Remove, True)
            PowerCycle(vm2)

            Log("OP2: destroy VM2, power cycle VM1")
            vimutil.InvokeAndTrack(vm2.Destroy)
            PowerCycle(vm1)

            Log("then recreate VM2 with just disk1")
            configSpec = vmconfig.CreateDefaultSpec(name = vm2Name, \
                                                    datastoreName = ds2)
            configSpec = vmconfig.AddScsiCtlr(configSpec)
            configSpec = vmconfig.AddScsiDisk(configSpec, datastorename = ds2, \
                                              capacity = 1024, \
                                              backingType = backingType)
            configSpec.SetVersion(vmxVersion)
            childDiskBacking = configSpec.GetDeviceChange()[1].GetDevice().GetBacking()
            parentBacking = GetBackingInfo(backingType)
            parentBacking.SetFileName(parentDisk1)
            childDiskBacking.SetParent(parentBacking)
            childDiskBacking.SetDeltaDiskFormat(deltaDiskFormat)

            resPool = invt.GetResourcePool()
            vmFolder = invt.GetVmFolder()
            vimutil.InvokeAndTrack(vmFolder.CreateVm, configSpec, resPool)
            vm2 = folder.Find(vm2Name)
            Log("ReCreated child VM2 --" + vm2Name)

            Log("OP3: delete VM3S2, power cycle VM1, revert to S1C1")
            vimutil.InvokeAndTrack(VM3S2Snapshot.Remove, True)
            vimutil.InvokeAndTrack(S1C1Snapshot.Revert)
            PowerCycle(vm1)

            llpm = si.RetrieveInternalContent().GetLlProvisioningManager()

            Log("OP4: refresh VM2 disk and destroy the disk and its parent")
            llpm.ReloadDisks(vm2, ['currentConfig', 'snapshotConfig'])

            disks = vmconfig.CheckDevice(vm2.GetConfig(), \
                                         Vim.Vm.Device.VirtualDisk)
            diskChain1 = disks[0]
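            # Truncate the backing chain at the immediate parent so that the
            # "destroy" reconfigure below removes only that parent (matching the
            # "verify only the immediate parent is deleted" log further down).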
            diskChain1.backing.parent.parent = None
            configSpec = Vim.Vm.ConfigSpec()
            configSpec = vmconfig.RemoveDeviceFromSpec(configSpec, \
                                                       diskChain1,
                                                       "destroy")
            configSpec.files = vm2.config.files
            llpm.ReconfigVM(configSpec)
            Log("verify only the immediate parent is deleted")
            PowerCycle(vm1)

            Log("OP5: destroy VM1, power cycle VM3")
            vimutil.InvokeAndTrack(vm1.Destroy)
            PowerCycle(vm3)

            Log("OP6: Consolidate VM3 disk chain")
            disks = vmconfig.CheckDevice(vm3.GetConfig(), \
                                         Vim.Vm.Device.VirtualDisk)

            shouldHaveFailed = 0
            try:
               task = llpm.ConsolidateDisks(vm3, disks)
               WaitForTask(task)
            except Exception as e:
               shouldHaveFailed = 1
               Log("Hit an exception when trying to consolidate across a " \
                   "snapshot point.")

            if shouldHaveFailed != 1:
               raise Exception("Error: allowed consolidation to merge snapshot")

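            # Consolidate in two segments so neither crosses the snapshot point
            # (the cross-snapshot attempt above was expected to fail):
            # diskchain1 keeps only its immediate parent, while diskchain2
            # starts at the grandparent of the original backing.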
            diskchain1 = disks[0]
            diskchain1.backing.parent.parent = None

            disks = vmconfig.CheckDevice(vm3.GetConfig(), \
                                         Vim.Vm.Device.VirtualDisk)
            diskchain2 = disks[0]
            diskchain2.backing = diskchain2.backing.parent.parent

            disks = []
            disks.append(diskchain1)
            disks.append(diskchain2)

            vimutil.InvokeAndTrack(llpm.ConsolidateDisks, vm3, disks)
            PowerCycle(vm3)

            Log("OP7: destroy VM2, no orphaned disks/files should be left")
            vimutil.InvokeAndTrack(vm2.Destroy)

            Log("Delete the snapshot of VM3, delete the disk with all its parents, "
                "then destroy VM3; no orphaned disks/files should be left")

            disks = vmconfig.CheckDevice(vm3.GetConfig(), \
                                         Vim.Vm.Device.VirtualDisk)
            diskChain1 = disks[0]
            configSpec = Vim.Vm.ConfigSpec()
            configSpec = vmconfig.RemoveDeviceFromSpec(configSpec, \
                                                       diskChain1,
                                                       "destroy")
            configSpec.files = vm3.config.files
            vimutil.InvokeAndTrack(llpm.ReconfigVM, configSpec)
            vimutil.InvokeAndTrack(vm3.Destroy)

            hostSystem = host.GetHostSystem(si)
            b = hostSystem.GetDatastoreBrowser()

            shouldHaveFailed = 0

            try:
               vimutil.InvokeAndTrack(b.Search, vm1DirName)
            except Vim.Fault.FileNotFound:
               Log("Caught " + vm1DirName + " not found, as expected")
               shouldHaveFailed += 1

            try:
               vimutil.InvokeAndTrack(b.Search, vm2DirName)
            except Vim.Fault.FileNotFound:
               Log("Caught " + vm2DirName + " not found, as expected")
               shouldHaveFailed += 1

            try:
               vimutil.InvokeAndTrack(b.Search, vm3DirName)
            except Vim.Fault.FileNotFound:
               Log("Caught " + vm3DirName + " not found, as expected")
               shouldHaveFailed += 1

            if shouldHaveFailed != 3:
               Log("Failed, orphaned disks left")
               raise Exception("orphaned disks")

            status = "PASS"

         finally:
            bigClock.finish("iteration " + str(i))

      except Exception as e:
         Log("Caught exception : " + str(e))
         status = "FAIL"

      Log("TEST RUN COMPLETE: " + status)
      resultsArray.append(status)

   Log("Results for each iteration: ")
   for i in range(len(resultsArray)):
      Log("Iteration " + str(i) + ": " + resultsArray[i])
Example #24
File: trio.py Project: free-Zen/pvc
def main():
    supportedArgs = [
        (["1:", "host1="], "localhost", "Host name", "host1"),
        (["2:", "host2="], "localhost", "Host name", "host2"),
        (["u:", "user="], "root", "User name", "user"),
        (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
        (["v:",
          "vmname="], "CreateTest", "Name of the virtual machine", "vmname"),
        (["t:", "vmotionType="], "vmotion", "VMotion type", "vmotionType"),
        (["d:",
          "destDs="], None, "Target datastore for storage VMotions", "destDs"),
        (["e:", "encrypt="], False, "Whether to use encryption", "encrypt")
    ]

    supportedToggles = [(["usage",
                          "help"], False, "Show usage information", "usage")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    host1 = args.GetKeyValue("host1")
    host2 = args.GetKeyValue("host2")
    encrypt = bool(args.GetKeyValue("encrypt"))
    vmotionType = args.GetKeyValue("vmotionType")
    destDs = args.GetKeyValue("destDs")
    supportedTypes = vmotion.GetSupportedVMotionTypes()
    if vmotionType not in supportedTypes:
        print "Unsupported vmotion type '%s'" % vmotionType
        print "Supported values are %s" % ", ".join(supportedTypes)
        sys.exit(-1)
    print "Using vmotion type: " + vmotionType

    # Connect to hosts
    print "Host 1: " + host1
    primarySi = connect.SmartConnect(host=host1,
                                     user=args.GetKeyValue("user"),
                                     pwd=args.GetKeyValue("pwd"))

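    # Disk-only (storage) VMotion stays on the source host, so connect to host2 only for the other VMotion types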
    secondarySi = primarySi
    if vmotionType != Vim.Host.VMotionManager.VMotionType.disks_only:
        print "Host 2: " + host2
        secondarySi = connect.SmartConnect(host=host2,
                                           user=args.GetKeyValue("user"),
                                           pwd=args.GetKeyValue("pwd"))
        connect.SetSi(primarySi)

    print "Use encryption: " + str(encrypt)
    vmname = args.GetKeyValue("vmname")
    vm1 = folder.Find(vmname)
    if vm1 == None:
        print "Couldnt find the specified virtual machine " + vmname \
             + ". Check that the vm exists on host 1"
        sys.exit(-1)

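    # Kick off the requested migration between the source and destination service instances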
    vm.Migrate(vm1,
               primarySi,
               secondarySi,
               vmotionType=vmotionType,
               encrypt=encrypt,
               destDs=destDs)
Esempio n. 25
0
def main():
   supportedArgs = [ (["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="******"root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
                     (["v:", "vmname="], "Hw7ReconfigTest", "Name of the virtual machine", "vmname"),
                     (["i:", "numiter="], "1", "Number of iterations", "iter") ]

   supportedToggles = [ (["usage", "help"], False, "Show usage information", "usage"),
                        (["runall", "r"], True, "Run all the tests", "runall"),
                        (["nodelete"], False, "Dont delete vm on completion", "nodelete") ]

   args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
   if args.GetKeyValue("usage") == True:
      args.Usage()
      sys.exit(0)

   # Connect
   si = Connect(host=args.GetKeyValue("host"),
                user=args.GetKeyValue("user"),
                pwd=args.GetKeyValue("pwd"),
                version="vim.version.version9")
   atexit.register(Disconnect, si)


   # Process command line
   vmname = args.GetKeyValue("vmname")
   numiter = int(args.GetKeyValue("iter"))
   runall = args.GetKeyValue("runall")
   noDelete = args.GetKeyValue("nodelete")
   status = "PASS"

   for i in range(numiter):
       bigClock = StopWatch()
       vm1 = None
       try:
           ## Cleanup old VMs
           vm1 = folder.Find(vmname)
           if vm1 != None:
              vm1.Destroy()

           Log("Creating virtual machine")
           vm1 = vm.CreateQuickDummy(vmname, 1, diskSizeInMB = 4096)

           devices = vmconfig.CheckDevice(vm1.GetConfig(), Vim.Vm.Device.VirtualDisk)
           if len(devices) < 1:
              raise Exception("Failed to find added disk!")
           cspec = Vim.Vm.ConfigSpec()
           for disk in devices:
              backing = disk.GetBacking()
              backing.SetEagerlyScrub(True)
              disk.SetBacking(backing)
              vmconfig.AddDeviceToSpec(cspec, disk, Vim.Vm.Device.VirtualDeviceSpec.Operation.edit)

           Log("Scrubbing existing disks of the VM")
           task = vm1.Reconfigure(cspec)
           WaitForTask(task)

           Log("Add a new scrubbed disk to the VM")
           cspec = Vim.Vm.ConfigSpec()
           cspec = vmconfig.AddScsiCtlr(cspec)
           vmconfig.AddScsiDisk(cspec, capacity = 128 * 1024, scrub = True)
           task = vm1.Reconfigure(cspec)
           WaitForTask(task)

       except Exception as e:
           status = "FAIL"
           Log("Caught exception : " + str(e))
   Log("TEST RUN COMPLETE: " + status)
Esempio n. 26
0
def main():
   supportedArgs = [ (["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="******"root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
                     (["d:", "disk="], "/vmfs/devices/", "Disk", "disk"),
                     (["s:", "ds="], "storage1", "Datastore 1", "ds"),
                     (["f:", "file="], "[datastore1] rdm/rdm.vmdk", "Virtual Disk", "file"),
                     (["v:", "vmname="], "RdmVM", "Name of the virtual machine", "vmname"),
                     (["i:", "numiter="], "1", "Number of iterations", "iter") ]

   supportedToggles = [ (["usage", "help"], False, "Show usage information", "usage"),
                        (["runall", "r"], True, "Run all the tests", "runall"),
                        (["nodelete"], False, "Dont delete vm on completion", "nodelete") ]

   args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
   if args.GetKeyValue("usage") == True:
      args.Usage()
      sys.exit(0)

   # Connect
   si = SmartConnect(host=args.GetKeyValue("host"),
                     user=args.GetKeyValue("user"),
                     pwd=args.GetKeyValue("pwd"))
   atexit.register(Disconnect, si)

   # Process command line
   vmname = args.GetKeyValue("vmname")
   numiter = int(args.GetKeyValue("iter"))
   runall = args.GetKeyValue("runall")
   noDelete = args.GetKeyValue("nodelete")
   disk = args.GetKeyValue("disk")
   ds = args.GetKeyValue("ds")
   rdmDiskFile = args.GetKeyValue("file")


   status = "PASS"

   for i in range(numiter):
       bigClock = StopWatch()
       vm1 = None
       try:
           ## Cleanup old VMs
           vm1 = folder.Find(vmname)
           if vm1 != None:
               vm1.Destroy()

           Log("Creating VM: " + str(vmname))

           ## Add scsi disk
           Log("Adding a new rdm disk to VM: " + str(vmname))
           cspec = vmconfig.CreateDefaultSpec(name = vmname, datastoreName = ds)
           cspec = vmconfig.AddScsiCtlr(cspec)

           # Get config options and targets
           cfgOption = vmconfig.GetCfgOption(None)
           cfgTarget = vmconfig.GetCfgTarget(None)

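           # Build a physical-mode RDM backing that points at the raw device path (no descriptor file yet)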
           rdmBacking = Vim.Vm.Device.VirtualDisk.RawDiskMappingVer1BackingInfo()
           rdmBacking.SetFileName("")
           rdmBacking.SetDeviceName(disk)
           rdmBacking.SetCompatibilityMode("physicalMode")
           rdmBacking.SetDiskMode("")
           rdmBacking.SetParent(None)

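           # Wrap the RDM backing in a virtual disk and attach it to the first SCSI controller with a free slot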
           diskDev = Vim.Vm.Device.VirtualDisk()
           diskDev.SetKey(vmconfig.GetFreeKey(cspec))
           diskDev.SetBacking(rdmBacking)
           ctlrs = vmconfig.GetControllers(cfgOption, Vim.Vm.Device.VirtualSCSIController, None, cspec)

           # XXX Fix this up
           for ctlrIdx in range(len(ctlrs)):
              freeSlot = vmconfig.GetFreeSlot(cspec, None, cfgOption, ctlrs[ctlrIdx])
              if (freeSlot >= 0):
                 diskDev.SetControllerKey(ctlrs[ctlrIdx].GetKey())
                 diskDev.SetUnitNumber(-1)
                 diskDev.SetCapacityInKB(long(4096))
                 break


           vmconfig.AddDeviceToSpec(cspec, diskDev, \
                    Vim.Vm.Device.VirtualDeviceSpec.Operation.add, \
                    Vim.Vm.Device.VirtualDeviceSpec.FileOperation.create)

           Log("create VM: " + str(vmname) + " with the RDM disk")
           vmFolder = vm.GetVmFolder()
           resPool = vm.GetResourcePool()

           task = vmFolder.CreateVm(cspec, resPool)
           WaitForTask(task)
           Log("Finished Reconfiguring VM: " + str(vmname));
           vm1 = task.info.result

           Log("Now reconfiguring VM: " + str(vmname));

           cspec = Vim.Vm.ConfigSpec()

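           # Second pass: back the new disk with an existing RDM descriptor file instead of the raw device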
           rdmBacking = Vim.Vm.Device.VirtualDisk.RawDiskMappingVer1BackingInfo()
           rdmBacking.SetFileName(rdmDiskFile)
           rdmBacking.SetCompatibilityMode("physicalMode")
           rdmBacking.SetDiskMode("persistent")
           rdmBacking.SetParent(None)

           diskDev = Vim.Vm.Device.VirtualDisk()
           diskDev.SetKey(vmconfig.GetFreeKey(cspec))
           diskDev.SetBacking(rdmBacking)
           ctlrs = vmconfig.GetControllers(cfgOption, Vim.Vm.Device.VirtualSCSIController, vm1.GetConfig(), cspec)
           # XXX Fix this up
           for ctlrIdx in range(len(ctlrs)):
              freeSlot = vmconfig.GetFreeSlot(cspec, vm1.GetConfig(), cfgOption, ctlrs[ctlrIdx])
              if (freeSlot >= 0):
                 diskDev.SetControllerKey(ctlrs[ctlrIdx].GetKey())
                 diskDev.SetUnitNumber(-1)
                 diskDev.SetCapacityInKB(long(4096))
                 break


           vmconfig.AddDeviceToSpec(cspec, diskDev, \
                    Vim.Vm.Device.VirtualDeviceSpec.Operation.add, \
                    Vim.Vm.Device.VirtualDeviceSpec.FileOperation.create)
           task = vm1.Reconfigure(cspec)
           WaitForTask(task)
           Log("Finished reconfiguring VM: " + str(vmname))


       except Exception as e:
           status = "FAIL"
           Log("Caught exception : " + str(e))
   Log("TEST RUN COMPLETE: " + status)
Esempio n. 27
0
def testPromoteDisks(si, numDisks, numiter, backingType, vmxVersion, ds1, ds2,
                     status, resultsArray):
    for i in range(numiter):
        bigClock = StopWatch()
        try:
            try:
                vm1Name = "Parent" + str(i)
                vm1 = folder.Find(vm1Name)
                if vm1 != None:
                    Log("Cleaning up old vm with name: " + vm1Name)
                    vm1.Destroy()

                # Create a simple vm with numDisks on ds1
                vm1 = vm.CreateQuickDummy(vm1Name, numScsiDisks=numDisks, \
                                          datastoreName=ds1, diskSizeInMB=1, \
                                          vmxVersion=vmxVersion, \
                                          backingType=backingType)
                Log("Created parent VM1 --" + vm1Name)

                vm1DirName = vm1.config.files.snapshotDirectory

                # Create snapshot
                vm.CreateSnapshot(vm1, "S1", "S1 is the first snapshot", \
                                  False, False)
                snapshotInfo = vm1.GetSnapshot()
                S1Snapshot = snapshotInfo.GetCurrentSnapshot()
                Log("Created Snapshot S1 for VM1")

                # Get the name of the parent disks
                disks = vmconfig.CheckDevice(S1Snapshot.GetConfig(), \
                                             Vim.Vm.Device.VirtualDisk)

                if len(disks) != numDisks:
                    raise Exception("Failed to find parent disks")

                parentDisks = [None] * len(disks)
                for diskIdx in range(len(disks)):
                    parentDisks[diskIdx] = disks[diskIdx].GetBacking().GetFileName()

                # Create a VM2 on ds2 that is linked off S1
                vm2Name = "LinkedClone" + str(i)
                configSpec = vmconfig.CreateDefaultSpec(name=vm2Name,
                                                        datastoreName=ds2)
                configSpec = vmconfig.AddScsiCtlr(configSpec)
                configSpec = vmconfig.AddScsiDisk(configSpec,
                                                  datastorename=ds2,
                                                  capacity=1024,
                                                  backingType=backingType)
                configSpec.SetVersion(vmxVersion)
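                # Point the child disk's backing at the S1 parent disk so VM2 becomes a linked clone (redo-log delta)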
                childDiskBacking = configSpec.GetDeviceChange()[1].\
                                   GetDevice().GetBacking()
                parentBacking = GetBackingInfo(backingType)
                parentBacking.SetFileName(parentDisks[0])
                childDiskBacking.SetParent(parentBacking)
                childDiskBacking.SetDeltaDiskFormat("redoLogFormat")

                resPool = invt.GetResourcePool()
                vmFolder = invt.GetVmFolder()
                vimutil.InvokeAndTrack(vmFolder.CreateVm, configSpec, resPool)

                vm2 = folder.Find(vm2Name)
                Log("Created child VM2 --" + vm2Name)

                vm2DirName = vm2.config.files.snapshotDirectory

                # create delta disks off VM1 on VM2
                Log("Adding delta disks off VM1 to VM2")
                configSpec = Vim.Vm.ConfigSpec()
                for diskIdx in range(len(parentDisks)):
                    configSpec = vmconfig.AddScsiDisk(configSpec, \
                                                      datastorename = ds2, \
                                                      cfgInfo = vm2.GetConfig(), \
                                                      backingType = backingType)
                    SetDeltaDiskBacking(configSpec, diskIdx, parentDisks[diskIdx])

                vimutil.InvokeAndTrack(vm2.Reconfigure, configSpec)

                Log("Power (on) vm1")
                vm.PowerOn(vm1)
                time.sleep(5)

                Log("Power (on) vm2")
                vm.PowerOn(vm2)
                time.sleep(5)

                # prepare promoteDisksSpec
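                # Each spec targets one link at the bottom of the corresponding disk chain (numLinks=1, offsetFromBottom=0)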
                diskList = GetVirtualDisks(vm2)
                promoteDisksSpec = [None] * len(diskList)
                for diskIdx in range(len(diskList)):
                    promoteDisksSpec[diskIdx] = vim.host.LowLevelProvisioningManager.\
                                                PromoteDisksSpec()
                    promoteDisksSpec[diskIdx].SetNumLinks(1)
                    promoteDisksSpec[diskIdx].SetOffsetFromBottom(0)
                    diskId = diskList[diskIdx].GetKey()
                    promoteDisksSpec[diskIdx].SetDiskId(diskId)

                Log("Calling LLPM PromoteDisks")
                llpm = invt.GetLLPM()
                try:
                    task = llpm.PromoteDisks(vm2, promoteDisksSpec)
                    WaitForTask(task)
                except Exception as e:
                    print(e)
                    Log("Caught exception : " + str(e))
                    status = "FAIL"

                status = "PASS"

                Log("Destroying VMs")
                vm.PowerOff(vm2)
                time.sleep(5)
                vm.PowerOff(vm1)
                time.sleep(5)

                vm2.Destroy()
                vm1.Destroy()

            finally:
                bigClock.finish("iteration " + str(i))

        except Exception as e:
            Log("Caught exception : " + str(e))
            status = "FAIL"

        Log("TEST RUN COMPLETE: " + status)
        resultsArray.append(status)

    Log("Results for each iteration: ")
    for i in range(len(resultsArray)):
        Log("Iteration " + str(i) + ": " + resultsArray[i])
Esempio n. 28
0
def main():
   supportedArgs = [ (["h:", "host="], "localhost", "Host name", "host"),
                     (["u:", "user="******"root", "User name", "user"),
                     (["p:", "pwd="], "ca$hc0w", "Password", "pwd"),
                     (["v:", "vmname="], "CreateTest", "Name of the virtual machine", "vmname")]
   supportedToggles = [(["usage", "help"], False, "Show usage information", "usage")]

   args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
   if args.GetKeyValue("usage") == True:
      args.Usage()
      sys.exit(0)

   # Connect
   si = Connect(args.GetKeyValue("host"), 443,
                args.GetKeyValue("user"), args.GetKeyValue("pwd"))
   atexit.register(Disconnect, si)

   # Process command line
   vmname = args.GetKeyValue("vmname")

   # Cleanup from previous runs.
   vm1 = folder.Find(vmname)
   if vm1 != None:
      vm1.Destroy()

   # Create vms
   envBrowser = invt.GetEnv()
   config = vm.CreateQuickDummySpec(vmname)
   cfgOption = envBrowser.QueryConfigOption(None, None)
   cfgTarget = envBrowser.QueryConfigTarget(None)
   NIC_DIFFERENCE = 7
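   # NICs are added at unit numbers offset by NIC_DIFFERENCE; later tests request specific slots relative to it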

   config = vmconfig.AddNic(config, cfgOption, cfgTarget, unitNumber = NIC_DIFFERENCE + 0)
   config = vmconfig.AddNic(config, cfgOption, cfgTarget, unitNumber = NIC_DIFFERENCE + 2)
   config = vmconfig.AddNic(config, cfgOption, cfgTarget, unitNumber = NIC_DIFFERENCE + 3)
   config = vmconfig.AddNic(config, cfgOption, cfgTarget, unitNumber = NIC_DIFFERENCE + 4)
   config = vmconfig.AddNic(config, cfgOption, cfgTarget)
   config = vmconfig.AddNic(config, cfgOption, cfgTarget, unitNumber = NIC_DIFFERENCE + 6)
   config = vmconfig.AddNic(config, cfgOption, cfgTarget, unitNumber = NIC_DIFFERENCE + 7)
   config = vmconfig.AddNic(config, cfgOption, cfgTarget, unitNumber = NIC_DIFFERENCE + 8)
   config = vmconfig.AddNic(config, cfgOption, cfgTarget, unitNumber = NIC_DIFFERENCE + 9)
   config = vmconfig.AddScsiCtlr(config, cfgOption, cfgTarget, unitNumber = 3)
   config = vmconfig.AddScsiDisk(config, cfgOption, cfgTarget, unitNumber = 0)
   isofile = "[] /usr/lib/vmware/isoimages/linux.iso"
   config = vmconfig.AddCdrom(config, cfgOption, cfgTarget, unitNumber = 0, isoFilePath=isofile)
   image = "[] /vmimages/floppies/vmscsi.flp"
   config = vmconfig.AddFloppy(config, cfgOption, cfgTarget, unitNumber = 0, type="image", backingName=image)
   config = vmconfig.AddFloppy(config, cfgOption, cfgTarget, unitNumber = 1, type="image", backingName=image)
   backing = Vim.Vm.Device.VirtualSerialPort.FileBackingInfo()
   backing.SetFileName("[]")
   config = vmconfig.AddSerial(config, backing)

   vmFolder = invt.GetVmFolder()
   vimutil.InvokeAndTrack(vmFolder.CreateVm, config, invt.GetResourcePool(), None)

   vm1 = folder.Find(vmname)
   printNicUnitNumbers(vm1, "Test 1: Creating a vm with lots of nics")

   cspec = Vim.Vm.ConfigSpec()
   cspec = vmconfig.AddNic(cspec, cfgOption, cfgTarget)
   task = vm1.Reconfigure(cspec)
   WaitForTask(task)
   printNicUnitNumbers(vm1, "Test 2: Added a nic")


   cspec = Vim.Vm.ConfigSpec()
   cspec = vmconfig.AddNic(cspec, cfgOption, cfgTarget)
   task = vm1.Reconfigure(cspec)
   try:
      WaitForTask(task)
   except Vim.Fault.TooManyDevices as e:
      print("Caught too many devices as expected")
   nics = printNicUnitNumbers(vm1, "Test 3: Added too many nics")


   cspec = Vim.Vm.ConfigSpec()
   uni = nics[4].GetUnitNumber()
   cspec = vmconfig.RemoveDeviceFromSpec(cspec, nics[0])
   cspec = vmconfig.RemoveDeviceFromSpec(cspec, nics[2])
   cspec = vmconfig.RemoveDeviceFromSpec(cspec, nics[4])
   cspec = vmconfig.RemoveDeviceFromSpec(cspec, nics[5])
   cspec = vmconfig.RemoveDeviceFromSpec(cspec, nics[6])
   cspec = vmconfig.AddNic(cspec, cfgOption, cfgTarget)
   task = vm1.Reconfigure(cspec)
   WaitForTask(task)
   printNicUnitNumbers(vm1, "Test 4: Removed a bunch of nics")

   cspec = Vim.Vm.ConfigSpec()
   cspec = vmconfig.AddNic(cspec, cfgOption, cfgTarget, unitNumber = NIC_DIFFERENCE - 2)
   task = vm1.Reconfigure(cspec)
   WaitForTask(task)
   printNicUnitNumbers(vm1, "Test 5: Added a nic with slot incorrectly specified")

   cspec = Vim.Vm.ConfigSpec()
   cspec = vmconfig.AddNic(cspec, cfgOption, cfgTarget, unitNumber = uni)
   cspec = vmconfig.AddScsiCtlr(cspec, cfgOption, cfgTarget, unitNumber = 4)
   cspec = vmconfig.AddScsiDisk(cspec, cfgOption, cfgTarget, unitNumber = 1)
   task = vm1.Reconfigure(cspec)
   WaitForTask(task)
   printNicUnitNumbers(vm1, "Test 6: Added a nic with a slot correctly specified")

   cspec = Vim.Vm.ConfigSpec()
   cspec = vmconfig.AddNic(cspec, cfgOption, cfgTarget, unitNumber = uni)
   task = vm1.Reconfigure(cspec)
   try:
      WaitForTask(task)
   except Vim.Fault.InvalidDeviceSpec as e:
      print("Exception caught for adding same nic twice")
   printNicUnitNumbers(vm1, "Test 7: Added a nic with s slot specified to be an occupied slot")
   vm1.Destroy()
Esempio n. 29
0
supportedToggles = [(["usage",
                      "help"], False, "Show usage information", "usage")]

args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
if args.GetKeyValue("usage") == True:
    args.Usage()
    sys.exit(0)

# Connect
si = Connect(host=args.GetKeyValue("host"),
             user=args.GetKeyValue("user"),
             pwd=args.GetKeyValue("pwd"))
atexit.register(Disconnect, si)

vmname = args.GetKeyValue("vmname")
vm1 = folder.Find(vmname)

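# Walk the inventory: root folder -> datacenter -> host folder -> compute resource -> host system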
content = si.RetrieveContent()
dataCenter = content.GetRootFolder().GetChildEntity()[0]
hostFolder = dataCenter.GetHostFolder()
computeResource = hostFolder.GetChildEntity()[0]
hostSystem = computeResource.GetHost()[0]

diskInstance = args.GetKeyValue("diskInstance")
moduleInstance = args.GetKeyValue("moduleInstance")

perfManager = content.GetPerfManager()
counterInfos = perfManager.perfCounter


def main():
Esempio n. 30
0
    i = 0
    print "Test started on ", now()
    # Repeatedly create and delete a dummy VM until two hours have elapsed
    while (time.time() - start_time) < (60 * 60 * 2):  # 2 hours
        print "== iteration ", str(i + 1), " time = ", now()
        print "  Creating: ", tempVmName
        vmTemp = vm.CreateQuickDummy(tempVmName, 1)
        if vmTemp == None:
            print "** Error in creating: ", tempVmName
            return (2)

        print "  Deleting: ", tempVmName
        task = vmTemp.Destroy()
        WaitForTask(task)

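        # After the destroy task completes, the VM should no longer be found in the inventory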
        vmTemp = folder.Find(tempVmName)
        if vmTemp != None:
            print "** Deleted Vm still present: ", tempVmName
            return (3)

        print "== iteration completed ", str(i + 1)
        i = i + 1

    print "## Total number of iterations performed ", str(i)
    return (0)


# Start program
if __name__ == "__main__":
    retval = main()
    print "Test ended with error code ", str(retval), " on ", now()