Example #1
    def setupMelioDisk(self):
        # Set up a Melio disk on the SCSI device.
        # realpath returns e.g. "/dev/sdX"; [5:] strips the leading "/dev/"
        disk = self.hosts[0].execdom0("realpath %s" % self.device).strip()[5:]
        with self.getMelioClient(self.hosts[0]) as melioClient:
            deadline = xenrt.timenow() + 600
            while True:
                data = melioClient.get_all()
                unmanaged = data.get('unmanaged_disk')
                xenrt.TEC().logverbose("Unmanaged disks: %s" % json.dumps(unmanaged, indent=2))
                if unmanaged:
                    disksToManage = [x for x in unmanaged if x['system_name'] == disk]
                else:
                    disksToManage = []
                if disksToManage:
                    diskToManage = disksToManage[0]
                    break
                if xenrt.timenow() > deadline:
                    raise xenrt.XRTError("Timed out waiting for disk to appear")
                xenrt.sleep(10)
            melioClient.manage_disk(diskToManage['system_name'])
            deadline = xenrt.timenow() + 600
            while True:
                managedDisks = melioClient.get_all()['managed_disk']
                guid = [x for x in managedDisks.keys() if managedDisks[x]['system_name'] == disk][0]
                if int(managedDisks[guid]['state']) == 2:
                    break
                if xenrt.timenow() > deadline:
                    raise xenrt.XRTError("Timed out waiting for disk to get to state 2")
                xenrt.sleep(10)
            self.guid = melioClient.create_volume(guid.lstrip("_"), managedDisks[guid]['free_space'])
        self.getSanDeviceForHost(self.hosts[0])
        tasks = [xenrt.PTask(self.rebootAndWait, x) for x in self.hosts[1:]]
        xenrt.pfarm(tasks)
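
Both loops above follow the same deadline-polling pattern. A minimal standalone sketch of it, standard library only; `poll` and `done` are hypothetical stand-ins for `melioClient.get_all()` and the disk-state checks above:

import time

def wait_for(poll, done, timeout=600, interval=10):
    # Poll until done(result) is truthy, or give up once the deadline passes.
    deadline = time.time() + timeout
    while True:
        result = poll()
        if done(result):
            return result
        if time.time() > deadline:
            raise RuntimeError("Timed out after %d seconds" % timeout)
        time.sleep(interval)

# e.g. wait_for(get_managed_disks, lambda d: int(d['state']) == 2)
# where get_managed_disks is a hypothetical poll callable.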
Example #2
    def prepareIteration(self):
        """Prepare each iterations. All VDIs will be created in this process."""

        step("Creating VDIs and attaching them")

        count = self.vdicount / self.numWorker  # integer division (Python 2); remainder handled below
        if self.runOnGuest:
            tasks = []
            for i in xrange(self.numWorker):
                tasks.append(
                    xenrt.PTask(self.prepareVDIsOnGuest,
                                self.guests[i * count:(i + 1) * count]))
            xenrt.pfarm(tasks)
            # Pick up any remainder the equal split left behind.
            if self.vdicount % self.numWorker:
                self.prepareVDIsOnGuest(
                    self.guests[-(self.vdicount % self.numWorker):])
            log("Created %d VDIs on guests" % (self.vdicount))

        else:
            xenrt.pfarm([
                xenrt.PTask(self.prepareVDIsOnHost, count)
                for i in xrange(self.numWorker)
            ])
            # Pick up any remainder the equal split left behind.
            if self.vdicount % self.numWorker:
                self.prepareVDIsOnHost(self.vdicount % self.numWorker)
            log("Created %d VDIs: %s" % (self.vdicount, self.vdis))

        log("After creating %d VDIs: %d" %
            (self.vdicount, self.getPhysicalUtil()))

        libsynexec.initialise_master_in_dom0(self.host)
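
The `count` arithmetic above is integer division (this is Python 2 code, hence `xrange` and `/` on ints), so the equal split can leave up to `numWorker - 1` items uncovered; the remainder block picks them up. A small sketch of the same split with the leftover slice made explicit:

def split_work(items, workers):
    # Each worker gets an equal share; integer division leaves
    # len(items) % workers items over, handled separately.
    count = len(items) // workers
    chunks = [items[i * count:(i + 1) * count] for i in range(workers)]
    remainder = items[workers * count:]
    return chunks, remainder

chunks, remainder = split_work(list(range(10)), 3)
assert chunks == [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
assert remainder == [9]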
Example #3
    def prepare(self, arglist=[]):
        args = self.parseArgsKeyValue(arglist)
        self.host = self.getDefaultHost()

        self.guests = []
        if "masters" in args:
            log("%s guests are declared." % args["masters"])
            if self.USE_TARGET > 0:
                log("Using %d guests for this TC." % self.USE_TARGET)
                targets = sample(args["masters"].split(","), self.USE_TARGET)
            else:
                targets = args["masters"].split(",")
            self.guests = [self.host.getGuest(name).cloneVM() for name in targets]
            log("Seleted Guests: %s" % targets)
        else:
            num = len(self.TARGET)
            targets = self.TARGET
            if self.USE_TARGET > 0:
                log("Using %d guests for this TC." % num)
                num = self.USE_TARGET
                targets = sample(self.TARGET, num)
                log("Seleted Distros: %s" % targets)
            log("Creating %d guests of %s distros." % (num, targets))
            tasks = [xenrt.PTask(self.createGuest, distro) for distro in targets]
            xenrt.pfarm(tasks)
            log("Created %s guests." % ([g.name for g in self.guests],))

        for g in self.guests:
            self.uninstallOnCleanup(g)
            g.setState("DOWN")
            g.setHost(choice(self.getDefaultPool().getHosts()))
            g.start()

        if not self.LEAVE_DEFAULT:
            self.enableRemoteExecAPI()
Example #4
    def run(self, arglist=None):
        # Install the VMs on the two hosts in parallel
        xenrt.pfarm([
            xenrt.PTask(self.createServers, self.serverHost),
            xenrt.PTask(self.createClients, self.clientHost)
        ])
        self.invokeClients(self.clientHost)
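
Nearly every example here ends with the same fan-out/join idiom: build a list of `xenrt.PTask`s, hand it to `xenrt.pfarm`, and block until all of them finish. A rough standard-library equivalent of that idiom (a sketch of the pattern, not xenrt's actual implementation):

import threading

def pfarm_sketch(funcs):
    # Start one thread per callable, then join them all before returning.
    threads = [threading.Thread(target=f) for f in funcs]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

def install_servers():
    print("installing servers")   # stand-in for self.createServers(...)

def install_clients():
    print("installing clients")   # stand-in for self.createClients(...)

pfarm_sketch([install_servers, install_clients])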
Example #5
    def prepare(self, arglist=[]):
        winguests = []
        linguests = []
        self.workloads = []
        i = 0
        while True:
            g = self.getGuest("winclone-%d" % i)
            if not g:
                break
            winguests.append(g)
            i += 1
        i = 0
        while True:
            g = self.getGuest("linclone-%d" % i)
            if not g:
                break
            linguests.append(g)
            i += 1

        pWindows = map(lambda x: xenrt.PTask(self.startWindowsWorkload, x),
                       winguests)
        pLinux = map(lambda x: xenrt.PTask(self.startLinuxWorkload, x),
                     linguests)
        xenrt.pfarm(pWindows, interval=10)
        xenrt.pfarm(pLinux, interval=10)
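
Judging by its use here, `interval=10` staggers task start-up by ten seconds so dozens of workload launches don't hit the hosts at once (an inference from usage, not a documented contract). A sketch of staggered fan-out:

import threading
import time

def staggered_farm(funcs, interval=10):
    # Start each thread `interval` seconds after the previous one,
    # then wait for all of them to finish.
    threads = []
    for f in funcs:
        t = threading.Thread(target=f)
        t.start()
        threads.append(t)
        time.sleep(interval)
    for t in threads:
        t.join()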
Example #6
    def setup(self, reinstall=False, formatDisk=True):
        # Do a full setup of the melio tools
        tasks = [xenrt.PTask(self.installMelio, reinstall=reinstall)]
        if self.iscsiHost not in self.hosts:
            tasks.append(xenrt.PTask(self.createLun))
        xenrt.pfarm(tasks)
        self.setupISCSITarget()
        if formatDisk:
            self.setupMelioDisk()
Example #7
    def doInstanceOperations(self,
                             instances,
                             threads,
                             iterations=1,
                             func=None,
                             timestamps=True):
        """This is a separate function so that a derived class can override self.instances"""

        if func is None:
            func = self.doOperation

        self.instances = instances

        # We'll store failed instances here so we don't just bail out at the first failure
        self.failedInstances = []

        # Each iteration will wait for the completion of the previous iteration before going again
        for i in range(iterations):
            # The Instance operation may want to complete asynchronously (e.g. finish booting).
            # It can append a completion thread here, and at the end we'll wait for them all to complete before finishing
            self.completionThreads = []
            # Create a list which is the indexes (in self.instances) of the instances to perform operations on.
            self.instancesToOp = range(len(self.instances))
            # Shuffle the instances for a more realistic workload
            random.shuffle(self.instancesToOp)

            if timestamps is True:
                self.addTiming("TIME_ITERATION%d_START:%.3f" %
                               (i, xenrt.util.timenow(float=True)))

            # Start the worker threads
            pOp = map(lambda x: xenrt.PTask(self.doInstanceWorker, func),
                      range(threads))

            # Wait for them to complete. The worker threads will wait for the completion threads.
            xenrt.pfarm(pOp)

            if timestamps is True:
                self.addTiming("TIME_ITERATION%d_COMPLETE:%.3f" %
                               (i, xenrt.util.timenow(float=True)))

        try:
            if len(self.failedInstances) > 0:
                raise xenrt.XRTFailure(
                    "Failed to perform operation on %d/%d instances - %s" %
                    (len(self.failedInstances), len(self.instances),
                     ", ".join(self.failedInstances)))
        finally:
            # Verify that all of the hosts and instances are still functional. Required???
            pass
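
The structure above separates work definition from execution: `instancesToOp` is a shuffled list of indices, and a fixed number of `doInstanceWorker` threads (not shown) drain it. A compact sketch of such a worker pool; the shared list plus `pop()` is the whole queue (atomic under CPython's GIL, though a real implementation might lock):

import random
import threading

def run_workers(items, worker_count, func):
    indices = list(range(len(items)))
    random.shuffle(indices)          # randomise order for a more realistic workload
    failed = []

    def worker():
        while True:
            try:
                i = indices.pop()    # atomic in CPython
            except IndexError:
                return               # list drained; this worker is done
            try:
                func(items[i])
            except Exception as e:
                failed.append("%s: %s" % (items[i], e))  # record, don't bail out

    threads = [threading.Thread(target=worker) for _ in range(worker_count)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return failed                    # non-empty means some operations failed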
Example #8
    def doVMOperations(self, vms, threads, iterations=1, func=None, timestamps=True):

        if func is None:
            func = self.doOperation

        self.vms = vms

        # We'll store failed VMs here so we don't just bail out at the first failure
        self.failedVMs = []
        self.removedVMs = []

        # Each iteration will wait for the completion of the previous iteration before going again
        for i in range(iterations):
            # The VM operation may want to complete asynchronously (e.g. finish booting).
            # It can append a completion thread here, and at the end we'll wait for them all to complete before finishing
            self.completionThreads = []
            # Create a list which is the indexes (in self.vms) of the vms to perform operations on.
            self.vmsToOp = range(len(self.vms))
            # Shuffle the VMs for a more realistic workload
            random.shuffle(self.vmsToOp)
            if timestamps is True:
                self.addTiming("TIME_ITERATION%d_START:%.3f" % (i, xenrt.util.timenow(float=True)))
            # Start the worker threads
            pOp = map(lambda x: xenrt.PTask(self.doVMWorker, func), range(threads))

            # Wait for them to complete. The worker threads will wait for the completion threads.
            xenrt.pfarm(pOp)
            if timestamps is True:
                self.addTiming("TIME_ITERATION%d_COMPLETE:%.3f" % (i, xenrt.util.timenow(float=True)))

            # Do any post-iteration cleanup (e.g. deleting old base disks)
            self.postIterationCleanup()
            if timestamps is True:
                self.addTiming("TIME_ITERATION%d_CLEANUPCOMPLETE:%.3f" % (i, xenrt.util.timenow(float=True)))

        try:
            if len(self.failedVMs) > 0:
                raise xenrt.XRTFailure("Failed to perform operation on %d/%d VMs - %s" % (len(self.failedVMs), len(self.vms), ", ".join(self.failedVMs)))
        finally:
            # Verify that all of the guests are still functional
            if not xenrt.TEC().lookup("NO_HOST_VERIFY", False, boolean=True):
                for i in self.removedVMs:
                    self.vms.remove(i)
                self.vmsToOp = range(len(self.vms))
                pVerify = map(lambda x: xenrt.PTask(self.doVMWorker, self.verifyVM), range(threads))
                xenrt.pfarm(pVerify)

                if len(self.failedVMs) > 0:
                    raise xenrt.XRTFailure("Failed to verify VMs %s" % ", ".join(self.failedVMs))
Example #9
    def vmInstall(self, lvmoFCSRuuid):
        """Installs guests in parallel using XenRT pfarm."""

        xenrt.TEC().logverbose("Installing %d guests in parallel." % (self.NO_OF_VMS))

        rootDiskSRUUIDs = lvmoFCSRuuid[:self.NO_OF_VMS]
        xenrt.TEC().logverbose("rootDiskSRUUIDs: %s" % rootDiskSRUUIDs)
        xenrt.TEC().logverbose("length of rootDiskSRUUIDs: %s" % len(rootDiskSRUUIDs))

        timeNow = xenrt.util.timenow()
        hostIndexDivider = (self.NO_OF_VMS / self.NO_OF_HOSTS)
        pTasks = map(lambda x: xenrt.PTask(self.hosts[x / hostIndexDivider].createBasicGuest,
                                           distro=self.DISTRO,
                                           memory=self.VMMEMORY,
                                           sr=rootDiskSRUUIDs[x]),
                     range(self.NO_OF_VMS))
        xenrt.TEC().logverbose("Guest installation pTasks are %s" % pTasks)
        self.guests = xenrt.pfarm(pTasks)
        xenrt.TEC().logverbose("Time taken to install %d guests in parallel on %d hosts with %d LUNs mapped: %s seconds." % 
                                    (self.NO_OF_VMS, self.NO_OF_HOSTS, self.NO_OF_VDIS, (xenrt.util.timenow() - timeNow)))

        if len(self.guests) != self.NO_OF_VMS:
            raise xenrt.XRTFailure("The test requires %d guests, but only %d were installed." % (self.NO_OF_VMS, len(self.guests)))
        else:
            xenrt.TEC().logverbose("Number of guests installed for the test: %d" % len(self.guests))
Example #10
    def runCase(self, func):
        """
        Utility function to run jobs simutanously.

        @param func: a refrence to member function that accept a guest as a param
        """

        # Use sub case to run command on all guest simultanously.
        tasks = []
        for guest in self.guests:
            tasks.append(xenrt.PTask(func, guest))
        xenrt.pfarm(tasks)
        for task in tasks:
            if task.exception:
                log("Sub task (%s with %s) has exception: %s" % \
                    (task.func.__name__, task.args[0].getName(), task.exception))
Example #11
    def prepare(self, arglist=[]):

        self.host = self.getDefaultHost()
        self.pool = self.host.pool
        if self.pool:
            self.host = self.pool.master
        self.sr = self.host.lookupDefaultSR()
        self.timeout = 180 * 60 # timeout of 3 hours per iteration.

        self.__obtainTestVars(arglist, printAfterObtain=True)

        self.guests = []
        if self.runOnGuest:
            xenrt.pfarm([xenrt.PTask(self.prepareGuests, host) for host in self.pool.getHosts()])
        else:
            log("Test are running on the host.")
Example #12
    def run(self, arglist):
        # Run a single-guest Point to Point Network test vs. a single-guest Private Network test.
        # The Point to Point throughput should be at least 30% higher than the Private Network's.
        p2p_rate = self._runNetperf(self.ddkNetfront[0], "192.168.50.1")
        xenrt.TEC().logverbose("Point To Point Network throughput is %0.2f" % p2p_rate)
        private_rate = self._runNetperf(self.ddkNetfront[0], "192.168.100.1")
        xenrt.TEC().logverbose("Private Network throughput is %0.2f" % private_rate)
        if(p2p_rate < private_rate):
            raise xenrt.XRTFailure("Point to Point Network throughput was less than Private Network throughput")    
        diff_percent = (((p2p_rate - private_rate)/private_rate)*100)
        xenrt.TEC().logverbose("P2P Network is %0.2f %% faster" % diff_percent)
        if (diff_percent < 30):
            raise xenrt.XRTFailure("Point to Point Network wasn't 30% faster than Private Network")    

        # Run multiple netperf clients against the Point to Point Network
        # Make sure that the dom0 CPU usage isn't over 20% during these tests
        self.netperfRates = []
        pTasks = map(lambda x:xenrt.PTask(self._runNetperfThread, x, "192.168.50.1"), self.ddkNetfront)
        pTasks.append(xenrt.PTask(self._dom0PerfTest, 20))
        xenrt.pfarm(pTasks)
        p2p_avg_rate = 0
        for p2p_rate in self.netperfRates:
            p2p_avg_rate += p2p_rate
            xenrt.TEC().logverbose("Point to Point Network throughput is %0.2f" % p2p_rate)
        p2p_avg_rate = p2p_avg_rate / len(self.netperfRates)
        xenrt.TEC().logverbose("Average Point to Point Network throughput is %0.2f" % p2p_avg_rate)
        
        # Run multiple netperf clients against the Private Network
        self.netperfRates = []
        pTasks = map(lambda x:xenrt.PTask(self._runNetperfThread, x, "192.168.100.1"), self.ddkNetfront)
        pTasks.append(xenrt.PTask(self._dom0PerfTest, 200))
        xenrt.pfarm(pTasks)
        priv_avg_rate = 0
        for priv_rate in self.netperfRates:
            priv_avg_rate += priv_rate
            xenrt.TEC().logverbose("Private Network throughput is %0.2f" % priv_rate)
        priv_avg_rate = priv_avg_rate / len(self.netperfRates)
        xenrt.TEC().logverbose("Average Private throughput is %0.2f" % priv_avg_rate)

        if(p2p_avg_rate < priv_avg_rate):
            raise xenrt.XRTFailure("Point to Point Network average throughput was less than Private Network average throughput")    
        diff_percent = (((p2p_avg_rate - priv_avg_rate)/priv_avg_rate)*100)
        xenrt.TEC().logverbose("P2P Network average is %0.2f %% faster" % diff_percent)
        if (diff_percent < 30):
            raise xenrt.XRTFailure("Point to Point Network average wasn't 30% faster than Private Network average")    
Example #13
    def run(self, arglist=None):
        args = self.parseArgsKeyValue(arglist)
        self.vendor = args['vendor']
        gold = args['gold']
        gpucount = int(args['gpucount'])
        host = self.getDefaultHost()

        pStart = [xenrt.PTask(self.startVM,
                              xenrt.TEC().registry.guestGet("%s-clone%d" % (gold, x)))
                  for x in range(gpucount)]
        xenrt.pfarm(pStart)

        # Let the GPU workloads run for a bit
        xenrt.sleep(5)
        for i in range(gpucount):
            vm = xenrt.TEC().registry.guestGet("%s-clone%d" % (gold, i))
            vm.shutdown()
Example #14
    def run(self, arglist):
        threading.stack_size(65536)

        args = self.parseArgsKeyValue(arglist)
        threads = int(args['threads'])
        instances = int(args['instances'])

        # Generate the list of VM names, which zone they will run in and what they're cloned from.
        # Each name has the format clone-<zone>-<index>:
        #   zone  = the zone the VM will run in
        #   index = the index of the VM within that zone

        self.vmSpecs = [("clone-%s-%d" % (self.zones[x % len(self.zones)], x/len(self.zones)), self.zones[x % len(self.zones)]) for x in range(instances)]

        # We'll run this with a limited number of workers (threads).
        # Each worker thread will pull a VM Spec from the list, clone it, then move onto the next one. The threads will complete when all VMs are cloned
        pClone = map(lambda x: xenrt.PTask(self.doClones), range(threads))
        xenrt.pfarm(pClone)
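
The `vmSpecs` comprehension assigns clones round-robin across zones: `x % len(zones)` picks the zone and `x / len(zones)` numbers the clone within it. A quick illustration of the naming it produces:

zones = ["zone-a", "zone-b"]
specs = [("clone-%s-%d" % (zones[x % len(zones)], x // len(zones)),
          zones[x % len(zones)]) for x in range(4)]
assert specs == [("clone-zone-a-0", "zone-a"), ("clone-zone-b-0", "zone-b"),
                 ("clone-zone-a-1", "zone-a"), ("clone-zone-b-1", "zone-b")]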
Example #15
    def startIOZoneParallely(self):
        """Install IOZone in each guests parallelly."""

        xenrt.TEC().logverbose("Starting IOZone parallelly in %d guests." % (self.NO_OF_VMS))

        timeNow = xenrt.util.timenow()
        iozoneTasks = map(lambda x: xenrt.PTask(self.installIOZoneAndRun,
                                            self.guests[x]),
                                            range(len(self.guests)))
        xenrt.TEC().logverbose("Guest IOZone Installation pTasks are %s" % iozoneTasks)
        iozoneFarm = xenrt.pfarm(iozoneTasks)
        xenrt.TEC().logverbose("Time taken to install IOZone tool on %d guests in parallel: %s seconds." % 
                                    (self.NO_OF_VMS, (xenrt.util.timenow() - timeNow)))
Example #16
    def run(self, arglist):
        threading.stack_size(65536)
        threads = 2
        instancesCount = 4

        # Get the sequence variables.
        if arglist and len(arglist) > 0:
            for arg in arglist:
                l = string.split(arg, "=", 1)
                if l[0] == "threads":
                    threads = int(l[1])
                if l[0] == "instances":
                    instancesCount = int(l[1])                 

        # Generate the list of instance names.
        self.instanceSpecs = map(lambda x: ("clone-%d" % x, self.goldTemplate), range(instancesCount))

        # We'll run this with a limited number of workers (threads).
        # Each worker thread will pull an instance spec from the list, create an instance from
        # the template, and then move on to the next one. The threads complete once every
        # instance has been created.
        pClone = map(lambda x: xenrt.PTask(self.createInstancesFromTemplate), range(threads))
        xenrt.pfarm(pClone)
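
Example #14 gets the same `threads`/`instances` values from a `parseArgsKeyValue` helper; here they are parsed by hand, splitting each `key=value` argument on the first `=` only. A standalone sketch of that parsing with defaults:

def parse_args(arglist, defaults):
    # Each argument is "key=value"; split on the first "=" only, so values
    # containing "=" survive. Unknown keys are ignored.
    values = dict(defaults)
    for arg in arglist or []:
        parts = arg.split("=", 1)
        if len(parts) == 2 and parts[0] in values:
            values[parts[0]] = int(parts[1])
    return values

opts = parse_args(["threads=8", "instances=32"], {"threads": 2, "instances": 4})
assert opts == {"threads": 8, "instances": 32}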
Example #17
    def prepare(self, arglist):
        self.ddkNetfront = []
        self.host = self.getDefaultHost()
        # Create the Point to Point network
        self.p2p_network = self._createP2PNetwork()
        xenrt.TEC().logverbose("Point to Point Network: %s" % self.p2p_network)
        # Create a Private Network
        self.private_network = self._createPrivateNetwork()
        xenrt.TEC().logverbose("Private Network: %s" % self.private_network)
        # Create and Setup the NetBack server VM
        self.ddkNetback = self._setupNetBack()
        # Create and Setup the NetFront client VMs
        pTasks = map(lambda x: xenrt.PTask(self._setupNetFront, x + 2),
                     range(self.NUM_CLIENT_VMS))
        self.ddkNetfront = xenrt.pfarm(pTasks)
        # Get the management PIF for collecting logs
        self.mng_pif_uuid = self.host.parseListForUUID(
            "pif-list", "management", "true",
            "host-uuid=%s" % self.host.getMyHostUUID()).strip()
Example #18
    def run(self, arglist=None):
        step("Initialize and Enrol Phone Home for every pool")
        for s in self.services:
            s.initializeService(self.pool)

        xenrt.pfarm([xenrt.PTask(self.services[0].activateService, p)
                     for p in self.pools], wait=True, exception=True)

        step("Attach all hosts via XenCenter in every guest")
        for p in self.pools:
            xenrt.pfarm([xenrt.PTask(self.attachXenCenterToMulHost, g, p.master)
                         for g in self.guests], wait=True, exception=True)

        step("Set the same upload schedule for every pool")
        self.triggerTimes = xenrt.pfarm([xenrt.PTask(self.services[0].triggerService, p, self.options)
                                         for p in self.pools], wait=True, exception=True)
        xenrt.log("Upload schedules are as follows: %s" % self.triggerTimes)

        step("Verify all the pool uploads")
        self.uploadResults = xenrt.pfarm([xenrt.PTask(self.services[0].verifyService, self.pools[i], self.triggerTimes[i])
                                          for i in range(len(self.pools))], wait=True, exception=True)
        xenrt.log("Upload results are as follows: %s" % self.uploadResults)

        if False in self.uploadResults:
            raise xenrt.XRTFailure("Upload failure observed for some pools: %s" % self.uploadResults)
        else:
            xenrt.log("All scheduled uploads were successful")
Example #19
            raise xenrt.XRTFailure("Cloning one or many VMs failed with error messages %s" % errors)

        errors = []
        for clone in self.clones:
            xenrt.TEC().progress("Starting VM %s" % clone.getName())
            try:
                clone.start()
            except Exception, e:
                errors.append(clone.getName() + ":" + str(e))

        if len(errors) > 0:
            xenrt.TEC().logverbose("One or many guests failed to start with error messages %s" % errors)
            raise xenrt.XRTFailure("One or many guests failed to start.")

        # Now collect I/O latency metrics for every cloned guest in parallel.
        results = xenrt.pfarm([xenrt.PTask(self.collectMetrics, clone) for clone in self.clones], exception=False)
        log("Threads returned: %s" % results)

        exceptions = 0
        for result in results:
            if result:
                exceptions += 1
                warning("Found exception: %s" % result)

        if exceptions:
            raise xenrt.XRTFailure("Failed to run %d / %d io latency tests." % (exceptions, len(results)))

    def postRun(self, arglist=None):
        # Do not remove VMs after the test, otherwise we will not be able to collect their logs
        if False: #TODO: perhaps, in the future, add option to remove the vms
            # Removing all cloned VMs after the test run.
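
With `exception=False`, `pfarm` evidently hands back each task's exception (or None) instead of raising, which is what the counting loop above relies on. This mirrors the `farm_results` sketch after Example #18, but captures exceptions instead of return values:

import threading

def farm_exceptions(funcs):
    # Run callables in parallel; return each task's exception, or None.
    results = [None] * len(funcs)

    def runner(i, f):
        try:
            f()
        except Exception as e:
            results[i] = e           # record instead of propagating

    threads = [threading.Thread(target=runner, args=(i, f))
               for i, f in enumerate(funcs)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results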
Example #20
    def installMelio(self, reinstall=False):
        # Install melio on the cluster (do each host in parallel)
        self.configureClusterFirewall()
        tasks = [xenrt.PTask(self.installMelioOnHost, x, reinstall) for x in self.hosts]
        xenrt.pfarm(tasks)
        self.checkCluster()