Example #1
    def invokeClients(self, host):
        # Run synexec master
        proc, port = libsynexec.start_master_on_controller("/bin/sh /root/synexec_cmd", self.jobid, self.numclients)

        # After all the servers have booted, set up synexec slave in each of the client VMs
        for i in range(self.numclients):
            # Write a file containing the apachebench command, pointing it at the relevant server VM
            target = self.servervms[i].getIP()
            self.clientvms[i].execguest("echo '%s' > /root/synexec_cmd" % (self.abCmd % target))

            # Start the synexec slave; it waits for the master's signal before running the apachebench command
            libsynexec.start_slave(self.clientvms[i], self.jobid, port)

        # Wait for jobs to complete
        proc.wait()

        # Fetch results from slaves
        for i in range(self.numclients):
            logFileRemote = "/root/ab.log"
            logFileLocal = "%s/ab-%d.log" % (xenrt.TEC().getLogdir(), i)
            sftp = self.clientvms[i].sftpClient()
            sftp.copyFrom(logFileRemote, logFileLocal)

        # Fetch log from master
        results = libsynexec.get_master_log_on_controller(self.jobid)
        self.log("synexec_master", "%s" % results)
Example #2
    def runPhase(self, count, op):
        for blocksize in self.blocksizes:
            # TODO we don't support pre-defined access patterns with 'latency', only integer block sizes
            blocksize = int(blocksize)

            # Run synexec master
            proc, port = libsynexec.start_master_on_controller(self.runPhasePrepareCommand(blocksize, op),
                                                               self.jobid, len(self.vm))

            for vm in self.vm:
                libsynexec.start_slave(vm, self.jobid, port)

            proc.wait()

            # Fetch results from slaves
            for vm in self.vm:
                for j in range(self.vbds_per_vm):
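                    # Output files are named by guest device letter: out-b for xvdb, out-c for xvdc, ...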
                    results = vm.execguest("cat /root/out-%s" % chr(ord('b') + j))
                    for line in results.splitlines():
                        # Log format: operation (r/w), iteration, blocksize, disk name, VM number on that SR, VBD number, bytes processed
                        self.log("slave", "%s %d %d %s %s %d %s" %
                                 (op, count + 1, blocksize, vm.getName().split("-")[0], vm.getName().split("-")[1], j, line))

            # Fetch log from master
            results = libsynexec.get_master_log_on_controller(self.jobid)
            for line in results.splitlines():
                self.log("master", "%d %s" % (blocksize, line))
Example #3
    def run(self, arglist=None):
        self.changeNrDom0vcpus(self.host, self.dom0vcpus)
        self.loadKernelModule()
        self.host.execdom0("iptables -F")

        guests = self.host.guests.values()

        if self.isNameinGuests(guests, "vm00"):
            # reuse any existing vms
            self.vm = guests
            self.rvms = len(self.vm)
            self.vbds = len(self.vm) * 2
        else:
            # Install 'vm00'
            xenrt.TEC().progress("Installing VM zero")
            self.vm.append(xenrt.lib.xenserver.guest.createVM(
                    host=self.host,
                    guestname="vm00",
                    distro=self.distro,
                    arch=self.arch,
                    vifs=xenrt.lib.xenserver.Guest.DEFAULT,
                    disks=[self.edisk]))
            self.rvms += 1
            self.vbds += 2

            # Copy bins to 'vm00'
            sftp = self.vm[0].sftpClient()
            sftp.copyTo("/home/xenrtd/felipef/latency", "/root/latency")
            libsynexec.initialise_slave(self.vm[0])

            # Copy bins to dom0
            libsynexec.initialise_master_in_dom0(self.host)

            # Populate the extra disk
            if self.writefirst == "true":
                self.vm[0].execguest("dd if=/dev/zero of=/dev/xvdb bs=1M oflag=direct || true")

        if len(self.vm) > self.vms:
            # Shutdown unnecessary VMs
            for i in range(self.vms, len(self.vm)):
                self.backendDetach(self.vm[i]) # must detach any out-of-xapi devices before shutdown
                self.shutdown_vm(self.vm[i])

        if len(self.vm) < self.vms:
            # Shutdown VM for cloning
            self.backendDetach(self.vm[0]) # must detach any out-of-xapi devices before shutdown
            self.shutdown_vm(self.vm[0])

            # Install more VMs as appropriate
            for i in range(len(self.vm), self.vms):
                xenrt.TEC().progress("Installing VM %d" % i)

                # Copies original VM (much quicker than installing another one)
                cloned_vm = self.vm[0].copyVM(name="vm%02d" % i)
                self.vm.append(cloned_vm)
                self.host.addGuest(cloned_vm)
                self.vm[i].start()

                # Populate the extra disk
                if self.writefirst == "true":
                    self.vm[i].execguest("dd if=/dev/zero of=/dev/xvdb bs=1M oflag=direct || true")

                # At this point we have added one VM and plugged two more VBDs into the host
                self.rvms += 1
                self.vbds += 2

        # Make sure all VMs are running and have synexec on
        for i in range(self.vms):
            self.start_vm(self.vm[i])
            self.backendAttach(self.vm[i])
            libsynexec.start_slave(self.vm[i], self.jobid)

        # Change the I/O scheduler of the SRs hosting the VMs' VBDs
        for i in range(self.vms):
            sr_uuid = self.getSRofGuest(self.vm[i], self.userdevice)
            self.changeDiskScheduler(self.host, sr_uuid, self.scheduler)

        # Run synexec master
        libsynexec.start_master_in_dom0(self.host, self.latcmd, self.jobid, self.vms)

        # Fetch results from slaves
        for i in range(self.vms):
            results = libsynexec.get_slave_log(self.vm[i])
            for line in results.splitlines():
                self.log("concurrent", "%d %d" % (i, int(line)))

        # Fetch log from master
        results = libsynexec.get_master_log(self.host)
        self.log("synexec_master", "%s" % results)
Example #4
    def runIperf(self, origin, origindev, dest, destdev, interval=1, duration=30, threads=1, protocol="tcp"):

        prot_switch = None
        if protocol == "tcp": prot_switch = ""
        elif protocol == "udp": prot_switch = "-u"
        else: raise xenrt.XRTError("unknown protocol %s" % (protocol, ))

        dest_endpoints = self.endpoints_of(dest)
        origin_endpoints = self.endpoints_of(origin)
        synexec_session = randrange(1000000000)
        iperf_in_file = "/tmp/iperf.in.%s" % (synexec_session, )
        iperf_out_file = "/tmp/iperf.out.%s" % (synexec_session, )

        if dest.windows:
            raise Exception("Windows endpoint not supported yet")
        else:

            # 1. start iperf servers in each vm in endpoint1s + endpoint1
            for d in dest_endpoints:
                # Start server
                d.execcmd("nohup iperf %s -s 0<&- &>/dev/null &" % (prot_switch,))  # should be implemented in startIperf()

            # 1.5. initialise synexec master in endpoint0
            libsynexec.initialise_master_in_guest(origin)

            # 2. start synexec slave in each vm in endpoint0s + endpoint0
            for i in range(len(origin_endpoints)):
                o = origin_endpoints[i]
                d = dest_endpoints[i]
                destIP = self.getIP(d, destdev)
                libsynexec.start_slave(o, synexec_session)
                o.execcmd("echo %s > %s" % (destIP, iperf_in_file))

            # 3. create synexec master script in endpoint 0 to run iperf -c in each slave
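            # The first line ("/bin/sh :CONF:") is the synexec execution header: the slave
            # presumably substitutes :CONF: with the path of the delivered script body and runs it.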
            master_script = """/bin/sh :CONF:
#!/bin/sh
DEST_IP=$(cat "%s")
iperf %s -c ${DEST_IP} -i %d -t %d -f m -P %d >%s 2>&1
""" % (iperf_in_file, prot_switch, interval, duration, threads, iperf_out_file)
            self.log(None, "synexec_master_script=%s" % (master_script, ))

            if self.dopause.lower() == "on" or (xenrt.TEC().lookup("PAUSE_AT_MASTER_ON_PHASE", "None") in self.getPhase()):
                self.pause('paused before running synexec_master')  # pause the tc and wait for user assistance

            # 4. start synexec master in endpoint0
            # 5. wait for synexec master to finish (=all synexec slaves finished iperf -c)
            master_out = libsynexec.start_master_in_guest(origin, master_script, synexec_session, len(origin_endpoints))
            self.log(None, master_out)

            # 6. kill iperf servers in each vm in endpoint1s + endpoint1
            for d in dest_endpoints:
                # Kill server
                d.execcmd("killall iperf || true")
                d.execcmd("killall -9 iperf || true")
            for o in origin_endpoints:
                libsynexec.kill_slave(o)

            # 7. collect the iperf -c output in each endpoint0s + endpoint0
            output = []
            for o in origin_endpoints:
                iperf_out = o.execcmd("cat %s" % (iperf_out_file, ))
                self.log(None, "collect results: endpoint %s: %s=%s" % (o, iperf_out_file, iperf_out))
                output.append(iperf_out)

        return output
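Each entry in the returned output list is raw iperf client output. A minimal parsing sketch, assuming the standard iperf 2 summary line in Mbits/sec (as requested by "-f m"):

import re

def parse_iperf_bandwidth(iperf_out):
    # Return the last bandwidth figure reported by the iperf client, e.g.
    # "[  3]  0.0-30.0 sec  3543 MBytes   990 Mbits/sec"
    bw = None
    for line in iperf_out.splitlines():
        m = re.search(r"([\d.]+)\s+Mbits/sec", line)
        if m:
            bw = float(m.group(1))
    return bw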