def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    self.device = libperf.getArgument(arglist, "device", str, "default")
    self.vm_ram = libperf.getArgument(arglist, "vm_ram", int, 4096)  # note: we need > 1GB to compile some test suites
    self.distro = libperf.getArgument(arglist, "distro", str, "debian70")
    self.arch = libperf.getArgument(arglist, "arch", str, "x86-64")
    self.vcpus = libperf.getArgument(arglist, "vcpus", int, 2)
    self.rootDiskSizeGB = libperf.getArgument(arglist, "disksize", int, 24)  # GB
    self.postinstall = libperf.getArgument(arglist, "postinstall", str, None)  # comma-separated list of guest function names
    self.dom0vcpus = libperf.getArgument(arglist, "dom0vcpus", int, None)

    self.multipage = libperf.getArgument(arglist, "multipage", int, None)
    if self.multipage:
        is_power2 = self.multipage != 0 and ((self.multipage & (self.multipage - 1)) == 0)
        if not is_power2:
            raise ValueError("Multipage %s is not a power of 2" % (self.multipage))

    # Optional VM image to use as a template
    self.vm_image = libperf.getArgument(arglist, "vm_image", str, None)

    # If vm_image is set, treat it as a distro name
    if self.vm_image:
        self.distro = self.vm_image
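# The multipage validation above relies on the standard bit trick: for n > 0,
# n & (n - 1) clears the lowest set bit, so the result is zero only when n has a
# single bit set. A minimal, self-contained sketch of the same test in isolation
# (the helper name is illustrative, not part of the test case):

def is_power_of_two(n):
    # True for 1, 2, 4, 8, ... and False otherwise (including 0).
    return n != 0 and (n & (n - 1)) == 0

assert [is_power_of_two(n) for n in (0, 1, 3, 4, 6, 8)] == [False, True, False, True, False, True]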
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse arguments relating to this test
    self.dditers = libperf.getArgument(arglist, "dditers", int, 20)
    self.userawvdi = libperf.getArgument(arglist, "userawvdi", bool, False)
def prepare(self, arglist=None):
    self.basicPrepare(arglist)

    # Populate self.guests
    self.findGuests()

    self.log(None, "prepare:arglist=%s" % (arglist,))

    # Get the two vm endpoints to clone
    e0 = libperf.getArgument(arglist, "endpoint0", str, None)
    e1 = libperf.getArgument(arglist, "endpoint1", str, None)
    self.log(None, "endpoints to clone: e0=%s, e1=%s" % (e0, e1))
    if not e0 or not e1:
        raise xenrt.XRTError("Failed to find an endpoint")

    self.endpoint0 = self.getGuestOrHostFromName(e0)
    self.endpoint1 = self.getGuestOrHostFromName(e1)

    # Change number of netback threads if required
    rebooted_e0 = self.changeNetbackThreads(self.host_of(self.endpoint0))  # may reboot
    rebooted_e1 = self.changeNetbackThreads(self.host_of(self.endpoint1))  # may reboot
    if rebooted_e0 or rebooted_e1:
        self.findGuests()  # repopulate guest/host info
        self.endpoint0 = self.getGuestOrHostFromName(e0)
        self.endpoint1 = self.getGuestOrHostFromName(e1)

    # Cloning phase:
    # - VMs are cloned from the one in the sequence file to fit many VMs in the same local SR of a host
    self.clone(self.endpoint0, self.endpoint0s)
    self.clone(self.endpoint1, self.endpoint1s)
def parseArgs(self, arglist):
    # Parse generic arguments
    tc_networkthroughput2.TCNetworkThroughputPointToPoint.parseArgs(self, arglist)

    self.log(None, "parseArgs:arglist=%s" % (arglist,))
    self.dom0vcpus = libperf.getArgument(arglist, "dom0vcpus", int, 0)
    self.nr_vm_pairs = libperf.getArgument(arglist, "vmpairs", int, 1)
    self.log(None, "nr_vm_pairs=%s" % (self.nr_vm_pairs,))
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # NB: use the 'dontusemps' argument if you don't want to use MPS

    # Parse arguments relating to this test
    self.numvms = libperf.getArgument(arglist, "numvms", int, 50)
    self.vmname = libperf.getArgument(arglist, "guest", str, None)
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse other arguments
    self.vms = libperf.getArgument(arglist, "numvms", int, 20)
    self.edisks = libperf.getArgument(arglist, "edisks", int, 5)
    self.distro = libperf.getArgument(arglist, "distro", str, "debian60")
    self.arch = libperf.getArgument(arglist, "arch", str, "x86-32")
    self.bufsize = libperf.getArgument(arglist, "bufsize", int, 512)
    self.diskprefix = None  # can be "hd" for KVM or "xvd" for Xen
def prepare(self, arglist=None):
    # Parse generic args
    self.parseArgs(arglist)

    # Parse args relating to this test
    self.log(None, "parseArgs:arglist=%s" % (arglist,))
    self.vmname = libperf.getArgument(arglist, "guest", str, None)
    self.vmimage = libperf.getArgument(arglist, "vmimage", str, None)
    self.numiters = libperf.getArgument(arglist, "numiters", int, 100)
    self.useImportedVM = libperf.getArgument(arglist, "useimportedvm", bool, False)

    self.initialiseHostList()
    self.configureAllHosts()
def prepare(self, arglist=None):
    self.basicPrepare(arglist)

    # Populate self.guests
    self.findGuests()

    self.log(None, "prepare:arglist=%s" % (arglist,))

    # Get the two communication endpoints
    e0 = libperf.getArgument(arglist, "endpoint0", str, None)
    e1 = libperf.getArgument(arglist, "endpoint1", str, None)
    self.log(None, "endpoints: e0=%s, e1=%s" % (e0, e1))
    if not e0 or not e1:
        raise xenrt.XRTError("Failed to find an endpoint")

    self.endpoint0 = self.getGuestOrHostFromName(e0)
    self.endpoint1 = self.getGuestOrHostFromName(e1)
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse arguments relating to this test
    self.numdesktops = libperf.getArgument(arglist, "numdesktops", int, 1000)  # 800 = 16 hosts x 50 VMs
def prepare(self, arglist=None):
    self.basicPrepare(arglist)

    def readArg(name, convert, defaultValue):
        setattr(self, name, libperf.getArgument(arglist, name, convert, defaultValue))

    readArg("network_backend", str, "")  # bridge or openvswitch
    readArg("use_jumbo_frames", bool, False)
    readArg("comm_bridge", str, "xenbr5")
    readArg("trans_bridge", str, "xenbr6")
    readArg("dummy_bridge", str, "xenbr7")
    readArg("use_irqbalance", bool, False)  # only applicable to pre-Cowley
    readArg("use_gro", bool, False)
    readArg("use_lro", bool, False)

    readArg("num_host_runs", int, 10)
    readArg("host_run_time", int, 60)
    readArg("host_ping_count", int, 20)
    readArg("num_host_threads", int, 4)
    readArg("trySingleDom0Thread", bool, False)

    readArg("vm_type", str, "demo")  # other: "win7"
    readArg("num_vm_vcpus", int, 1)
    readArg("num_vm_runs", int, 10)
    readArg("vm_run_time", int, 60)
    readArg("num_vm_pairs", int, 4)  # 7 is max for q machines due to RAM limit
    readArg("trySingleVMPair", bool, False)
    readArg("num_vm_threads", int, 2)
    readArg("trySingleVMThread", bool, False)
    readArg("vm_ping_count", int, 20)

    # TODO: Find a better parameter name and values, or remove this completely.
    self.where = libperf.getArgument(arglist, "run_on", str, "q")  # Can also be "perf"

    self.setupHosts1()
    self.setupVMpairs()
    self.setupHosts2()
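# The readArg closure above is shorthand for repeated
# "self.X = libperf.getArgument(arglist, 'X', ...)" assignments: it stores each
# parsed value under an attribute with the same name as the argument, via setattr.
# A minimal standalone sketch of the pattern, with a hypothetical args dict
# standing in for libperf.getArgument:

class _Example(object):
    def configure(self, args):
        def readArg(name, convert, defaultValue):
            # Look up the raw value, convert it, and store it as self.<name>.
            raw = args.get(name, defaultValue)
            setattr(self, name, convert(raw) if raw is not None else raw)
        readArg("num_vm_pairs", int, 4)
        readArg("vm_type", str, "demo")

e = _Example()
e.configure({"num_vm_pairs": "7"})
assert e.num_vm_pairs == 7 and e.vm_type == "demo"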
def prepare(self, arglist=None):
    # Parse generic args
    self.parseArgs(arglist)

    self.runs = libperf.getArgument(arglist, "runs", int, 1)

    self.initialiseHostList()
    self.configureAllHosts()
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse other arguments

    # Number of measurements to conduct
    self.measurements = libperf.getArgument(arglist, "measurements", int, 64)
    # Number of workload threads to run
    self.thread_count = libperf.getArgument(arglist, "thread_count", int, 64)
    # Delay before taking measurements, to allow the workload to settle
    self.pretest_delay = libperf.getArgument(arglist, "pretest_delay", int, 60)
    # Command whose execution time is measured
    self.command = libperf.getArgument(arglist, "command", str, "xe vm-list")
    self.dom0vcpus = libperf.getArgument(arglist, "dom0vcpus", int, None)
    # Timeout of the ssh command executing the measurements
    self.timeout = libperf.getArgument(arglist, "timeout", int, 3600)
    # Mountpoint path to the builds archive
    self.mount_path = libperf.getArgument(arglist, "mount_path", str,
                                          "backup-storage-cbg2.uk.xensource.com:/containers/builds_archive")
def prepare(self, arglist=None):
    self.basicPrepare(arglist)

    # Populate self.guests
    self.findGuests()

    self.log(None, "prepare:arglist=%s" % (arglist,))

    # Get the two communication endpoints
    e0 = libperf.getArgument(arglist, "endpoint0", str, None)
    e1 = libperf.getArgument(arglist, "endpoint1", str, None)
    self.log(None, "endpoints: e0=%s, e1=%s" % (e0, e1))
    if not e0 or not e1:
        raise xenrt.XRTError("Failed to find an endpoint")

    self.endpoint0 = self.getGuestOrHostFromName(e0)
    self.endpoint1 = self.getGuestOrHostFromName(e1)

    # Postinstall hook for guests
    for g in self.guests:
        xenrt.TEC().logverbose("executing post-install functions %s for guest %s" % (self.postinstall, g))
        for p in self.postinstall:
            eval("g.%s()" % (p))
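# The eval-based dispatch above calls each named post-install function on every
# guest; with postinstall = ["installNetperf"], for example, it runs
# g.installNetperf() on each guest. A hedged, self-contained sketch of the same
# dispatch using getattr instead of eval (the _Guest class and the method name
# are purely illustrative, not part of the test case):

class _Guest(object):
    def __init__(self, name):
        self.name = name
        self.calls = []
    def installNetperf(self):
        self.calls.append("installNetperf")

guests = [_Guest("vm0"), _Guest("vm1")]
postinstall = ["installNetperf"]
for g in guests:
    for p in postinstall:
        getattr(g, p)()  # equivalent to eval("g.%s()" % p) for plain method names
assert all(g.calls == ["installNetperf"] for g in guests)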
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    self.log(None, "parseArgs:arglist=%s" % (arglist,))
    self.npkts = libperf.getArgument(arglist, "npkts", int, 10000)
    self.size = libperf.getArgument(arglist, "size", int, 1450)
    self.tolerance = libperf.getArgument(arglist, "tolerance", int, 2)
    self.gro = libperf.getArgument(arglist, "gro", str, "default")
    self.dopause = libperf.getArgument(arglist, "pause", str, "off")

    # Optionally, the sequence file can specify which eth device to use in each endpoint
    self.e0dev = libperf.getArgument(arglist, "endpoint0dev", int, None)
    self.e1dev = libperf.getArgument(arglist, "endpoint1dev", int, None)

    # Optionally, the sequence file can specify IP addresses to use in each endpoint
    self.e0ip = libperf.getArgument(arglist, "endpoint0ip", str, None)
    self.e1ip = libperf.getArgument(arglist, "endpoint1ip", str, None)
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    self.vm_ram = libperf.getArgument(arglist, "vm_ram", int, 4096)
    self.distro = libperf.getArgument(arglist, "distro", str, "debian70")
    self.arch = libperf.getArgument(arglist, "arch", str, "x86-64")
    self.vcpus = libperf.getArgument(arglist, "vcpus", int, 2)
    self.postinstall = libperf.getArgument(arglist, "postinstall", str, None)  # comma-separated list of guest function names
    self.dom0vcpus = libperf.getArgument(arglist, "dom0vcpus", int, None)
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse other arguments
    self.vms = libperf.getArgument(arglist, "numvms", int, 20)
    self.writefirst = libperf.getArgument(arglist, "writefirst", str, "true")
    self.bufsize = libperf.getArgument(arglist, "bufsize", int, 32768)
    self.op = libperf.getArgument(arglist, "op", str, "read")
    self.distro = libperf.getArgument(arglist, "distro", str, "debian60")
    self.arch = libperf.getArgument(arglist, "arch", str, "x86-32")
    self.dom0vcpus = libperf.getArgument(arglist, "dom0vcpus", int, None)
    self.scheduler = libperf.getArgument(arglist, "scheduler", str, None)
    self.backend = libperf.getArgument(arglist, "backend", str, None)

    # Latency program command line
    self.latcmd = "/root/latency -s %s -b %d /dev/%s 60" % ("-w" if self.op == "write" else "",
                                                            self.bufsize, self.vmDiskDev())

    # Fetch JobID
    self.jobid = xenrt.TEC().gec.config.lookup("JOBID", None)
    xenrt.TEC().progress("My JOBID is %s" % self.jobid)
    self.jobid = int(self.jobid)
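# For reference, the latcmd template above inserts "-w" only for write tests.
# A minimal sketch of the expansion with illustrative values (the device name
# "xvdb" is an assumption, not what vmDiskDev() necessarily returns):

op = "write"
bufsize = 32768
latcmd = "/root/latency -s %s -b %d /dev/%s 60" % ("-w" if op == "write" else "", bufsize, "xvdb")
assert latcmd == "/root/latency -s -w -b 32768 /dev/xvdb 60"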
def parseArgs(self, arglist):
    # Performance test metrics
    self.runtime = libperf.getArgument(arglist, "runtime", int, 120)  # duration over which to run the throughput test
    self.snips = libperf.getArgument(arglist, "snips", int, 50)       # number of NetScaler clients on the DUT
    self.servers = libperf.getArgument(arglist, "servers", int, 251)  # number of HTTP servers
    self.clients = libperf.getArgument(arglist, "clients", int, 100)  # number of HTTP clients

    if self.IS_VALID_CLIENTTHREADS:
        # various client-thread values
        clientThreads = libperf.getArgument(arglist, "clientthreads", str, "50,100,200,300,500").split(",")
    if self.IS_VALID_CLIENTPARALLELCONN:
        # various client parallel-connection values
        clientParallelconn = libperf.getArgument(arglist, "clientparallelconn", str, "50,100,200,300,500").split(",")

    if self.IS_VALID_CLIENTTHREADS and len(clientThreads) != len(clientParallelconn):
        raise xenrt.XRTError("We expect the number of values in args 'clientthreads' and 'clientparallelconn' to be the same.")

    if self.IS_VALID_CLIENTTHREADS or self.IS_VALID_CLIENTPARALLELCONN:
        self.clientTnP = zip(clientThreads if self.IS_VALID_CLIENTTHREADS else clientParallelconn,
                             clientParallelconn if self.IS_VALID_CLIENTPARALLELCONN else clientThreads)

    bw_name = libperf.getArgument(arglist, "bw", str, "blackwidow")  # name of the VPX to use for BlackWidow
    dut_name = libperf.getArgument(arglist, "dut", str, "dut")       # name of the VPX to use as the device-under-test

    self.guest_bw = xenrt.GEC().registry.guestGet(bw_name)
    self.guest_dut = xenrt.GEC().registry.guestGet(dut_name)
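# The zip above pairs the i-th clientthreads value with the i-th
# clientparallelconn value; when only one list is supplied, that list is paired
# with itself. A minimal sketch of the pairing with made-up values:

clientThreads = "50,100,200".split(",")
clientParallelconn = "60,120,240".split(",")
clientTnP = zip(clientThreads, clientParallelconn)
assert list(clientTnP) == [("50", "60"), ("100", "120"), ("200", "240")]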
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse other arguments
    self.distro = libperf.getArgument(arglist, "distro", str, "debian60")
    self.arch = libperf.getArgument(arglist, "arch", str, "x86-32")
    self.vmram = libperf.getArgument(arglist, "memory", int, 256)
    self.vcpus = libperf.getArgument(arglist, "vcpus", int, 1)
    self.numclients = libperf.getArgument(arglist, "numvms", int, 20)
    self.numservers = self.numclients  # we use the same number of clients as servers
    self.postinstall = libperf.getArgument(arglist, "postinstall", str, None)  # comma-separated list of guest function names
    self.postinstall = [] if (self.postinstall is None or self.postinstall == "") else self.postinstall.split(",")

    # Apachebench client command-line
    self.abCmd = "/usr/bin/ab -n 1000 -c 100 -g /root/ab.log http://%s/"  # expects IP address of server

    # Fetch JobID
    self.jobid = xenrt.TEC().gec.config.lookup("JOBID", None)
    xenrt.TEC().progress("My JOBID is %s" % self.jobid)
    self.jobid = int(self.jobid)
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    self.log(None, "parseArgs:arglist=%s" % (arglist,))
    self.interval = libperf.getArgument(arglist, "interval", int, 1)
    self.threads = libperf.getArgument(arglist, "threads", int, 1)
    self.duration = libperf.getArgument(arglist, "duration", int, 30)
    self.protocol = libperf.getArgument(arglist, "protocol", str, "tcp")
    self.gro = libperf.getArgument(arglist, "gro", str, "default")
    self.dopause = libperf.getArgument(arglist, "pause", str, "off")
    self.postinstall = libperf.getArgument(arglist, "postinstall", str, None)  # comma-separated list of guest function names
    self.postinstall = [] if self.postinstall is None else self.postinstall.split(",")

    # Optionally, the sequence file can specify which eth device to use in each endpoint
    self.e0devstr = libperf.getArgument(arglist, "endpoint0dev", str, None)
    self.e1devstr = libperf.getArgument(arglist, "endpoint1dev", str, None)
    self.e0dev = None
    self.e1dev = None

    # Optionally, the sequence file can specify IP addresses to use in each endpoint
    self.e0ip = libperf.getArgument(arglist, "endpoint0ip", str, None)
    self.e1ip = libperf.getArgument(arglist, "endpoint1ip", str, None)
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse arguments relating to this test
    self.numvlans = libperf.getArgument(arglist, "numvlans", int, 500)
def parseArgs(self, arglist):
    libperf.PerfTestCase.parseArgs(self, arglist)

    self.fioIters = libperf.getArgument(arglist, "fioiters", int, 5)

    # Allow someone to specify e.g. lvm_ssd, but when we look for the SR, just look for "lvm"
    self.srtypeFull = libperf.getArgument(arglist, "srtype", str, "lvm")
    self.srtype = self.srtypeFull.split("_")[0]
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse other arguments

    # Specify the disks to use as a list of partitions in the form:
    #   /dev/disk/by-id/X,/dev/disk/by-id/Y,...
    # Use "default" to use the default SR (which will *not* be destroyed)
    self.devices = libperf.getArgument(arglist, "devices", str, "default").strip().split(",")

    # blocksizes is a list of either bytes or names of pre-defined access patterns, e.g. "tesco"
    self.blocksizes = libperf.getArgument(arglist, "blocksizes", str,
                                          "512,1024,2048,4096,8192,16384,32768,65536,131072,262144,524288,1048576,2097152,4194304")
    self.blocksizes = self.blocksizes.strip().split(",")

    self.queuedepth = libperf.getArgument(arglist, "queue_depth", int, 1)
    self.multiqueue = libperf.getArgument(arglist, "multiqueue", int, None)

    self.multipage = libperf.getArgument(arglist, "multipage", int, None)
    if self.multipage:
        is_power2 = self.multipage != 0 and ((self.multipage & (self.multipage - 1)) == 0)
        if not is_power2:
            raise ValueError("Multipage %s is not a power of 2" % (self.multipage))

    # iodepth_batch* options: useful to keep I/O requests queued for longer before being dequeued,
    # allowing more time for merges, especially when multiqueue and/or multipage are used.
    self.iodepth_batch = libperf.getArgument(arglist, "iodepth_batch", int, None)
    self.iodepth_batch_complete = libperf.getArgument(arglist, "iodepth_batch_complete", int, None)

    self.num_threads = libperf.getArgument(arglist, "num_threads", int, 1)
    self.vms_per_sr = libperf.getArgument(arglist, "vms_per_sr", int, 1)
    self.vbds_per_vm = libperf.getArgument(arglist, "vbds_per_vm", int, 1)
    self.vcpus_per_vm = libperf.getArgument(arglist, "vcpus_per_vm", int, None)
    self.sequential = libperf.getArgument(arglist, "sequential", toBool, True)

    # Optional VM image to use as a template
    self.vm_image = libperf.getArgument(arglist, "vm_image", str, None)

    # A number in MB; e.g. 1024
    self.vm_ram = libperf.getArgument(arglist, "vm_ram", int, None)

    self.duration = libperf.getArgument(arglist, "duration", int, 60)
    self.vdi_size = libperf.getArgument(arglist, "vdi_size", str, "5GiB")
    self.distro = libperf.getArgument(arglist, "distro", str, "debian60")

    # If vm_image is set, treat it as a distro name
    if self.vm_image:
        self.distro = self.vm_image

    # Benchmark program to use. Windows default: iometer, Linux default: fio
    if self.distro.startswith("w"):
        self.bench = libperf.getArgument(arglist, "benchmark", str, "iometer")
    else:
        self.bench = libperf.getArgument(arglist, "benchmark", str, "fio")

    self.postinstall = libperf.getArgument(arglist, "postinstall", str, None)  # comma-separated list of guest function names
    self.arch = libperf.getArgument(arglist, "arch", str, "x86-32")
    self.dom0vcpus = libperf.getArgument(arglist, "dom0vcpus", int, None)
    self.write_iterations = libperf.getArgument(arglist, "write_iterations", int, 1)
    self.read_iterations = libperf.getArgument(arglist, "read_iterations", int, 1)
    self.zeros = libperf.getArgument(arglist, "zeros", bool, False)
    self.prepopulate = libperf.getArgument(arglist, "prepopulate", toBool, True)
    self.vm_disk_scheduler = libperf.getArgument(arglist, "vm_disk_scheduler", str, "default")
    self.vm_disk_nomerges = libperf.getArgument(arglist, "vm_disk_nomerges", str, "default")

    # Disk schedulers are specified in the form deviceA=X,deviceB=Y,...
    # To specify the scheduler for the default SR, use default=Z
    schedulers = libperf.getArgument(arglist, "disk_schedulers", str, "").strip()
    self.disk_schedulers = {}
    if schedulers != "":
        for pair in schedulers.split(","):
            pair = pair.split("=")
            self.disk_schedulers[pair[0]] = pair[1]

    # Choice of VDI: default, xen-vhd, xen-raw
    self.vdi_type = libperf.getArgument(arglist, "vdi_type", str, "default")

    # Choice of backend: default, xen-blkback, xen-tapdisk2 or xen-tapdisk3
    self.backend = libperf.getArgument(arglist, "backend", str, "default")
    if self.backend == "xen-blkback":
        vdi_type = libperf.getArgument(arglist, "vdi_type", str, None)
        if vdi_type and vdi_type != "xen-raw":
            raise ValueError("Cannot use blkback with VHD vdi_type")

    if self.vdi_type == "xen-raw" or self.vdi_type == "xen-blkback":
        self.sm_config = "type=raw"
    else:
        self.sm_config = None

    # Fetch JobID
    self.jobid = xenrt.TEC().gec.config.lookup("JOBID", None)
    xenrt.TEC().progress("My JOBID is %s" % self.jobid)
    self.jobid = int(self.jobid)
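# The disk_schedulers argument above is parsed into a device-to-scheduler map,
# e.g. "default=noop,sdb=cfq" becomes {"default": "noop", "sdb": "cfq"}. A
# minimal sketch of that parse (device and scheduler names are illustrative only):

schedulers = "default=noop,sdb=cfq".strip()
disk_schedulers = {}
if schedulers != "":
    for pair in schedulers.split(","):
        key, value = pair.split("=")
        disk_schedulers[key] = value
assert disk_schedulers == {"default": "noop", "sdb": "cfq"}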
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse arguments relating to this test
    self.iters = libperf.getArgument(arglist, "iters", int, 20)
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse specific arguments
    self.pinvcpus = libperf.getArgument(arglist, "pinvcpus", int, 0)
def parseArgs(self, arglist):
    # Parse generic arguments
    libperf.PerfTestCase.parseArgs(self, arglist)

    # Parse arguments relating to this test
    self.messages = libperf.getArgument(arglist, "messages", int, 100000)