def init(self):
    """
    Initialization.

     * assigns vm1 and vm2 into separate cgroups with different
       blkio.weight properties (100 vs 1000, i.e. a 10:1 ratio)
     * creates a new virtio disk and adds it into each vm
    @raise error.TestError: when the class is not properly set up or
            fewer than 2 vms are available
    """
    if get_device_driver() != 'virtio':
        logging.warn("The main disk for this VM is non-virtio, keep in "
                     "mind that this particular subtest will add a new "
                     "virtio_blk disk to it")
    if self.dd_cmd is None:
        raise error.TestError("Corrupt class, aren't you trying to run "
                              "parent _TestBlkioBandwidth() function?")
    if len(self.vms) < 2:
        raise error.TestError("Test needs at least 2 vms.")

    # cgroups: one cgroup per vm; move the vm's shell process and all of
    # its already-spawned children into it.
    pwd = []
    blkio = self.blkio
    blkio.initialize(self.modules)
    for i in range(2):
        pwd.append(blkio.mk_cgroup())
        blkio.set_cgroup(self.vms[i].get_shell_pid(), pwd[i])
        # Move all existing threads into cgroup
        for tmp in utils.get_children_pids(self.vms[i].get_shell_pid()):
            blkio.set_cgroup(int(tmp), pwd[i])
    self.blkio.set_property("blkio.weight", 100, pwd[0])
    self.blkio.set_property("blkio.weight", 1000, pwd[1])

    for i in range(2):
        # BUGFIX: original read the undefined global 'vms'; the vm list
        # lives on self.
        (host_file, device) = add_file_drive(self.vms[i], "virtio")
        self.files.append(host_file)
        self.devices.append(device)
def assign_vm_into_cgroup(vm, cgroup, pwd=None):
    """
    Assigns all threads of VM into cgroup

    @param vm: desired VM
    @param cgroup: cgroup handler
    @param pwd: desired cgroup's pwd, cgroup index or None for root cgroup
    """
    # An integer pwd is an index into the handler's cgroup list.
    target = cgroup.cgroups[pwd] if isinstance(pwd, int) else pwd
    cgroup.set_cgroup(vm.get_shell_pid(), target)
    # The shell may already have spawned children; move those too.
    for child in utils.get_children_pids(vm.get_shell_pid()):
        cgroup.set_cgroup(int(child), target)
def init(self):
    """
    Initialization

     * creates a new scsi_debug device
     * prepares one cgroup and assign vm to it
    """
    # Only create the host /dev/sd? device
    (self.files, self.devices) = add_scsi_drive(self.vm)
    rm_drive(self.vm, host_file=None, device=self.devices)
    self.devices = None  # We don't want to mess cleanup
    time.sleep(3)

    dev = "%s:%s" % get_maj_min(self.files)

    cgroup = self.cgroup
    cgroup.initialize(self.modules)
    cgroup.mk_cgroup()
    root_cg = cgroup.cgroups[0]
    cgroup.set_cgroup(self.vm.get_shell_pid(), root_cg)
    for child in utils.get_children_pids(self.vm.get_shell_pid()):
        cgroup.set_cgroup(int(child), root_cg)

    # Test dictionary
    # Beware of persistence of some setting to another round!!!
    scenario = [
        ('deny', 'a', '', False),
        ('allow', 'b %s rm' % dev, True, False),
        ('allow', 'b %s w' % dev, 'b %s rwm' % dev, True),
        ('deny', 'b %s r' % dev, 'b %s wm' % dev, False),
        ('deny', 'b %s wm' % dev, '', False),
        ('allow', 'a', 'a *:* rwm', True),
    ]
    self.permissions = [{'property': prop,
                         'value': value,
                         'check_value': check,
                         'result': result}
                        for (prop, value, check, result) in scenario]
def run(self):
    """
    Actual test:
     * executes self.dd_cmd in vm while limiting it's throughput using
       different cgroups (or in a special case only one). At the end it
       verifies the throughputs.

    @return: success message with the measured throughputs
    @raise error.TestFail: when a limit is broken or when not enough dd
            samples could be gathered
    """
    out = []
    sessions = []
    # Two sessions: [0] runs dd, [1] is only used to kill it.
    sessions.append(self.vm.wait_for_login(timeout=30))
    sessions.append(self.vm.wait_for_login(timeout=30))
    sessions[0].sendline(self.dd_cmd)
    for i in range(len(self.cgroups)):
        logging.info("Limiting speed to: %s", (self.speeds[i]))
        # Assign all threads of vm
        self.cgroup.set_cgroup(self.vm.get_shell_pid(), self.cgroups[i])
        for pid in utils.get_children_pids(self.vm.get_shell_pid()):
            self.cgroup.set_cgroup(int(pid), self.cgroups[i])
        # Standard test-time is 60s. If the slice time is less than 30s,
        # test-time is prolonged to 30s per slice.
        # BUGFIX: '//' makes the intended floor division explicit
        # (unchanged on py2, correct on py3).
        time.sleep(max(60 // len(self.speeds), 30))
        sessions[1].sendline("rm -f /tmp/cgroup_lock; killall -9 dd")
        out.append(sessions[0].read_up_to_prompt())
        sessions[0].sendline(self.dd_cmd)
        # Tiny random delay so dd rounds don't align between slices.
        time.sleep(random() * 0.05)
    sessions[1].sendline("rm -f /tmp/cgroup_lock; killall -9 dd")

    # Verification: parse "<bytes> bytes (...) copied, <secs> s, ..."
    # from each slice's dd output and convert to B/s samples.
    # BUGFIX: second part is now a raw string ('\d', '\w' were invalid
    # escapes in a plain string).
    re_dd = (r'(\d+) bytes \(\d+\.*\d* \w*\) copied, (\d+\.*\d*) s, '
             r'\d+\.*\d* \w./s')
    err = []
    for i in range(len(out)):
        out[i] = [int(int(_[0]) / float(_[1]))
                  for _ in re.findall(re_dd, out[i])]
        if not out[i]:
            # BUGFIX: added the missing space between joined literals
            # ("increasethroughput" -> "increase throughput").
            raise error.TestFail("Not enough samples; please increase "
                                 "throughput speed or testing time;"
                                 "\nsamples: %s" % (out[i]))
        # First samples might be corrupted, use only last sample when
        # not enough data. (which are already an avg of 3xBS)
        warn = False
        if len(out[i]) < 3:
            warn = True
            out[i] = [out[i][-1]]
        count = len(out[i])
        out[i].sort()
        # out = [min, med, max, number_of_samples]
        # BUGFIX: explicit floor division for the median index.
        out[i] = [out[i][0], out[i][count // 2], out[i][-1], count]
        if warn:
            logging.warn("Not enough samples, using the last one (%s)",
                         out[i])
        # speed 0 means unlimited; allow 10% deviation from the limit.
        if ((self.speeds[i] != 0) and
                (distance(out[i][1], self.speeds[i]) > 0.1)):
            # BUGFIX: added the missing space before the parenthesis.
            logging.error("The throughput didn't match the requirements "
                          "(%s !~ %s)", out[i], self.speeds[i])
            err.append(i)

    # With multiple unlimited slices, verify they performed similarly.
    if self.speeds.count(0) > 1:
        unlimited = []
        for i in range(len(self.speeds)):
            if self.speeds[i] == 0:
                unlimited.append(out[i][1])
                self.speeds[i] = "(inf)"

        avg = sum(unlimited) / len(unlimited)
        if avg == 0:
            logging.warn("Average unlimited speed is 0 (%s)", out)
        else:
            for speed in unlimited:
                if distance(speed, avg) > 0.1:
                    logging.warning("Unlimited speeds variates during "
                                    "the test: %s", unlimited)
                    break

    out_speeds = ["%s ~ %s" % (out[i][1], self.speeds[i])
                  for i in range(len(self.speeds))]
    if err:
        if len(out) == 1:
            raise error.TestFail("Actual throughput: %s, theoretical: "
                                 "%s" % (out[0][1], self.speeds[0]))
        elif len(err) == len(out):
            raise error.TestFail("All throughput limits were broken "
                                 "(%s)" % (out_speeds))
        else:
            raise error.TestFail("Limits (%s) were broken (%s)"
                                 % (err, out_speeds))

    return ("All throughputs matched their limits (%s)" % out_speeds)