def run_once(self):
    """
    Exercise 'docker run --cidfile' end to end:

    1) Start an interactive 'sh' container with --cidfile; verify the
       cidfile appears and holds the container's long id.
    2) Verify 'docker run' fails (exit 125) when the cidfile already
       exists while the first container is still running.
    3) Exit the first container; verify the same failure for a cidfile
       belonging to an exited container.
    4) Restart the container by name, verify the cidfile is unchanged,
       kill the container and verify the cidfile once more.
    """
    super(basic, self).run_once()
    containers = []
    # Path that is guaranteed not to exist yet under tmpdir
    cidfile = self._nonexisting_path(self.tmpdir, "cidfile-")
    subargs = self.config.get('run_options_csv').split(',')
    # Interactive container so 'exit' can be typed into its stdin later
    containers.append(self._init_container(subargs, cidfile, 'sh', None,
                                           InteractiveAsyncDockerCmd))
    name = self.sub_stuff['containers'][0]
    self.failif(utils.wait_for(lambda: os.path.isfile(cidfile), 9) is None,
                "cidfile didn't appear in 9s after container execution")
    cont = self._get_container_by_name(name)
    long_id = cont.long_id
    self._check_cidfile(long_id, cidfile)
    # cidfile already exists (running container)
    containers.append(self._init_container(subargs, cidfile, 'true',
                                           lambda x: mustfail(x, 125)))
    self._check_failure_cidfile_present(containers[-1])
    # cidfile already exists (exited container)
    # FIXME: this occasionally throws SIGPIPE, presumably because
    # container isn't fully ready. This is a tough one to solve.
    containers[0].stdin("exit\n")
    containers[0].wait(10)
    containers[0].close()
    containers.append(self._init_container(subargs, cidfile, 'true',
                                           lambda x: mustfail(x, 125)))
    self._check_failure_cidfile_present(containers[-1])
    # restart container with cidfile
    mustpass(dockercmd.DockerCmd(self, 'start', [name]).execute())
    is_alive = lambda: 'Up' in self._get_container_by_name(name).status
    self.failif(utils.wait_for(is_alive, 10) is None, "Container %s "
                "was not restarted in 10 seconds." % name)
    self._check_cidfile(long_id, cidfile)
    # The cidfile must also survive a kill unchanged
    self.sub_stuff['dc'].kill_container_by_name(name)
    self._check_cidfile(long_id, cidfile)
def postprocess(self):
    """
    Verify the container logged exactly two start-date lines (i.e. it
    really ran twice), then kill it.
    """
    super(simple, self).postprocess()
    container = self.sub_stuff['container_name']
    log_cmd = AsyncDockerCmd(self, "logs", ['-f', container])
    log_cmd.execute()

    def two_lines_logged():
        return log_cmd.stdout.count("\n") == 2

    # Allow up to 5 seconds for both start-date lines to show up
    utils.wait_for(two_lines_logged, 5, step=0.1)
    output = log_cmd.stdout
    line_count = output.count("\n")
    msg = ("The container was executed twice, there should be 2 lines "
           "with start dates, but is %s.\nContainer output:\n%s"
           % (line_count, output))
    self.failif(line_count != 2, msg)
    mustpass(DockerCmd(self, "kill", [container]).execute())
def _start_container(self, name):
    """
    Create, store in self.sub_stuff and execute container.

    Runs a bash container that prints a "STARTED: <date>" banner and then
    idles forever, and waits (up to 5s) for the banner to appear.

    :param name: container name (recorded in
                 self.sub_stuff['container_name'])
    """
    self.sub_stuff['container_name'] = name
    if self.config.get('run_options_csv'):
        # FIX: split() already returns a list; the previous identity
        # list-comprehension copy was redundant.
        subargs = self.config['run_options_csv'].split(',')
    else:
        subargs = []
    subargs.append("--name %s" % name)
    fin = DockerImage.full_name_from_defaults(self.config)
    subargs.append(fin)
    subargs.append("bash")
    subargs.append("-c")
    # Recognizable banner, then loop forever so the container keeps
    # running until explicitly stopped/killed.
    subargs.append("'echo STARTED: $(date); while :; do sleep 0.1; done'")
    container = AsyncDockerCmd(self, 'run', subargs)
    container.execute()
    # Wait for the banner so callers know the container is up
    utils.wait_for(lambda: container.stdout.startswith("STARTED"),
                   5, step=0.1)
def postprocess(self):
    """
    Wait until the container has logged STARTED twice, then kill it.
    """
    super(simple, self).postprocess()
    container = self.sub_stuff['container_name']

    def started_twice():
        # Poll 'docker logs' for the second STARTED banner
        logs = mustpass(DockerCmd(self, "logs", [container]).execute())
        return logs.stdout.count("STARTED") == 2

    self.failif(not utils.wait_for(started_twice, 10, step=0.5),
                "Timed out waiting for second STARTED message.\n")
    mustpass(DockerCmd(self, "kill", [container]).execute())
def _get_container_by_name(self, name):
    """ Runs 'docker ps' until desired named container is found """
    # Mutable one-element cell so the closure can publish its result
    # (python2 has no 'nonlocal')
    match = [[]]

    def locate():
        match[0] = self.sub_stuff['dc'].list_containers_with_name(name)
        return len(match[0]) == 1

    self.failif(not utils.wait_for(locate, 10),
                "Timed out waiting for container '%s'" % name)
    return match[0][0]
def postprocess(self):
    """
    After docker stops, verify its graph directory was unmounted, still
    exists on disk and can be removed; finally drop the test container
    from the cleanup list.
    """
    super(restart_check_mess_after_stop, self).postprocess()
    g_path = self.sub_stuff["graph_path"]
    # Unmounting can lag behind daemon shutdown; allow up to a minute
    wait = lambda: g_path not in self.list_mounted_dirs().stdout
    utils.wait_for(wait, 60)
    res = self.list_mounted_dirs()
    self.failif(res.exit_status != 0,
                "Mount command was not successful\n%s" % res)
    # FIX: the two literals used to concatenate without a space
    # ("...after dockerfinish unmounted.:")
    self.failif(g_path in res.stdout,
                "All folders mounted by docker should be unmounted "
                "after docker finishes:\n%s" % res.stdout)
    # FIX: the message used to interpolate os.path.exists(...) — always
    # False in this branch — instead of the offending path
    self.failif(not os.path.exists(g_path),
                "Docker working directory (graph) not exists: %s"
                % g_path)
    res = self.rm_graph_dir()
    self.failif(res.exit_status != 0,
                "rm command was not successful\n%s" % res)
    self.sub_stuff["containers"].remove(self.sub_stuff["cont1_name"])
def _check_signal(self, container_out, _check, signal, timeout):
    """
    Check container for $signal check output presence

    :param container_out: output tracker exposing .idx and .get(idx)
    :param _check: %-format template for the expected output line
    :param signal: signal (number/name) substituted into _check
    :param timeout: seconds to wait for the line to be logged
    :raise xceptions.DockerTestFail: if the line never appears
    """
    _idx = container_out.idx
    check = _check % signal
    output_matches = lambda: check in container_out.get(_idx)
    # Wait until the signal gets logged
    if wait_for(output_matches, timeout, step=0) is None:
        msg = ("Signal %s not handled inside container.\nExpected "
               "output:\n %s\nActual container output:\n %s"
               % (signal, check, "\n ".join(container_out.get(_idx))))
        self.logdebug(msg)
        # FIX: missing space between concatenated literals used to
        # render the message as "see debuglog"
        raise xceptions.DockerTestFail("Unhandled signal(s), see debug "
                                       "log for more details")
def _check_signal(self, container_out, _check, signal, timeout):
    """
    Check container for $signal check output presence

    :param container_out: output tracker exposing .idx and .get(idx)
    :param _check: %-format template for the expected output line
    :param signal: signal (number/name) substituted into _check
    :param timeout: seconds to wait for the line to be logged
    :raise xceptions.DockerTestFail: if the line never appears
    """
    _idx = container_out.idx
    check = _check % signal
    output_matches = lambda: check in container_out.get(_idx)
    # Wait until the signal gets logged
    if wait_for(output_matches, timeout, step=0) is None:
        msg = (
            "Signal %s not handled inside container.\nExpected "
            "output:\n %s\nActual container output:\n %s"
            % (signal, check, "\n ".join(container_out.get(_idx)))
        )
        self.logdebug(msg)
        # FIX: missing space between concatenated literals used to
        # render the message as "see debuglog"
        raise xceptions.DockerTestFail("Unhandled signal(s), see debug "
                                       "log for more details")
def _check_signal(self, container_out, _check, signal, timeout):
    """
    Inverse container check for $signal check output presence

    Fails when the signal-handled line DOES appear, since the signal is
    documented as not forwarded into the container.

    :param container_out: output tracker exposing .idx and .get(idx)
    :param _check: %-format template for the (unwanted) output line
    :param signal: signal (number/name) substituted into _check
    :param timeout: seconds to watch for the line
    :raise xceptions.DockerTestFail: if the line shows up
    """
    _idx = container_out.idx
    check = _check % signal
    output_matches = lambda: check in container_out.get(_idx)
    # Wait until the signal gets logged
    if wait_for(output_matches, timeout, step=0) is not None:
        # FIX: typo "althought" and missing space before "Container"
        msg = ("Signal %s present inside container although according to"
               " documentation it should not be forwarded/handled. "
               "Container output:\n %s"
               % (signal, "\n ".join(container_out.get(_idx))))
        self.logdebug(msg)
        raise xceptions.DockerTestFail("Signal documented as non-"
                                       "forwardable present in the output")
def run_once(self): super(restart_container_autorestart_base, self).run_once() fin = DockerImage.full_name_from_defaults(self.config) args1 = ["--name=%s" % (self.sub_stuff["cont1_name"])] args1.append(fin) if self.config.get('interruptable'): args1 += ["python", "-c", "'import signal; " "signal.signal(signal.SIGTERM, exit); signal.pause()'"] else: args1 += ["bash", "-c", '"while [ true ]; do sleep 1; done"'] self.sub_stuff["bash1"] = self.dkr_cmd.async("run", args1) # Wait for container creation c_name = self.sub_stuff["cont1_name"] wait_cont = lambda: self.conts.list_containers_with_name(c_name) != [] ret = utils.wait_for(wait_cont, 240) self.failif(ret is None, "Unable to start container.") self.daemon_restat()
def init_container(self, volume=None, volumes_from=None):
    """
    Starts container

    Runs an interactive 'sh' container whose stdin is the write end of a
    local pipe, waits for it to appear in 'docker ps' and to answer an
    echo probe.

    :param volume: optional value for docker's --volume option
    :param volumes_from: optional value for docker's --volumes-from
    :return: AsyncDockerCmd with .stdin set to the pipe's write fd
    """
    subargs = config.get_as_list(self.config['run_options_csv'])
    if volume:
        subargs.append("--volume %s" % volume)
    if volumes_from:
        subargs.append("--volumes-from %s" % volumes_from)
    dc = self.sub_stuff['dc']
    name = dc.get_unique_name()
    self.sub_stuff['containers'].append(name)
    subargs.append("--name %s" % name)
    fin = DockerImage.full_name_from_defaults(self.config)
    subargs.append(fin)
    subargs.append("sh")
    read_fd, write_fd = os.pipe()
    # Track both ends so cleanup can close them on any failure path
    self.sub_stuff['fds'].append(write_fd)
    self.sub_stuff['fds'].append(read_fd)
    dkrcmd = dockercmd.AsyncDockerCmd(self, 'run', subargs)
    dkrcmd.execute(read_fd)
    dkrcmd.stdin = write_fd
    os.close(read_fd)  # no longer needed
    # This could take a while if image needs pulling
    # FIX: previous code compared str(...) != [] which is always True,
    # so the wait_for succeeded immediately without actually waiting.
    find_name = lambda: dc.list_containers_with_name(name) != []
    self.failif_ne(utils.wait_for(find_name,
                                  timeout=self.config['docker_timeout']),
                   True,  # None == timeout
                   "Container %s not in 'docker ps' output within"
                   " timeout: %s" % (name, dc.list_containers()))
    # Don't match echo of the 'echo' command itself
    self.logdebug('Confirming %s is responding', name)
    os.write(write_fd, "echo -e '%s'\n" % self.OCTAL_READY)
    dkrcmd.wait_for_ready()
    self.logdebug('Confirmed')
    return dkrcmd
def run(test, params, env):
    """
    Qemu balloon device stress test:
    1) boot guest with balloon device
    2) enable driver verifier in guest
    3) reboot guest (optional)
    4) check device using right driver in guest.
    5) play online video in guest
    6) balloon memory in monitor in loop
    7) check vm alive

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    error.context("Boot guest with balloon device", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    default_memory = int(params.get("default_memory", 8192))
    # Scale factor for balloon values; depends on the monitor protocol
    unit = 10485760 if vm.monitor.protocol == "qmp" else 1
    timeout = float(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    # for media player configuration
    if params.get("pre_cmd"):
        session.cmd(params.get("pre_cmd"))
    error.context("enable driver verifier in guest", logging.info)
    enable_driver_verifier_cmd = params.get("enable_driver_verifier_cmd")
    if enable_driver_verifier_cmd:
        session.cmd(enable_driver_verifier_cmd, timeout=timeout,
                    ignore_all_errors=True)
    if params.get("need_reboot", "") == "yes":
        session = vm.reboot()
    # FIX: "ballon" typo corrected to "balloon" in messages below
    error.context("verify balloon device driver", logging.info)
    driver_verifier_cmd = params.get("driver_verifier_cmd")
    output = session.cmd_output(driver_verifier_cmd, timeout=timeout)
    driver_name = params["driver_name"]
    if not re.search(r"%s" % driver_name, output, re.M):
        msg = "Verify device driver failed, "
        msg += "guest report driver is %s, " % output
        msg += "expect is '%s'" % driver_name
        raise error.TestFail(msg)
    error.context("Play video in guest", logging.info)
    play_video_cmd = params["play_video_cmd"]
    session.sendline(play_video_cmd)
    # need to wait for wmplayer loading remote video
    time.sleep(float(params.get("loading_timeout", 60)))
    check_playing_cmd = params["check_playing_cmd"]
    video_playing = lambda: session.cmd_status(check_playing_cmd) == 0
    running = utils.wait_for(video_playing, first=5.0, timeout=600)
    if not running:
        # FIX: broken error message "Video do not playing"
        raise error.TestError("Video is not playing")
    error.context("balloon vm memory in loop", logging.info)
    repeat_times = int(params.get("repeat_times", 10))
    logging.info("repeat times: %d" % repeat_times)
    magnification = int(params.get("magnification", 512))
    logging.info("memory decrease magnification: %d" % magnification)
    start = magnification * unit
    end = default_memory * unit
    step = start
    while repeat_times:
        for memory in xrange(start, end, step):
            logging.debug("balloon vm mem to: %s B" % memory)
            # FIX: monitor query command is "balloon", not "ballon"
            vm.monitor.send_args_cmd("balloon value=%s" % memory)
            vm.monitor.query("balloon")
            # Balloon back towards the top of the range
            memory = end - memory
            # FIX: log AFTER recomputing memory so the message matches
            # the value actually sent (used to log the stale value)
            logging.debug("balloon vm mem to: %s B" % memory)
            vm.monitor.send_args_cmd("balloon value=%s" % memory)
            vm.monitor.query("balloon")
        repeat_times -= 1
    error.context("verify guest still alive", logging.info)
    session.cmd(params["stop_player_cmd"])
    vm.verify_alive()
    if session:
        session.close()
def run(test, params, env):
    """
    Test start domain with nwfilter rules.
    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env

    Stresses concurrent filter redefinition and VM start/destroy loops
    to detect a libvirtd hang (tracked by bug_url).
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    status_error = "yes" == params.get("status_error", "no")
    bug_url = params.get("bug_url", "")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Prepare vm filterref parameters dict list
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    if not libvirt_version.version_compare(1, 2, 6):
        raise error.TestNAError("Bug %s not fixed on current build"
                                % bug_url)
    # backup vm and filter xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_filter = libvirt_xml.NwfilterXML()
    filterxml = backup_filter.new_from_filter_dumpxml(filter_name)
    libvirtd = utils_libvirtd.LibvirtdSession()

    # FIX: parameter was misspelled "filerxml" and the body silently used
    # the outer closure variable instead of the argument it was given.
    def nwfilter_sync_loop(filter_name, filterxml):
        """
        Undefine filter and redefine filter from xml in loop
        """
        for _ in range(2400):
            virsh.nwfilter_undefine(filter_name, ignore_status=True)
            time.sleep(0.1)
            virsh.nwfilter_define(filterxml.xml, ignore_status=True)

    # FIX: renamed from "vm_start_destory_loop" (internal-only typo)
    def vm_start_destroy_loop(vm):
        """
        Start and destroy vm in loop
        """
        for _ in range(2400):
            vm.start()
            time.sleep(0.1)
            vm.destroy(gracefully=False)

    try:
        libvirtd.start()
        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()
        filter_thread = threading.Thread(target=nwfilter_sync_loop,
                                         args=(filter_name, filterxml))
        vm_thread = threading.Thread(target=vm_start_destroy_loop,
                                     args=(vm,))
        filter_thread.start()
        # Stagger the threads so the loops overlap rather than race
        # from identical starting points
        time.sleep(0.3)
        vm_thread.start()
        # libvirtd going non-responsive within 240s means the bug hit
        ret = utils.wait_for(lambda: not libvirtd.is_working(),
                             timeout=240, step=1)
        filter_thread.join()
        vm_thread.join()
        if ret:
            raise error.TestFail("Libvirtd hang, %s" % bug_url)
    finally:
        libvirtd.exit()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm and filter.
        vmxml_backup.sync()
        virsh.nwfilter_undefine(filter_name, ignore_status=True)
        virsh.nwfilter_define(filterxml.xml, ignore_status=True)
def run(test, params, env):
    """
    Test update filter rules when domain is running.
    1) Prepare parameters.
    2) Add filter to domain interface.
    3) Start domain.
    4) Update filter rule and check
    5) Cleanup
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    check_vm_cmd = params.get("check_vm_cmd")
    vm_expect_match = params.get("vm_expect_match")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    new_filter = libvirt_xml.NwfilterXML()
    filter_backup = new_filter.new_from_filter_dumpxml(filter_name)
    try:
        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()
        # Start vm
        vm.start()
        session = vm.wait_for_login()
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        iface_target = iface_xml.target['dev']
        logging.debug("iface target dev name is %s", iface_target)
        # Update filter rule by nwfilter-define
        filterxml = utlv.create_nwfilter_xml(params)
        # Define filter xml
        virsh.nwfilter_define(filterxml.xml, debug=True)
        # Check ebtables on host after filter update
        if "DEVNAME" in check_cmd:
            check_cmd = check_cmd.replace("DEVNAME", iface_target)
        # Rules may take a moment to propagate after redefinition
        ret = utils.wait_for(lambda: not
                             utils.system(check_cmd, ignore_status=True),
                             timeout=30)
        if not ret:
            # FIX: typo "Rum command" in the error message
            raise error.TestFail("Run command '%s' failed" % check_cmd)
        out = utils.system_output(check_cmd, ignore_status=False)
        if expect_match and not re.search(expect_match, out):
            raise error.TestFail("'%s' not found in output: %s"
                                 % (expect_match, out))
        # Check in vm
        if check_vm_cmd:
            output = session.cmd_output(check_vm_cmd)
            logging.debug("cmd output: %s", output)
            if vm_expect_match and not re.search(vm_expect_match, output):
                raise error.TestFail("'%s' not found in output: %s"
                                     % (vm_expect_match, output))
    finally:
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Restore created filter
        virsh.nwfilter_undefine(filter_name, debug=True)
        virsh.nwfilter_define(filter_backup.xml, debug=True)
def run(test, params, env):
    """
    Test start domain with nwfilter rules.
    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env

    Attaches a filtered interface (optionally twice, which must fail) and
    verifies host iptables/ebtables rules.
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    attach_option = params.get("attach_option", "")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    attach_twice_invalid = "yes" == params.get("attach_twice_invalid",
                                               "no")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Prepare vm filterref parameters dict list
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    # Prepare interface parameters
    iface_type = 'network'
    iface_source = {'network': 'default'}
    iface_target = params.get("iface_target", 'vnet1')
    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Prepare interface xml for attach
        new_iface = interface.Interface(type_name=iface_type)
        new_iface.source = iface_source
        new_iface.target = {'dev': iface_target}
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        # Attach interface to vm
        ret = virsh.attach_device(vm_name, new_iface.xml,
                                  flagstr=attach_option,
                                  debug=True, ignore_status=True)
        utlv.check_exit_status(ret, status_error)
        if attach_twice_invalid:
            # Second attach of the same device must be rejected
            ret = virsh.attach_device(vm_name, new_iface.xml,
                                      flagstr=attach_option,
                                      debug=True, ignore_status=True)
            utlv.check_exit_status(ret, status_error)
        if not libvirtd.is_running():
            raise error.TestFail("libvirtd not running after attach "
                                 "interface.")
        # Check iptables or ebtables on host
        if check_cmd:
            if "DEVNAME" in check_cmd:
                check_cmd = check_cmd.replace("DEVNAME", iface_target)
            # Rules may take a moment to propagate after attach
            ret = utils.wait_for(lambda: not
                                 utils.system(check_cmd,
                                              ignore_status=True),
                                 timeout=30)
            if not ret:
                # FIX: typo "Rum command" in the error message
                raise error.TestFail("Run command '%s' failed"
                                     % check_cmd)
            out = utils.system_output(check_cmd, ignore_status=False)
            if expect_match and not re.search(expect_match, out):
                raise error.TestFail("'%s' not found in output: %s"
                                     % (expect_match, out))
    finally:
        if attach_twice_invalid:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
def run(test, params, env): """ Test start domain with nwfilter rules. 1) Prepare parameters. 2) Prepare nwfilter rule and update domain interface to apply. 3) Start domain and check rule. 4) Clean env """ # Prepare parameters filter_name = params.get("filter_name", "testcase") exist_filter = params.get("exist_filter", "no-mac-spoofing") check_cmd = params.get("check_cmd") expect_match = params.get("expect_match") status_error = "yes" == params.get("status_error", "no") mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no") kill_libvirtd = "yes" == params.get("kill_libvirtd", "no") bug_url = params.get("bug_url", "") ipset_command = params.get("ipset_command") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Prepare vm filterref parameters dict list filter_param_list = [] params_key = [] for i in params.keys(): if 'parameter_name_' in i: params_key.append(i) params_key.sort() for i in range(len(params_key)): params_dict = {} params_dict['name'] = params[params_key[i]] params_dict['value'] = params['parameter_value_%s' % i] filter_param_list.append(params_dict) filterref_dict = {} filterref_dict['name'] = filter_name filterref_dict['parameters'] = filter_param_list # backup vm xml vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) libvirtd = utils_libvirtd.Libvirtd() device_name = None try: rule = params.get("rule") if rule: # Create new filter xml filterxml = utlv.create_nwfilter_xml(params) # Define filter xml virsh.nwfilter_define(filterxml.xml, debug=True) # Update first vm interface with filter vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface_xml = vmxml.get_devices('interface')[0] vmxml.del_device(iface_xml) new_iface = interface.Interface('network') new_iface.xml = iface_xml.xml new_filterref = new_iface.new_filterref(**filterref_dict) new_iface.filterref = new_filterref logging.debug("new interface xml is: %s" % new_iface) vmxml.add_device(new_iface) vmxml.sync() if mount_noexec_tmp: device_name = 
utlv.setup_or_cleanup_iscsi(is_setup=True) utlv.mkfs(device_name, 'ext4') cmd = "mount %s /tmp -o noexec,nosuid" % device_name utils.run(cmd) if ipset_command: try: os_dep.command("ipset") except ValueError: ret = utils.run("yum install ipset -y") if ret.exit_status: raise error.TestNAError("Can't install ipset on host") utils.run(ipset_command) # Run command try: vm.start() vm.wait_for_serial_login() vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) iface_xml = vmxml.get_devices('interface')[0] iface_target = iface_xml.target['dev'] logging.debug("iface target dev name is %s", iface_target) # Check iptables or ebtables on host if check_cmd: if "DEVNAME" in check_cmd: check_cmd = check_cmd.replace("DEVNAME", iface_target) ret = utils.wait_for(lambda: not utils.system(check_cmd, ignore_status=True), timeout=30) if not ret: raise error.TestFail("Rum command '%s' failed" % check_cmd) out = utils.system_output(check_cmd, ignore_status=False) if expect_match and not re.search(expect_match, out): raise error.TestFail("'%s' not found in output: %s" % (expect_match, out)) except virt_vm.VMStartError, e: # Starting VM failed. if not status_error: raise error.TestFail("Test failed in positive case.\n error:" " %s\n%s" % (e, bug_url)) if kill_libvirtd: cmd = "kill -SIGTERM `pidof libvirtd`" utils.run(cmd) ret = utils.wait_for(lambda: not libvirtd.is_running(), timeout=30) if not ret: raise error.TestFail("Failed to kill libvirtd. %s" % bug_url)