Example 1
 def postprocess(self):
     super(systemd_run, self).postprocess()
     host = 'localhost'
     port = 4321
     utils.wait_for(lambda: not is_port_free(port, host), 10)
     time_from_socket = self.read_socket(host, port)
     self.failif(not time_from_socket.isdigit(),
                 "Data received from container is non-numeric: '%s'"
                 % time_from_socket)
Example 2
 def postprocess(self):
     super(systemd_run, self).postprocess()
     host = 'localhost'
     port = 4321
     utils.wait_for(lambda: not is_port_free(port, host), 10)
     time_from_socket = self.read_socket(host, port)
     self.failif(
         not time_from_socket.isdigit(),
         "Data received from container is non-numeric: '%s'" %
         time_from_socket)
Example 3
 def run_once(self):
     host = 'localhost'
     port = 4321
     self.sysd_action('daemon-reload')
     self.sysd_action('start', self.config['sysd_unit_file'])
     utils.wait_for(lambda: not is_port_free(port, host), 10)
     time_from_socket = self.read_socket(host, port)
     self.failif(not time_from_socket.isdigit(),
                 "Data received from container is non-numeric: '%s'"
                 % time_from_socket)
Example 4
 def get_container_ip(self, cont_name):
     # Wait until the container is ready.
     got_json = lambda: self.get_jason(cont_name)
     utils.wait_for(got_json,
                    120,
                    text='Waiting on container %s start' % cont_name)
     json = self.get_jason(cont_name)
     if len(json) == 1:
         netset = json[0].get("NetworkSettings")
         if netset is not None:
             return netset.get("IPAddress")  # Could return None
     return None
Example 5
    def run_once(self):
        super(liverestore, self).run_once()

        self._start_container()
        self._verify_that_container_is_running()

        # Restart docker daemon. It should be nearly instantaneous; if it
        # takes ~90 seconds it could be another symptom of rhbz1424709.
        # The warning message might help someone diagnose a later failure.
        # 2018-04-04 also restart docker-containerd. This is currently
        # only meaningful for Fedora 28; the service doesn't exist on RHEL.
        # So don't actually check exit status.
        self.stuff['dockerd_pid_orig'] = docker_daemon.pid()
        self.stuff['container_pid_orig'] = self._container_pid()
        t0 = time.time()
        utils.run('systemctl restart docker-containerd.service',
                  ignore_status=True)
        docker_daemon.restart()
        t1 = time.time()
        if t1 - t0 > 30:
            self.logwarning("docker restart took %d seconds", t1 - t0)

        # Wait until docker is back
        def _docker_is_active():
            result = docker_daemon.systemd_action('is-active').stdout.strip()
            self.logdebug("is-active -> %s" % result)
            return result == 'active'

        self.failif(
            utils.wait_for(_docker_is_active, 15, step=1) is None,
            "Timed out waiting for docker daemon ")
Example 6
 def init_save_images(self):
     # If images w/ same id as remote image already exist, save then remove
     di = self.sub_stuff['img']
     imgs = di.list_imgs_with_full_name(self.config["remote_image_fqin"])
     if imgs:
         long_id = imgs[0].long_id
         existing_images = di.list_imgs_with_image_id(long_id)
         self.sub_stuff['saved_images'] = os.path.join(self.tmpdir,
                                                       str(long_id))
         subargs = ['--output', self.sub_stuff['saved_images']]
         for img in existing_images:
             self.loginfo("Going to save image %s" % img.full_name)
             subargs.append(img.full_name)
         self.loginfo("Saving images...")
         mustpass(DockerCmd(self, 'save', subargs).execute())
         self.loginfo("Removing images...")
         subargs = ['--force']
         subargs += [img.full_name for img in existing_images]
         mustpass(DockerCmd(self, 'rmi', subargs).execute())
         # Wait for images to actually go away
         _fn = lambda: self.long_id_in_images(long_id)
         gone = utils.wait_for(_fn, 60, step=1,
                               text="Waiting for image removal")
         self.logdebug("Current images: %s", di.list_imgs())
         if not gone:
             raise DockerTestFail("Timeout waiting for removal of %s"
                                  % long_id)
Example 7
def run(test, params, env):
    """
    Kill started qemu VM process with different signals and check
    the status of the VM changes accordingly.
    """
    vm_name = params.get('main_vm')
    sig_name = params.get('signal', 'SIGSTOP')
    vm_state = params.get('vm_state', 'running')
    expect_stop = params.get('expect_stop', 'yes') == 'yes'
    vm = env.get_vm(vm_name)

    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        if vm_state == 'running':
            pass
        elif vm_state == 'paused':
            vm.pause()
        elif vm_state == 'pmsuspended':
            vm.prepare_guest_agent()
            vm.pmsuspend()
        else:
            raise error.TestError("Unhandled VM state %s" % vm_state)

        os.kill(vm.get_pid(), getattr(signal, sig_name))

        stopped = bool(utils.wait_for(lambda: vm.state() == 'shut off', 10))
        if stopped != expect_stop:
            raise error.TestFail('Expected VM stop is "%s", got "%s"'
                                 % (expect_stop, vm.state()))
    finally:
        xml_backup.sync()
Example 8
 def init_save_images(self):
     # If images w/ same id as remote image already exist, save then remove
     di = self.sub_stuff['img']
     imgs = di.list_imgs_with_full_name(self.config["remote_image_fqin"])
     if imgs:
         long_id = imgs[0].long_id
         existing_images = di.list_imgs_with_image_id(long_id)
         self.sub_stuff['saved_images'] = os.path.join(self.tmpdir,
                                                       str(long_id))
         subargs = ['--output', self.sub_stuff['saved_images']]
         for img in existing_images:
             self.loginfo("Going to save image %s" % img.full_name)
             subargs.append(img.full_name)
         self.loginfo("Saving images...")
         mustpass(DockerCmd(self, 'save', subargs).execute())
         self.loginfo("Removing images...")
         subargs = ['--force']
         subargs += [img.full_name for img in existing_images]
         mustpass(DockerCmd(self, 'rmi', subargs, verbose=True).execute())
         # Wait for images to actually go away
         _fn = lambda: self.long_id_in_images(long_id)
         gone = utils.wait_for(_fn, 60, step=1,
                               text="Waiting for image removal")
         self.logdebug("Current images: %s", di.list_imgs())
         if not gone:
             raise DockerTestFail("Timeout waiting for removal of %s"
                                  % long_id)
Example 9
 def wait_start(self):
     self.sub_stuff['dkrcmd'].execute()
     self.loginfo("Waiting up to %s seconds for container start",
                  self.config['docker_timeout'])
     self.failif(not utils.wait_for(func=self.cidfile_has_cid,
                                    timeout=self.config['docker_timeout'],
                                    text=("\t\tWaiting for container to "
                                          "start")))
Example 10
 def wait_start(self):
     self.sub_stuff['dkrcmd'].execute()
     self.loginfo("Waiting up to %s seconds for container start",
                  self.config['docker_timeout'])
     self.failif(not utils.wait_for(func=self.cidfile_has_cid,
                                    timeout=self.config['docker_timeout'],
                                    text=("\t\tWaiting for container to "
                                          "start")))
Example 11
    def postprocess(self):
        net_device = self.sub_stuff['net_device']
        container_rules = lambda: self.read_iptable_rules(net_device)
        added_rules = utils.wait_for(container_rules, 10, step=0.1)
        self.failif(not added_rules, "No rules added when container started.")
        self.loginfo("Container %s\niptable rule list %s:" %
                     (self.sub_stuff['name'], added_rules))

        NoFailDockerCmd(self, 'stop',
                        ["-t 0", self.sub_stuff['name']]).execute()

        container_rules = lambda: not self.read_iptable_rules(net_device)
        removed_rules = utils.wait_for(container_rules, 10, step=0.1)
        self.failif(not removed_rules, "Container %s iptable rules not "
                    "removed in 10s after stop. Rules:\n%s"
                    % (self.sub_stuff['name'],
                       self.read_iptable_rules(net_device)))
Example 12
    def postprocess(self):
        super(sigproxy_base, self).postprocess()
        self._check_results()

        # stop the container
        container_name = self.sub_stuff['container_name']
        mustpass(DockerCmd(self, "kill", [container_name]).execute())
        container = self.sub_stuff['container_cmd']
        if not utils.wait_for(lambda: container.done, 5, step=0.1):
            raise DockerTestFail("Unable to kill container after test...")
Example 13
    def postprocess(self):
        super(sigproxy_base, self).postprocess()
        self._check_results()

        # stop the container
        container_name = self.sub_stuff['container_name']
        mustpass(DockerCmd(self, "kill", [container_name]).execute())
        container = self.sub_stuff['container_cmd']
        if not utils.wait_for(lambda: container.done, 5, step=0.1):
            raise DockerTestFail("Unable to kill container after test...")
Example 14
 def wait_for_output(check, output, stderr=False):
     """ Wait until check in the new output """
     idx = output.idx
     if stderr:
         output_matches = lambda: check in output.geterr(idx)
     else:
         output_matches = lambda: check in output.get(idx)
     if utils.wait_for(output_matches, 10, step=0.01) is None:
         return -1
     return 0
Example 15
 def wait_container(self):
     wait_stop = self.config["wait_stop"]
     self.logdebug("Waiting up to %d seconds for container to exit", wait_stop)
     dkrcmd = self.sub_stuff["dkrcmd"]
     self.failif(
         not utils.wait_for(
             func=self.container_finished, timeout=wait_stop, text=("\t\tWaiting for container to " "exit")
         ),
         "Container did not exit w/in timeout: stdout '%s' " "stderr '%s'" % (dkrcmd.stdout, dkrcmd.stderr),
     )
     cmdresult = self.sub_stuff["cmdresult"] = dkrcmd.wait()
     self.logdebug("Result: %s", cmdresult)
Example 16
 def wait_container(self):
     wait_stop = self.config['wait_stop']
     self.logdebug("Waiting up to %d seconds for container to exit",
                   wait_stop)
     dkrcmd = self.sub_stuff['dkrcmd']
     self.failif(
         not utils.wait_for(func=self.container_finished,
                            timeout=wait_stop,
                            text=("\t\tWaiting for container to "
                                  "exit")),
         "Container did not exit w/in timeout: stdout '%s' "
         "stderr '%s'" % (dkrcmd.stdout, dkrcmd.stderr))
     cmdresult = self.sub_stuff['cmdresult'] = dkrcmd.wait()
     self.logdebug("Result: %s", cmdresult)
Example 17
    def postprocess(self):
        super(cp_symlink, self).postprocess()

        # Container should terminate on its own
        self.failif(not utils.wait_for(func=self._container_done,
                                       timeout=5,
                                       text="\tWaiting for container to exit"),
                    "Container did not exit! Perhaps /stop did not copy?")
        cmdresult = self.sub_stuff['dkrcmd'].wait(1)
        OutputGood(cmdresult)
        self.failif(cmdresult.exit_status == 4,
                    "File was not copied into container:%s" % self.config['destdir'])
        self.failif(cmdresult.exit_status == 5,
                    "File was copied, but content is bad: %s" % cmdresult.stdout)
        self.failif_ne(cmdresult.exit_status, 0,
                       "Unexpected error running command: %s" % cmdresult)
Example 18
    def wait_for_stop(self, timeout=60, step=0.1):
        """
        Wait for libvirtd to stop.

        :param timeout: Max wait time
        :param step: Checking interval
        """
        logging.debug('Waiting for libvirtd to stop')
        if self.gdb:
            return self.gdb.wait_for_stop(timeout=timeout)
        else:
            return utils.wait_for(
                lambda: not self.running,
                timeout=timeout,
                step=step,
            )
Example 19
def wait_for_output(output_fn, pattern, timeout=60, timestep=0.2):
    r"""
    Wait up to ``timeout`` seconds for ``pattern`` to appear in the output
    returned by ``output_fn``.

    :param output_fn: function which returns data for matching.
    :type output_fn: function
    :param pattern: string which should be found in stdout.
    :return: True if pattern matches process_output else False
    """
    if not callable(output_fn):
        raise TypeError("Output function type %s value %s is not a callable" %
                        (output_fn.__class__.__name__, str(output_fn)))
    regex = re.compile(pattern)
    _fn = lambda: regex.findall(output_fn()) != []
    res = utils.wait_for(_fn, timeout, step=timestep)
    if res:
        return True
    return False
Example 20
def wait_for_output(output_fn, pattern, timeout=60, timestep=0.2):
    r"""
    Wait up to ``timeout`` seconds for ``pattern`` to appear in the output
    returned by ``output_fn``.

    :param output_fn: function which returns data for matching.
    :type output_fn: function
    :param pattern: string which should be found in stdout.
    :return: True if pattern matches process_output else False
    """
    if not callable(output_fn):
        raise TypeError("Output function type %s value %s is not a callable"
                        % (output_fn.__class__.__name__, str(output_fn)))
    regex = re.compile(pattern)
    _fn = lambda: regex.findall(output_fn()) != []
    res = utils.wait_for(_fn, timeout, step=timestep)
    if res:
        return True
    return False
Example 21
        def test(self):
            super(test_multihost_copy, self).test()
            copy_timeout = int(params.get("copy_timeout", 480))
            checksum_timeout = int(params.get("checksum_timeout", 180))

            pid = None
            sync_id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "file_trasfer"}
            filename = "orig"

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                vm.monitor.migrate_set_speed("1G")
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                cdrom = cdrom_dev_list[-1]
                mount_point = get_cdrom_mount_point(session, cdrom, params)
                mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
                src_file = params["src_file"] % (mount_point, filename)
                dst_file = params["dst_file"] % filename
                copy_file_cmd = params[
                    "copy_file_cmd"] % (mount_point, filename)
                remove_file_cmd = params["remove_file_cmd"] % filename
                md5sum_cmd = params["md5sum_cmd"]
                if params["os_type"] != "windows":
                    error.context("Mount and copy data")
                    session.cmd(mount_cmd, timeout=30)

                error.context("File copying test")
                session.cmd(copy_file_cmd)

                pid = disk_copy(vm, src_file, dst_file, copy_timeout)

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=cdrom_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Runs on the destination host
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                error.context("Wait for copy finishing.")

                def is_copy_done():
                    if params["os_type"] == "windows":
                        cmd = "tasklist /FI \"PID eq %s\"" % pid
                    else:
                        cmd = "ps -p %s" % pid
                    return session.cmd_status(cmd) != 0
                if utils.wait_for(is_copy_done, timeout=copy_timeout) is None:
                    raise error.TestFail("Wait for file copy finish timeout")

                error.context("Compare file on disk and on cdrom")
                f1_hash = session.cmd("%s %s" % (md5sum_cmd, dst_file),
                                      timeout=checksum_timeout).split()[0]
                f2_hash = session.cmd("%s %s" % (md5sum_cmd, src_file),
                                      timeout=checksum_timeout).split()[0]
                if f1_hash.strip() != f2_hash.strip():
                    raise error.TestFail("On disk and on cdrom files are"
                                         " different, md5 mismatch")
                session.cmd(remove_file_cmd)

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'Finish_cdrom_test', login_timeout)
Example 22
 def wait_start(self):
     self.stuff['dkrcmd'].execute()
     self.failif(not utils.wait_for(func=self.cidfile_has_cid,
                                    timeout=self.config['docker_timeout'],
                                    text="Waiting for container to start"))
Example 23
 def wait_start(self):
     self.stuff['dkrcmd'].execute()
     self.failif(not utils.wait_for(func=self.cidfile_has_cid,
                                    timeout=self.config['docker_timeout'],
                                    text="Waiting for container to start"))
Example 24
    def run_once(self):
        def wait_for_output(check, output, stderr=False):
            """ Wait until check in the new output """
            idx = output.idx
            if stderr:
                output_matches = lambda: check in output.geterr(idx)
            else:
                output_matches = lambda: check in output.get(idx)
            if utils.wait_for(output_matches, 10, step=0.01) is None:
                return -1
            return 0

        def error_msg(log_us):
            """ Format a nice string from dictionary """
            out = ["%s\n%s" % (key, value)
                   for key, value in log_us.iteritems()]
            return "\n\n".join(out)

        def _output_matches(cmd1, cmd2):
            """ Compares the output of stdout&stderr """
            out1 = cmd1.stdout.splitlines() + cmd1.stderr.splitlines()
            out1 = set((_ for _ in out1 if not _.startswith('[debug]')))
            out2 = cmd2.stdout.splitlines() + cmd2.stderr.splitlines()
            out2 = set((_ for _ in out2 if not _.startswith('[debug]')))
            return out1 == out2

        super(simple_base, self).run_once()
        log_us = {}
        # Create container
        dkrcmd, name = self._init_container(self.sub_stuff['subargs'], 'bash')
        log_us['container'] = dkrcmd
        dkrcmd.execute()
        self.wait_exists(name)
        # Create docker logs --follow
        log1 = AsyncDockerCmd(self, 'logs', ['--follow', name],
                              verbose=False)
        self.sub_stuff['async_processes'].append(log1)
        log_us['log1'] = log1
        log1.execute()
        log1_out = Output(log1)
        # Generate output to stdout
        for _ in xrange(5):
            prefix = utils.generate_random_string(5)
            dkrcmd.stdin("PREFIX='%s'\n" % prefix)
            line = utils.generate_random_string(10)
            dkrcmd.stdin("echo $PREFIX: %s\n" % line)
            line = "%s: %s" % (prefix, line)
            self.failif(wait_for_output(line, log1_out),
                        "Stdout '%s' did not occur in log1 output in 10s:\n%s"
                        % (line, error_msg(log_us)))
        # Start docker logs without follow and compare output
        log2 = DockerCmd(self, 'logs', [name], verbose=False)
        log_us['log2'] = log2
        log2.execute()
        match = lambda: _output_matches(log1, log2)
        self.failif(not utils.wait_for(match, 5), "Outputs of log1 and "
                    "log2 are not the same:\n%s" % error_msg(log_us))
        # Generate output to stderr
        for _ in xrange(5):
            prefix = utils.generate_random_string(5)
            dkrcmd.stdin("PREFIX='%s'\n" % prefix)
            line = utils.generate_random_string(10)
            dkrcmd.stdin(">&2 echo $PREFIX: %s\n" % line)
            line = "%s: %s" % (prefix, line)
            self.failif(wait_for_output(line, log1_out,
                                        self.sub_stuff['stderr']),
                        "Output '%s' did not occur in log1 output in 10s:\n%s"
                        % (line, error_msg(log_us)))
        self.failif(_output_matches(log1, log2), 'Outputs log1 and log2 are '
                    "the same even thought new input was generated and log2 "
                    "was executed without --follow:\n%s" % error_msg(log_us))
        # Stop the container
        dkrcmd.stdin('exit\n')
        dkrcmd.close()
        # Wait for docker logs exit
        log1.wait(10)
        # Start docker logs without follow and compare output
        log3 = DockerCmd(self, 'logs', [name], verbose=False)
        log_us['log3'] = log3
        log3.execute()
        match = lambda: _output_matches(log1, log3)
        self.failif(not utils.wait_for(match, 5), "Outputs of log1 and "
                    "log3 are not the same:\n%s" % error_msg(log_us))
Example 25
        def test(self):
            super(test_multihost_copy, self).test()
            copy_timeout = int(params.get("copy_timeout", 480))
            checksum_timeout = int(params.get("checksum_timeout", 180))

            pid = None
            sync_id = {
                'src': self.srchost,
                'dst': self.dsthost,
                "type": "file_trasfer"
            }
            filename = "orig"

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                vm.monitor.migrate_set_speed("1G")
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                cdrom = cdrom_dev_list[-1]
                mount_point = get_cdrom_mount_point(session, cdrom, params)
                mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
                src_file = params["src_file"] % (mount_point, filename)
                dst_file = params["dst_file"] % filename
                copy_file_cmd = params["copy_file_cmd"] % (mount_point,
                                                           filename)
                remove_file_cmd = params["remove_file_cmd"] % filename
                md5sum_cmd = params["md5sum_cmd"]
                if params["os_type"] != "windows":
                    error.context("Mount and copy data")
                    session.cmd(mount_cmd, timeout=30)

                error.context("File copying test")
                session.cmd(copy_file_cmd)

                pid = disk_copy(vm, src_file, dst_file, copy_timeout)

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=cdrom_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Runs on the destination host
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                error.context("Wait for copy finishing.")

                def is_copy_done():
                    if params["os_type"] == "windows":
                        cmd = "tasklist /FI \"PID eq %s\"" % pid
                    else:
                        cmd = "ps -p %s" % pid
                    return session.cmd_status(cmd) != 0

                if utils.wait_for(is_copy_done, timeout=copy_timeout) is None:
                    raise error.TestFail("Wait for file copy finish timeout")

                error.context("Compare file on disk and on cdrom")
                f1_hash = session.cmd("%s %s" % (md5sum_cmd, dst_file),
                                      timeout=checksum_timeout).split()[0]
                f2_hash = session.cmd("%s %s" % (md5sum_cmd, src_file),
                                      timeout=checksum_timeout).split()[0]
                if f1_hash.strip() != f2_hash.strip():
                    raise error.TestFail("On disk and on cdrom files are"
                                         " different, md5 mismatch")
                session.cmd(remove_file_cmd)

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'Finish_cdrom_test', login_timeout)
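
All of the examples above follow the same pattern: poll a condition with utils.wait_for(func, timeout, step=..., text=...) and treat a None return value as a timeout. The sketch below is a minimal, self-contained illustration of that pattern; this wait_for is a simplified stand-in written for this note, not the autotest/avocado implementation, and the condition and error message are invented.

import time


def wait_for(func, timeout, step=1.0, text=None):
    """Poll func() every `step` seconds until it returns a truthy value
    or `timeout` seconds elapse; return that value, or None on timeout."""
    end_time = time.time() + timeout
    while time.time() < end_time:
        if text:
            print(text)
        result = func()
        if result:
            return result
        time.sleep(step)
    return None


# Typical usage, mirroring the tests above: fail when wait_for() times out.
if wait_for(lambda: True, timeout=10, step=0.1) is None:
    raise RuntimeError("Timed out waiting for condition")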