Example #1
 def check_status_with_value(action_list, file_name):
     """
     Check the status of khugepaged after writing each value to the specified file.
     """
     for (a, r) in action_list:
         logging.info("Writing path %s: %s, expected khugepage rc: %s ",
                      file_name, a, r)
         try:
             file_object = open(file_name, "w")
             file_object.write(a)
             file_object.close()
         except IOError, error_detail:
             logging.info("IO Operation on path %s failed: %s",
                          file_name, error_detail)
         time.sleep(5)
         try:
             utils.run('pgrep khugepaged', verbose=False)
             if r != 0:
                 raise THPKhugepagedError("Khugepaged still alive when"
                                          "transparent huge page is "
                                          "disabled")
         except error.CmdError:
             if r == 0:
                 raise THPKhugepagedError("Khugepaged could not be set to"
                                          "status %s" % a)
Example #2
File: kvm_stat.py Project: Poohby/autotest
    def initialize(self):
        """
        Gets path of kvm_stat and verifies if debugfs needs to be mounted.
        """
        self.is_enabled = False

        kvm_stat_installed = False
        try:
            self.stat_path = os_dep.command('kvm_stat')
            kvm_stat_installed = True
        except ValueError:
            logging.error('Command kvm_stat not present')

        if kvm_stat_installed:
            try:
                utils.run("%s --batch" % self.stat_path)
                self.is_enabled = True
            except error.CmdError, e:
                if 'debugfs' in str(e):
                    try:
                        utils.run('mount -t debugfs debugfs /sys/kernel/debug')
                    except error.CmdError, e:
                        logging.error('Failed to mount debugfs:\n%s', str(e))
                else:
                    logging.error('Failed to execute kvm_stat:\n%s', str(e))
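
Note that this path mounts debugfs but never re-runs kvm_stat, so is_enabled stays False even when the mount succeeds. A possible follow-up for the end of the mount branch (an assumption, not part of the original):

    # Hypothetical retry after mounting debugfs.
    try:
        utils.run("%s --batch" % self.stat_path)
        self.is_enabled = True
    except error.CmdError, e:
        logging.error('kvm_stat still failing after mounting '
                      'debugfs:\n%s', str(e))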
Example #3
    def _build(self):
        make_jobs = utils.count_cpus()
        cfg = './configure'
        self.modules_build_succeed = False
        if self.kmod_srcdir:
            logging.info('Building KVM modules')
            os.chdir(self.kmod_srcdir)
            module_build_steps = [cfg,
                                  'make clean',
                                  'make sync LINUX=%s' % self.kernel_srcdir,
                                  'make']
        elif self.kernel_srcdir:
            logging.info('Building KVM modules')
            os.chdir(self.userspace_srcdir)
            cfg += ' --kerneldir=%s' % self.host_kernel_srcdir
            module_build_steps = [cfg,
                            'make clean',
                            'make -C kernel LINUX=%s sync' % self.kernel_srcdir]
        else:
            module_build_steps = []

        try:
            if module_build_steps:
                for step in module_build_steps:
                    utils.run(step)
                self.modules_build_succeed = True
        except error.CmdError, e:
            logging.error("KVM modules build failed to build: %s" % e)
Example #4
    def _clean_previous_installs(self):
        kill_qemu_processes()
        removable_packages = ""
        for pkg in self.pkg_list:
            removable_packages += " %s" % pkg

        utils.run("yum remove -y %s" % removable_packages)
Example #5
    def preseed_initrd(self):
        """
        Puts a preseed file inside a gz compressed initrd file.

        Debian and Ubuntu use preseed as the OEM install mechanism. The only
        way to get fully automated setup without resorting to kernel params
        is to add a preseed.cfg file at the root of the initrd image.
        """
        logging.debug("Remastering initrd.gz file with preseed file")
        dest_fname = 'preseed.cfg'
        remaster_path = os.path.join(self.image_path, "initrd_remaster")
        if not os.path.isdir(remaster_path):
            os.makedirs(remaster_path)

        base_initrd = os.path.basename(self.initrd)
        os.chdir(remaster_path)
        utils.run("gzip -d < ../%s | cpio --extract --make-directories "
                  "--no-absolute-filenames" % base_initrd, verbose=DEBUG)
        utils.run("cp %s %s" % (self.unattended_file, dest_fname),
                  verbose=DEBUG)

        if self.params.get("vm_type") == "libvirt":
            utils.run("find . | cpio -H newc --create > ../%s.img" %
                      base_initrd.rstrip(".gz"), verbose=DEBUG)
        else:
            utils.run("find . | cpio -H newc --create | gzip -9 > ../%s" %
                      base_initrd, verbose=DEBUG)

        os.chdir(self.image_path)
        utils.run("rm -rf initrd_remaster", verbose=DEBUG)
        contents = open(self.unattended_file).read()

        logging.debug("Unattended install contents:")
        for line in contents.splitlines():
            logging.debug(line)
Example #6
    def _build(self):
        make_jobs = utils.count_cpus()
        cfg = './configure'
        if self.kmod_srcdir:
            logging.info('Building KVM modules')
            os.chdir(self.kmod_srcdir)
            module_build_steps = [cfg,
                                  'make clean',
                                  'make sync LINUX=%s' % self.kernel_srcdir,
                                  'make']
        elif self.kernel_srcdir:
            logging.info('Building KVM modules')
            os.chdir(self.userspace_srcdir)
            cfg += ' --kerneldir=%s' % self.host_kernel_srcdir
            module_build_steps = [cfg,
                            'make clean',
                            'make -C kernel LINUX=%s sync' % self.kernel_srcdir]
        else:
            module_build_steps = []

        for step in module_build_steps:
            utils.run(step)

        logging.info('Building KVM userspace code')
        os.chdir(self.userspace_srcdir)
        cfg += ' --prefix=%s' % self.prefix
        if "--disable-strip" in self.configure_options:
            cfg += ' --disable-strip'
        if self.extra_configure_options:
            cfg += ' %s' % self.extra_configure_options
        utils.system(cfg)
        utils.system('make clean')
        utils.system('make -j %s' % make_jobs)
Example #7
    def setup_url(self):
        """
        Download the vmlinuz and initrd.img from URL.
        """
        # it's only necessary to download kernel/initrd if running bare qemu
        if self.vm_type == 'kvm':
            error.context("downloading vmlinuz/initrd.img from %s" % self.url)
            os.chdir(self.image_path)
            kernel_cmd = "wget -q %s/%s/%s" % (self.url,
                                               self.boot_path,
                                               os.path.basename(self.kernel))
            initrd_cmd = "wget -q %s/%s/%s" % (self.url,
                                               self.boot_path,
                                               os.path.basename(self.initrd))

            if os.path.exists(self.kernel):
                os.remove(self.kernel)
            if os.path.exists(self.initrd):
                os.remove(self.initrd)

            utils.run(kernel_cmd, verbose=DEBUG)
            utils.run(initrd_cmd, verbose=DEBUG)

        elif self.vm_type == 'libvirt':
            logging.info("Not downloading vmlinuz/initrd.img from %s, "
                         "letting virt-install do it instead")

        else:
            logging.info("No action defined/needed for the current virt "
                         "type: '%s'" % self.vm_type)
Example #8
    def setup_cdrom(self):
        """
        Mount cdrom and copy vmlinuz and initrd.img.
        """
        error.context("Copying vmlinuz and initrd.img from install cdrom %s" %
                      self.cdrom_cd1)
        m_cmd = ('mount -t iso9660 -v -o loop,ro %s %s' %
                 (self.cdrom_cd1, self.cdrom_cd1_mount))
        utils.run(m_cmd)

        try:
            if not os.path.isdir(self.image_path):
                os.makedirs(self.image_path)
            kernel_fetch_cmd = ("cp %s/%s/%s %s" %
                                (self.cdrom_cd1_mount, self.boot_path,
                                 os.path.basename(self.kernel), self.kernel))
            utils.run(kernel_fetch_cmd)
            initrd_fetch_cmd = ("cp %s/%s/%s %s" %
                                (self.cdrom_cd1_mount, self.boot_path,
                                 os.path.basename(self.initrd), self.initrd))
            utils.run(initrd_fetch_cmd)
            if self.unattended_file.endswith('.preseed'):
                self.preseed_initrd()
            # Virtinstall command needs files named "vmlinuz" and "initrd.img"
            elif self.params.get("vm_type") == "libvirt":
                os.chdir(self.image_path)
                base_kernel = os.path.basename(self.kernel)
                base_initrd = os.path.basename(self.initrd)
                if base_kernel != 'vmlinuz':
                    utils.run("mv %s vmlinuz" % base_kernel)
                if base_initrd != 'initrd.img':
                    utils.run("mv %s initrd.img" % base_initrd)

        finally:
            cleanup(self.cdrom_cd1_mount)
Example #9
    def setup_url_auto(self):
        """
        Configures the builtin web server for serving content
        """
        global _url_auto_content_server_thread
        global _url_auto_content_server_thread_event

        logging.debug("starting unattended content web server")

        if self.params.get('cdrom_cd1'):
            # setup and mount cdrom contents to be served by http server
            m_cmd = ('mount -t iso9660 -v -o loop,ro %s %s' %
                     (self.cdrom_cd1, self.cdrom_cd1_mount))
            utils.run(m_cmd)

        self.url_auto_content_port = virt_utils.find_free_port(
            8100,
            8199,
            self.url_auto_content_ip)

        if _url_auto_content_server_thread is None:
            _url_auto_content_server_thread_event = threading.Event()
            _url_auto_content_server_thread = threading.Thread(
                target=virt_http_server.http_server,
                args=(self.url_auto_content_port, self.cdrom_cd1_mount,
                      terminate_auto_content_server_thread))
            _url_auto_content_server_thread.start()

        auto_content_url = 'http://%s:%s' % (self.url_auto_content_ip,
                                             self.url_auto_content_port)
        self.params['auto_content_url'] = auto_content_url
Example #10
def run_enospc(test, params, env):
    """
    ENOSPC test

    1) Create a virtual disk on lvm
    2) Boot up guest with two disks
    3) Continually write data to second disk
    4) Check images and extend second disk when no space
    5) Continue paused guest
    6) Repeat step 3~5 several times

    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    enospc_config = EnospcConfig(test, params)
    enospc_config.setup()
    vm = env.get_vm(params["main_vm"])
    vm.create()
    login_timeout = int(params.get("login_timeout", 360))
    session_serial = vm.wait_for_serial_login(timeout=login_timeout)

    vgtest_name = params.get("vgtest_name")
    lvtest_name = params.get("lvtest_name")
    logical_volume = "/dev/%s/%s" % (vgtest_name, lvtest_name)

    drive_format = params.get("drive_format")
    if drive_format == "virtio":
        devname = "/dev/vdb"
    elif drive_format == "ide":
        output = session_serial.cmd_output("dir /dev")
        devname = "/dev/" + re.findall("([sh]db)\s", output)[0]
    elif drive_format == "scsi":
        devname = "/dev/sdb"
    cmd = params.get("background_cmd")
    cmd %= devname
    logging.info("Sending background cmd '%s'", cmd)
    session_serial.sendline(cmd)

    iterations = int(params.get("repeat_time", 40))
    i = 0
    pause_n = 0
    while i < iterations:
        status = vm.monitor.cmd("info status")
        logging.debug(status)
        if "paused" in status:
            pause_n += 1
            logging.info("Checking all images in use by the VM")
            for image_name in vm.params.objects("images"):
                image_params = vm.params.object_params(image_name)
                try:
                    virt_vm.check_image(image_params, test.bindir)
                except (virt_vm.VMError, error.TestWarn), e:
                    logging.error(e)
            logging.info("Guest paused, extending Logical Volume size")
            try:
                utils.run("lvextend -L +200M %s" % logical_volume)
            except error.CmdError, e:
                logging.debug(e.result_obj.stdout)
            vm.monitor.cmd("cont")
Example #11
 def plot_2d_graphs(self):
     """
     For each one of the throughput parameters, generate a set of gnuplot
     commands that will create a parametric surface with file size vs.
     record size vs. throughput.
     """
     datasource_2d = os.path.join(self.output_dir, '2d-datasource-file')
     for index, label in zip(range(2, 15), _LABELS[2:]):
         commands_path = os.path.join(self.output_dir, '2d-%s.do' % label)
         commands = ""
         commands += "set title 'Iozone performance: %s'\n" % label
         commands += "set logscale x\n"
         commands += "set xlabel 'File size (KB)'\n"
         commands += "set ylabel 'Througput (MB/s)'\n"
         commands += "set terminal png small size 450 350\n"
         commands += "set output '%s'\n" % os.path.join(self.output_dir,
                                                        '2d-%s.png' % label)
         commands += ("plot '%s' using 1:%s title '%s' with lines \n" %
                      (datasource_2d, index, label))
         commands_file = open(commands_path, 'w')
         commands_file.write(commands)
         commands_file.close()
         try:
             utils.run("%s %s" % (self.gnuplot, commands_path))
         except error.CmdError, e:
             logging.error("Problem plotting from commands file %s: %s",
                           commands_file, str(e))
Example #12
    def init_077_rbd_dev_create(self):
        self.rbd_dev_ids = {}
        for id_ in roles_of_type(self.my_roles, "client"):
            if not (self.client_is_type(id_, "rbd") and self.get_client_config(id_, "rbd_kernel_mount")):
                continue

            image_name = "testimage{id}".format(id=id_)
            secret = self.get_secret(id_)

            with open("/sys/bus/rbd/add", "w") as add_file:
                add_file.write(
                    "{mons} name={name},secret={secret} rbd {image}".format(
                        mons=",".join(self.get_mons().values()), name=id_, secret=secret, image=image_name
                    )
                )

            basepath = "/sys/bus/rbd/devices"
            for dev_id in os.listdir(basepath):
                devpath = os.path.join(basepath, dev_id)
                name = utils.run("cat {file}".format(file=os.path.join(devpath, "name")))
                name = name.stdout.rstrip("\n")
                major = utils.run("cat {file}".format(file=os.path.join(devpath, "major")))
                major = int(major.stdout.rstrip("\n"))

                if name == image_name:
                    try:
                        os.stat("/dev/rbd")
                    except OSError as err:
                        import errno

                        assert err.errno == errno.ENOENT
                        os.mkdir("/dev/rbd")

                    os.mknod("/dev/rbd/{image}".format(image=image_name), 0600 | stat.S_IFBLK, os.makedev(major, 0))
                    self.rbd_dev_ids[image_name] = dev_id
Example #13
def kill_qemu_processes():
    """
    Kills all qemu processes, also kills all processes holding /dev/kvm down.
    """
    logging.debug("Killing any qemu processes that might be left behind")
    utils.run("pkill qemu", ignore_status=True)
    # Let's double check to see if some other process is holding /dev/kvm
    # (it's a device node, not a regular file, hence os.path.exists).
    if os.path.exists("/dev/kvm"):
        utils.run("fuser -k /dev/kvm", ignore_status=True)
Example #14
    def _load_modules(self, mod_list):
        """
        Load the KVM modules

        May be overridden by subclasses.
        """
        logging.info("Loading KVM modules")
        for module in mod_list:
            utils.run("modprobe %s" % module)
Example #15
    def _install(self):
        os.chdir(self.userspace.srcdir)
        utils.run('make install')

        if self.path_to_roms:
            install_roms(self.path_to_roms, self.prefix)
        self.install_unittests()
        create_symlinks(test_bindir=self.test_bindir, prefix=self.prefix,
                        bin_list=None,
                        unittest=self.unittest_prefix)
Example #16
 def fetch_and_patch(self):
     if not self.repo:
         return
     virt_utils.get_git_branch(self.repo, self.branch, self.srcdir,
                              self.commit, self.lbranch)
     os.chdir(self.srcdir)
     for patch in self.patches:
         utils.get_file(patch, os.path.join(self.srcdir,
                                            os.path.basename(patch)))
         utils.run('patch -p1 < %s' % os.path.basename(patch))
Example #17
File: libvirt_vm.py Project: csdb/autotest
def libvirtd_restart():
    """
    Restart libvirt daemon.
    """
    try:
        utils.run("service libvirtd restart")
        logging.debug("Restarted libvirtd successfuly")
        return True
    except error.CmdError, detail:
        logging.error("Failed to restart libvirtd: %s" % detail)
        return False
Example #18
    def close(self):
        error.context("Creating unattended install CD image %s" % self.path)
        g_cmd = ('mkisofs -o %s -max-iso9660-filenames '
                 '-relaxed-filenames -D --input-charset iso8859-1 '
                 '%s' % (self.path, self.mount))
        utils.run(g_cmd, verbose=DEBUG)

        os.chmod(self.path, 0755)
        cleanup(self.mount)
        logging.debug("unattended install CD image %s successfuly created",
                      self.path)
Example #19
 def _build(self):
     make_jobs = utils.count_cpus()
     os.chdir(self.srcdir)
     # For testing purposes, it's better to build qemu binaries with
     # debugging symbols, so we can extract more meaningful stack traces.
     cfg = "./configure --prefix=%s" % self.prefix
     if "--disable-strip" in self.configure_options:
         cfg += " --disable-strip"
     steps = [cfg, "make clean", "make -j %s" % make_jobs]
     logging.info("Building KVM")
     for step in steps:
         utils.run(step)
Example #20
 def _install(self):
     os.chdir(self.srcdir)
     logging.info("Installing KVM userspace")
     if self.repo_type == 1:
         utils.run("make -C qemu install")
     elif self.repo_type == 2:
         utils.run("make install")
     if self.path_to_roms:
         install_roms(self.path_to_roms, self.prefix)
     self.install_unittests()
     create_symlinks(test_bindir=self.test_bindir,
                     prefix=self.prefix,
                     unittest=self.unittest_prefix)
Example #21
 def _remove_entries_partition(self):
     """
      Removes the entries under /dev/mapper for the partition associated
      with the loopback device.
     """
     logging.debug('Removing the entry on /dev/mapper for %s loop dev',
                   self.loop)
     try:
         cmd = 'kpartx -d %s' % self.loop
         utils.run(cmd)
     except error.CmdError, e:
         e_msg = 'Error removing entries for loop %s: %s' % (self.loop, e)
         raise error.AutotestError(e_msg)
Example #22
 def _detach_img_loop(self):
     """
     Detaches the image file from the loopback device.
     """
     logging.debug('Detaching image %s from loop device %s', self.img,
                   self.loop)
     try:
         cmd = 'losetup -d %s' % self.loop
         utils.run(cmd)
     except error.CmdError, e:
         e_msg = ('Error detaching image %s from loop device %s: %s' %
                 (self.img, self.loop, e))
         raise error.AutotestError(e_msg)
Example #23
File: cpuflags.py Project: Poohby/autotest
 def run_stress(timeout, flags, smp):
     """
     Run stress on vm for timeout time.

     Returns True if cpuflags-test failed (error.CmdError was raised).
     """
     ret = False
     flags = check_cpuflags_work(flags)
     try:
         utils.run("./cpuflags-test --stress %s%s" %
                   (smp, virt_utils.kvm_flags_to_stresstests(flags[0])),
                   timeout)
     except error.CmdError:
         ret = True
     return ret
Example #24
def cleanup(dir):
    """
    If dir is a mountpoint, do what is possible to unmount it. Afterwards,
    try to remove it.

    @param dir: Directory to be cleaned up.
    """
    error.context("cleaning up unattended install directory %s" % dir)
    if os.path.ismount(dir):
        utils.run("fuser -k %s" % dir, ignore_status=True)
        utils.run("umount %s" % dir)
    if os.path.isdir(dir):
        shutil.rmtree(dir)
Example #25
    def _create_disk_img(self, img_path, size):
        """
        Creates a disk image using dd.

        @param img_path: Path to the desired image file.
        @param size: Size of the desired image, in 1024-byte (KB) blocks
                     (dd below runs with bs=1024 and count=size).
        @returns: Path of the image created.
        """
        logging.debug('Creating disk image %s, size = %d KB', img_path, size)
        try:
            cmd = 'dd if=/dev/zero of=%s bs=1024 count=%d' % (img_path, size)
            utils.run(cmd)
        except error.CmdError, e:
            e_msg = 'Error creating disk image %s: %s' % (img_path, e)
            raise error.AutotestError(e_msg)
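
A usage sketch under that size convention (the path and size are illustrative):

    # Hypothetical call: dd writes `size` blocks of 1024 bytes,
    # so this creates a ~10 MB image.
    self._create_disk_img('/tmp/vp_test.img', 10 * 1024)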
Example #26
 def run(self, logdir):
     env = os.environ.copy()
     if "PATH" not in env:
         env["PATH"] = "/usr/bin:/bin"
     logf_path = os.path.join(logdir, self.logf)
     stdin = open(os.devnull, "r")
     stderr = open(os.devnull, "w")
     stdout = open(logf_path, "w")
     try:
         subprocess.call(self.cmd, stdin=stdin, stdout=stdout, stderr=stderr, shell=True, env=env)
     finally:
         for f in (stdin, stdout, stderr):
             f.close()
         if self._compress_log and os.path.exists(logf_path):
             utils.run('gzip -9 "%s"' % logf_path, ignore_status=True, verbose=False)
Example #27
 def close(self):
     error.context("Creating unattended install CD image %s" % self.path)
     f = open(os.path.join(self.mount, 'isolinux', 'isolinux.cfg'), 'w')
     f.write('default /isolinux/vmlinuz append initrd=/isolinux/initrd.img '
             '%s\n' % self.extra_params)
     f.close()
     m_cmd = ('mkisofs -o %s -b isolinux/isolinux.bin -c isolinux/boot.cat '
              '-no-emul-boot -boot-load-size 4 -boot-info-table -f -R -J '
              '-V -T %s' % (self.path, self.mount))
     utils.run(m_cmd)
     os.chmod(self.path, 0755)
     cleanup(self.mount)
     cleanup(self.source_cdrom)
     logging.debug("unattended install CD image %s successfully created",
                   self.path)
Example #28
    def run_once(self):
        tests_fail = []
        tests_pass = []
        # Begin the CPU hotplug test.
        os.chdir(self.srcdir)
        result_cmd = utils.run('./runtests.sh', stdout_tee=sys.stdout)
        for line in result_cmd.stdout.splitlines():
            match = re.findall('^([\w:\.]+)\s+([A-Z]+):(.*)$', line)
            if match:
                info = {}
                info['testname'] = match[0][0]
                info['status'] = match[0][1]
                info['reason'] = match[0][2]
                if info['status'] == 'FAIL':
                    logging.info("%s: %s -> %s",
                                 info['testname'], info['status'],
                                 info['reason'])
                    tests_fail.append(info)
                elif info['status'] == 'PASS':
                    logging.info("%s: %s -> %s",
                                 info['testname'], info['status'],
                                 info['reason'])
                    tests_pass.append(info)

        if tests_fail:
            raise error.TestFail("%d from %d tests FAIL" %
                                 (len(tests_fail),
                                  len(tests_pass) + len(tests_fail)))
        else:
            logging.info("All %d tests PASS" % len(tests_pass))
Example #29
    def setup_url(self):
        """
        Download the vmlinuz and initrd.img from URL.
        """
        error.context("downloading vmlinuz and initrd.img from %s" % self.url)
        os.chdir(self.image_path)
        kernel_fetch_cmd = "wget -q %s/%s/%s" % (self.url, self.boot_path, os.path.basename(self.kernel))
        initrd_fetch_cmd = "wget -q %s/%s/%s" % (self.url, self.boot_path, os.path.basename(self.initrd))

        if os.path.exists(self.kernel):
            os.remove(self.kernel)
        if os.path.exists(self.initrd):
            os.remove(self.initrd)

        utils.run(kernel_fetch_cmd)
        utils.run(initrd_fetch_cmd)
Example #30
 def _start_dhcp_server(self):
     utils.run("service dnsmasq stop")
     utils.run("dnsmasq --strict-order --bind-interfaces "
               "--listen-address %s.1 --dhcp-range %s.2,%s.254 "
               "--dhcp-lease-max=253 "
               "--dhcp-no-override "
               "--pid-file=/tmp/dnsmasq.pid "
               "--log-facility=/tmp/dnsmasq.log" %
               (self.subnet, self.subnet, self.subnet))
     self.dhcp_server_pid = None
     try:
         self.dhcp_server_pid = int(open('/tmp/dnsmasq.pid', 'r').read())
     except ValueError:
         raise PrivateBridgeError(self.brname)
     logging.debug("Started internal DHCP server with PID %s",
                   self.dhcp_server_pid)
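
For these dnsmasq arguments to be well formed, self.subnet is presumably the first three octets of the bridge network. An illustrative expansion with an assumed value:

    # Hypothetical subnet prefix; the helper appends the host octets.
    subnet = "192.168.58"
    cmd = ("dnsmasq --strict-order --bind-interfaces "
           "--listen-address %s.1 --dhcp-range %s.2,%s.254" %
           (subnet, subnet, subnet))
    # cmd now listens on 192.168.58.1 and serves 192.168.58.2-254.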
Example #31
    def run_once(self, size='800x600', hasty=False, min_score=None):
        dep = 'glmark2'
        dep_dir = os.path.join(self.autodir, 'deps', dep)
        self.job.install_pkg(dep, 'dep', dep_dir)

        glmark2 = os.path.join(self.autodir, 'deps/glmark2/glmark2')
        if not os.path.exists(glmark2):
            raise error.TestFail('Failed: Could not find test binary.')

        glmark2_data = os.path.join(self.autodir, 'deps/glmark2/data')

        options = []
        options.append('--data-path %s' % glmark2_data)
        options.append('--size %s' % size)
        options.append('--annotate')
        if hasty:
            options.append('-b :duration=0.2')
        else:
            options.append('-b :duration=2')
        cmd = glmark2 + ' ' + ' '.join(options)

        if os.environ.get('CROS_FACTORY'):
            from autotest_lib.client.cros import factory_setup_modules
            from cros.factory.test import ui
            ui.start_reposition_thread('^glmark')

        # TODO(ihf): Switch this test to use perf.PerfControl like
        #            graphics_GLBench once it is stable. crbug.com/344766.
        if not hasty:
            if not utils.wait_for_idle_cpu(60.0, 0.1):
                if not utils.wait_for_idle_cpu(20.0, 0.2):
                    raise error.TestFail('Failed: Could not get idle CPU.')
            if not utils.wait_for_cool_machine():
                raise error.TestFail('Failed: Could not get cold machine.')

        # In this test we are manually handling stderr, so expected=True.
        # Strangely, autotest treats CmdError/CmdTimeoutError as warnings only.
        try:
            result = utils.run(cmd,
                               stderr_is_expected=True,
                               stdout_tee=utils.TEE_TO_LOGS,
                               stderr_tee=utils.TEE_TO_LOGS)
        except error.CmdError:
            raise error.TestFail('Failed: CmdError running %s' % cmd)
        except error.CmdTimeoutError:
            raise error.TestFail('Failed: CmdTimeout running %s' % cmd)

        logging.info(result)
        for line in result.stderr.splitlines():
            if line.startswith('Error:'):
                # Line already starts with 'Error:', no need to prepend.
                raise error.TestFail(line)

        # Numbers in hasty mode are not as reliable, so don't send them to
        # the dashboard etc.
        if not hasty:
            keyvals = {}
            score = None
            test_re = re.compile(GLMARK2_TEST_RE)
            for line in result.stdout.splitlines():
                match = test_re.match(line)
                if match:
                    test = '%s.%s' % (match.group('scene'),
                                      match.group('options'))
                    test = test.translate(description_table,
                                          description_delete)
                    frame_time = match.group('frametime')
                    keyvals[test] = frame_time
                    self.output_perf_value(description=test,
                                           value=frame_time,
                                           units='ms',
                                           higher_is_better=False)
                else:
                    # glmark2 outputs the final performance score as:
                    #  glmark2 Score: 530
                    match = re.findall(GLMARK2_SCORE_RE, line)
                    if match:
                        score = int(match[0])
            if score is None:
                raise error.TestFail('Failed: Unable to read benchmark score')
            # Output numbers for plotting by harness.
            logging.info('GLMark2 score: %d', score)
            if os.environ.get('CROS_FACTORY'):
                from autotest_lib.client.cros import factory_setup_modules
                from cros.factory.event_log import EventLog
                EventLog('graphics_GLMark2').Log('glmark2_score', score=score)
            keyvals['glmark2_score'] = score
            self.write_perf_keyval(keyvals)
            self.output_perf_value(description='Score',
                                   value=score,
                                   units='score',
                                   higher_is_better=True)

            if min_score is not None and score < min_score:
                raise error.TestFail(
                    'Failed: Benchmark score %d < %d (minimum score '
                    'requirement)' % (score, min_score))
Example #32
def run_netperf(test, params, env):
    """
    Network stress test with netperf.

    1) Boot up a VM with multiple nics.
    2) Launch netserver on guest.
    3) Execute multiple netperf clients on host in parallel
       with different protocols.
    4) Output the test result.

    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)
    session.close()
    session_serial = vm.wait_for_serial_login(timeout=login_timeout)

    netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2")
    setup_cmd = params.get("setup_cmd")

    firewall_flush = "iptables -F"
    session_serial.cmd_output(firewall_flush)
    try:
        utils.run("iptables -F")
    except:
        pass

    for i in params.get("netperf_files").split():
        vm.copy_files_to(os.path.join(netperf_dir, i), "/tmp")

    try:
        session_serial.cmd(firewall_flush)
    except aexpect.ShellError:
        logging.warning("Could not flush firewall rules on guest")

    session_serial.cmd(setup_cmd % "/tmp", timeout=200)
    session_serial.cmd(params.get("netserver_cmd") % "/tmp")

    if "tcpdump" in env and env["tcpdump"].is_alive():
        # Stop the background tcpdump process
        try:
            logging.debug("Stopping the background tcpdump")
            env["tcpdump"].close()
        except:
            pass

    def netperf(i=0):
        guest_ip = vm.get_address(i)
        logging.info("Netperf_%s: netserver %s" % (i, guest_ip))
        result_file = os.path.join(test.resultsdir,
                                   "output_%s_%s" % (test.iteration, i))
        list_fail = []
        result = open(result_file, "w")
        result.write("Netperf test results\n")

        for p in params.get("protocols").split():
            packet_size = params.get("packet_size", "1500")
            for size in packet_size.split():
                cmd = params.get("netperf_cmd") % (netperf_dir, p, guest_ip,
                                                   size)
                logging.info("Netperf_%s: protocol %s" % (i, p))
                try:
                    netperf_output = utils.system_output(cmd,
                                                         retain_output=True)
                    result.write("%s\n" % netperf_output)
                except:
                    logging.error("Test of protocol %s failed", p)
                    list_fail.append(p)

        result.close()
        if list_fail:
            raise error.TestFail("Some netperf tests failed: %s" %
                                 ", ".join(list_fail))

    try:
        logging.info("Setup and run netperf clients on host")
        utils.run(setup_cmd % netperf_dir)

        bg = []
        nic_num = len(params.get("nics").split())
        for i in range(nic_num):
            bg.append(virt_utils.Thread(netperf, (i, )))
            bg[i].start()

        completed = False
        while not completed:
            completed = True
            for b in bg:
                if b.isAlive():
                    completed = False
    finally:
        try:
            for b in bg:
                if b:
                    b.join()
        finally:
            session_serial.cmd_output("killall netserver")
Example #33
File: jumbo.py Project: ghat/honor7x
def run_jumbo(test, params, env):
    """
    Test the RX jumbo frame function of vnics:

    1) Boot the VM.
    2) Change the MTU of guest nics and host taps depending on the NIC model.
    3) Add the static ARP entry for guest NIC.
    4) Wait for the MTU ok.
    5) Verify the path MTU using ping.
    6) Ping the guest with large frames.
    7) Increment size ping.
    8) Flood ping the guest with large frames.
    9) Verify the path MTU.
    10) Recover the MTU.

    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    mtu = params.get("mtu", "1500")
    flood_time = params.get("flood_time", "300")
    max_icmp_pkt_size = int(mtu) - 28  # MTU minus 20-byte IP + 8-byte ICMP headers

    ifname = vm.get_ifname(0)
    ip = vm.get_address(0)
    if ip is None:
        raise error.TestError("Could not get the IP address")

    try:
        # Environment preparation
        ethname = virt_test_utils.get_linux_ifname(session, vm.get_mac_address(0))

        logging.info("Changing the MTU of guest ...")
        guest_mtu_cmd = "ifconfig %s mtu %s" % (ethname , mtu)
        session.cmd(guest_mtu_cmd)

        logging.info("Chaning the MTU of host tap ...")
        host_mtu_cmd = "ifconfig %s mtu %s" % (ifname, mtu)
        utils.run(host_mtu_cmd)

        logging.info("Add a temporary static ARP entry ...")
        arp_add_cmd = "arp -s %s %s -i %s" % (ip, vm.get_mac_address(0), ifname)
        utils.run(arp_add_cmd)

        def is_mtu_ok():
            s, o = virt_test_utils.ping(ip, 1, interface=ifname,
                                       packetsize=max_icmp_pkt_size,
                                       hint="do", timeout=2)
            return s == 0

        def verify_mtu():
            logging.info("Verify the path MTU")
            s, o = virt_test_utils.ping(ip, 10, interface=ifname,
                                       packetsize=max_icmp_pkt_size,
                                       hint="do", timeout=15)
            if s != 0:
                logging.error(o)
                raise error.TestFail("Path MTU is not as expected")
            if virt_test_utils.get_loss_ratio(o) != 0:
                logging.error(o)
                raise error.TestFail("Packet loss ratio during MTU "
                                     "verification is not zero")

        def flood_ping():
            logging.info("Flood with large frames")
            virt_test_utils.ping(ip, interface=ifname,
                                packetsize=max_icmp_pkt_size,
                                flood=True, timeout=float(flood_time))

        def large_frame_ping(count=100):
            logging.info("Large frame ping")
            s, o = virt_test_utils.ping(ip, count, interface=ifname,
                                       packetsize=max_icmp_pkt_size,
                                       timeout=float(count) * 2)
            ratio = virt_test_utils.get_loss_ratio(o)
            if ratio != 0:
                raise error.TestFail("Loss ratio of large frame ping is %s" %
                                     ratio)

        def size_increase_ping(step=random.randrange(90, 110)):
            logging.info("Size increase ping")
            for size in range(0, max_icmp_pkt_size + 1, step):
                logging.info("Ping %s with size %s", ip, size)
                s, o = virt_test_utils.ping(ip, 1, interface=ifname,
                                           packetsize=size,
                                           hint="do", timeout=1)
                if s != 0:
                    s, o = virt_test_utils.ping(ip, 10, interface=ifname,
                                               packetsize=size,
                                               adaptive=True, hint="do",
                                               timeout=20)

                    if virt_test_utils.get_loss_ratio(o) > int(params.get(
                                                      "fail_ratio", 50)):
                        raise error.TestFail("Ping loss ratio is greater "
                                             "than 50% for size %s" % size)

        logging.info("Waiting for the MTU to be OK")
        wait_mtu_ok = 10
        if not virt_utils.wait_for(is_mtu_ok, wait_mtu_ok, 0, 1):
            logging.debug(commands.getoutput("ifconfig -a"))
            raise error.TestError("MTU is not as expected even after %s "
                                  "seconds" % wait_mtu_ok)

        # Functional Test
        verify_mtu()
        large_frame_ping()
        size_increase_ping()

        # Stress test
        flood_ping()
        verify_mtu()

    finally:
        # Environment clean
        session.close()
        logging.info("Removing the temporary ARP entry")
        utils.run("arp -d %s -i %s" % (ip, ifname))
Example #34
    def run_once(self,
                 filename=None,
                 file_size=FILE_SIZE,
                 chunk_size=CHUNK_SIZE,
                 trim_ratio=TRIM_RATIO):
        """
        Executes the test and logs the output.
        @param filename:   file/disk name to test
                           default: spare partition of internal disk
        @param file_size:  size of data to test. default: 1GB
        @param chunk_size: size of chunk to calculate hash/trim. default: 64KB
        @param trim_ratio: list of ratio of file size to trim data
                           default: [0, 0.25, 0.5, 0.75, 1]
        """

        if not filename:
            self._diskname = utils.get_fixed_dst_drive()
            if self._diskname == utils.get_root_device():
                self._filename = utils.get_free_root_partition()
            else:
                self._filename = self._diskname
        else:
            self._filename = filename
            self._diskname = utils.get_disk_from_filename(filename)

        if file_size == 0:
            fulldisk = True
            file_size = utils.get_disk_size(self._filename)
            if file_size == 0:
                cmd = (
                    '%s seems to have 0 storage blocks. Is the media present?' %
                    filename)
                raise error.TestError(cmd)
        else:
            fulldisk = False

        # Make file size multiple of 4 * chunk size
        file_size -= file_size % (4 * chunk_size)

        if fulldisk:
            fio_file_size = 0
        else:
            fio_file_size = file_size

        logging.info('filename: %s, filesize: %d', self._filename, file_size)

        self._verify_trim_support(chunk_size)

        # Calculate hash value for zero'ed and one'ed data
        cmd = str('dd if=/dev/zero bs=%d count=1 | %s' %
                  (chunk_size, self.HASH_CMD))
        zero_hash = utils.run(cmd).stdout.strip()

        cmd = str("dd if=/dev/zero bs=%d count=1 | tr '\\0' '\\xff' | %s" %
                  (chunk_size, self.HASH_CMD))
        one_hash = utils.run(cmd).stdout.strip()

        trim_hash = ""

        # Write random data to disk
        chunk_count = file_size / chunk_size
        cmd = str('dd if=/dev/urandom of=%s bs=%d count=%d oflag=direct' %
                  (self._filename, chunk_size, chunk_count))
        utils.run(cmd)

        ref_hash = self._get_hash(chunk_count, chunk_size)

        # Check read speed/latency when reading real data.
        self.job.run_test('hardware_StorageFio',
                          disable_sysinfo=True,
                          filesize=fio_file_size,
                          requirements=[('4k_read_qd32', [])],
                          tag='before_trim')

        # Generate random order of chunk to trim
        trim_order = list(range(0, chunk_count))
        random.shuffle(trim_order)
        trim_status = [False] * chunk_count

        # Init stat variable
        data_verify_count = 0
        data_verify_match = 0
        trim_verify_count = 0
        trim_verify_zero = 0
        trim_verify_one = 0
        trim_verify_non_delete = 0
        trim_deterministic = True

        last_ratio = 0
        for ratio in trim_ratio:

            # Do trim
            begin_trim_chunk = int(last_ratio * chunk_count)
            end_trim_chunk = int(ratio * chunk_count)
            fd = os.open(self._filename, os.O_RDWR, 0666)
            for chunk in trim_order[begin_trim_chunk:end_trim_chunk]:
                self._do_trim(fd, chunk * chunk_size, chunk_size)
                trim_status[chunk] = True
            os.close(fd)
            last_ratio = ratio

            cur_hash = self._get_hash(chunk_count, chunk_size)

            trim_verify_count += int(ratio * chunk_count)
            data_verify_count += chunk_count - int(ratio * chunk_count)

            # Verify hash
            for cur, ref, trim in zip(cur_hash, ref_hash, trim_status):
                if trim:
                    if not trim_hash:
                        trim_hash = cur
                    elif cur != trim_hash:
                        trim_deterministic = False

                    if cur == zero_hash:
                        trim_verify_zero += 1
                    elif cur == one_hash:
                        trim_verify_one += 1
                    elif cur == ref:
                        trim_verify_non_delete += 1
                else:
                    if cur == ref:
                        data_verify_match += 1

        keyval = dict()
        keyval['data_verify_count'] = data_verify_count
        keyval['data_verify_match'] = data_verify_match
        keyval['trim_verify_count'] = trim_verify_count
        keyval['trim_verify_zero'] = trim_verify_zero
        keyval['trim_verify_one'] = trim_verify_one
        keyval['trim_verify_non_delete'] = trim_verify_non_delete
        keyval['trim_deterministic'] = trim_deterministic
        self.write_perf_keyval(keyval)

        # Check read speed/latency when reading from trimmed data.
        self.job.run_test('hardware_StorageFio',
                          disable_sysinfo=True,
                          filesize=fio_file_size,
                          requirements=[('4k_read_qd32', [])],
                          tag='after_trim')

        if data_verify_match < data_verify_count:
            reason = 'Failed to verify untrimmed data.'
            msg = utils.get_storage_error_msg(self._diskname, reason)
            raise error.TestFail(msg)

        if trim_verify_zero < trim_verify_count:
            reason = 'Trimmed data are not zeroed.'
            msg = utils.get_storage_error_msg(self._diskname, reason)
            if utils.is_disk_scsi(self._diskname):
                if utils.verify_hdparm_feature(self._diskname,
                                               self.hdparm_rzat):
                    msg += ' Disk claims deterministic read zero after trim.'
                    raise error.TestFail(msg)
            raise error.TestNAError(msg)
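
The helpers _get_hash and _do_trim are not shown. For _do_trim, a plausible Linux implementation issues the BLKDISCARD ioctl on the open block device; this is an illustrative guess, not necessarily the test's actual helper:

    import fcntl
    import struct

    BLKDISCARD = 0x1277  # _IO(0x12, 119), from <linux/fs.h>

    def _do_trim(self, fd, offset, size):
        # Hypothetical sketch: discard (trim) `size` bytes starting at
        # byte `offset` of the open block device `fd`.
        fcntl.ioctl(fd, BLKDISCARD, struct.pack('QQ', offset, size))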
Example #35
    # To avoid problems, let's make the test use the current AUTODIR
    # (autotest client path) location
    autotest_path = os.environ['AUTODIR']

    # tar the contents of bindir/autotest
    cmd = "tar cvjf %s %s/*" % (compressed_autotest_path, autotest_path)
    # Until we have nested virtualization, we don't need the kvm test :)
    cmd += " --exclude=%s/tests/kvm" % autotest_path
    cmd += " --exclude=%s/results" % autotest_path
    cmd += " --exclude=%s/tmp" % autotest_path
    cmd += " --exclude=%s/control*" % autotest_path
    cmd += " --exclude=*.pyc"
    cmd += " --exclude=*.svn"
    cmd += " --exclude=*.git"
    utils.run(cmd)

    # Copy autotest.tar.bz2
    copy_if_hash_differs(vm, compressed_autotest_path,
                         compressed_autotest_path)

    # Extract autotest.tar.bz2
    extract(vm, compressed_autotest_path, "/")

    vm.copy_files_to(control_path, os.path.join(autotest_path, 'control'))

    # Run the test
    logging.info("Running autotest control file %s on guest, timeout %ss",
                 os.path.basename(control_path), timeout)
    session.cmd("cd %s" % autotest_path)
    try:
Example #36
 def setup(self):
     logging.debug("Starting enospc setup")
     error.context("performing enospc setup")
     display_attributes(self)
     # Double check if there aren't any leftovers
     self.cleanup()
     try:
         utils.run("%s create -f raw %s 10G" %
                   (self.qemu_img_binary, self.raw_file_path))
         # Associate a loopback device with the raw file.
          # This is subject to race conditions, which is why we associate
          # it with the raw file as quickly as possible.
         l_result = utils.run("losetup -f")
         utils.run("losetup -f %s" % self.raw_file_path)
         self.loopback = l_result.stdout.strip()
         # Add the loopback device configured to the list of pvs
         # recognized by LVM
         utils.run("pvcreate %s" % self.loopback)
         utils.run("vgcreate %s %s" % (self.vgtest_name, self.loopback))
         # Create an lv inside the vg with starting size of 200M
         utils.run("lvcreate -L 200M -n %s %s" %
                   (self.lvtest_name, self.vgtest_name))
         # Create a 10GB qcow2 image in the logical volume
         utils.run("%s create -f qcow2 %s 10G" %
                   (self.qemu_img_binary, self.lvtest_device))
         # Let's symlink the logical volume with the image name that autotest
         # expects this device to have
         os.symlink(self.lvtest_device, self.qcow_file_path)
     except Exception, e:
         self.cleanup()
         raise
Example #37
 def cleanup(self):
     error.context("performing enospc cleanup")
     if os.path.isfile(self.lvtest_device):
         utils.run("fuser -k %s" % self.lvtest_device)
         time.sleep(2)
     l_result = utils.run("lvdisplay")
     # Let's remove all volumes inside the volume group created
     if self.lvtest_name in l_result.stdout:
         utils.run("lvremove -f %s" % self.lvtest_device)
     # Now, removing the volume group itself
     v_result = utils.run("vgdisplay")
     if self.vgtest_name in v_result.stdout:
         utils.run("vgremove -f %s" % self.vgtest_name)
     # Now, if we can, let's remove the physical volume from lvm list
     if self.loopback:
         p_result = utils.run("pvdisplay")
         if self.loopback in p_result.stdout:
             utils.run("pvremove -f %s" % self.loopback)
     l_result = utils.run('losetup -a')
     if self.loopback and (self.loopback in l_result.stdout):
         try:
             utils.run("losetup -d %s" % self.loopback)
         except error.CmdError:
             logging.error("Failed to liberate loopback %s", self.loopback)
     if os.path.islink(self.qcow_file_path):
         os.remove(self.qcow_file_path)
     if os.path.isfile(self.raw_file_path):
         os.remove(self.raw_file_path)
Example #38
def setup(top_dir):
    # Copy the gtest files from the SYSROOT to the client
    gtest = utils.run(os.environ['SYSROOT'] + '/usr/bin/gtest-config --libdir')
    os.chdir(os.environ['SYSROOT'] + '/' + gtest.stdout.rstrip())
    utils.run('cp libgtest* ' + top_dir)
Example #39
 def stop_service(self):
     """
     Stops the NFS server.
     """
     utils.run(self.stop_cmd)
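
The class presumably defines a symmetric start helper; a hedged sketch (start_cmd mirrors stop_cmd and is not shown in the original):

 def start_service(self):
     """
     Starts the NFS server (sketch; assumes a start_cmd attribute
     symmetric to stop_cmd).
     """
     utils.run(self.start_cmd)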
Example #40
    def run(self,
            command,
            timeout=3600,
            ignore_status=False,
            stdout_tee=utils.TEE_TO_LOGS,
            stderr_tee=utils.TEE_TO_LOGS,
            connect_timeout=30,
            ssh_failure_retry_ok=False,
            options='',
            stdin=None,
            verbose=True,
            args=()):
        """Run a command on the servo host.

        Extends method `run` in SSHHost. If the servo host is a remote device,
        it will call `run` in SSHHost without changing anything.
        If the servo host is 'localhost', it will call utils.run locally.

        @param command: The command line string.
        @param timeout: Time limit in seconds before attempting to
                        kill the running process. The run() function
                        will take a few seconds longer than 'timeout'
                        to complete if it has to kill the process.
        @param ignore_status: Do not raise an exception, no matter
                              what the exit code of the command is.
        @param stdout_tee/stderr_tee: Where to tee the stdout/stderr.
        @param connect_timeout: SSH connection timeout (in seconds)
                                Ignored if host is 'localhost'.
        @param options: String with additional ssh command options
                        Ignored if host is 'localhost'.
        @param ssh_failure_retry_ok: when True and ssh connection failure is
                                     suspected, OK to retry command (but not
                                     compulsory, and likely not needed here)
        @param stdin: Stdin to pass (a string) to the executed command.
        @param verbose: Log the commands.
        @param args: Sequence of strings to pass as arguments to command by
                     quoting them in " and escaping their contents if necessary.

        @returns: A utils.CmdResult object.

        @raises AutoservRunError if the command failed.
        @raises AutoservSSHTimeout SSH connection has timed out. Only applies
                when servo host is not 'localhost'.

        """
        run_args = {
            'command': command,
            'timeout': timeout,
            'ignore_status': ignore_status,
            'stdout_tee': stdout_tee,
            'stderr_tee': stderr_tee,
            'stdin': stdin,
            'verbose': verbose,
            'args': args
        }
        if self.is_localhost():
            if self._sudo_required:
                run_args['command'] = 'sudo -n sh -c "%s"' % utils.sh_escape(
                    command)
            try:
                return utils.run(**run_args)
            except error.CmdError as e:
                logging.error(e)
                raise error.AutoservRunError('command execution error',
                                             e.result_obj)
        else:
            run_args['connect_timeout'] = connect_timeout
            run_args['options'] = options
            return super(ServoHost, self).run(**run_args)
Example #41
def check_image(params, root_dir):
    """
    Check an image using the appropriate tools for each virt backend.

    @param params: Dictionary containing the test parameters.
    @param root_dir: Base directory for relative filenames.

    @note: params should contain:
           image_name -- the name of the image file, without extension
           image_format -- the format of the image (qcow2, raw etc)

    @raise VMImageCheckError: In case qemu-img check fails on the image.
    """
    vm_type = params.get("vm_type")
    if vm_type == 'kvm':
        image_filename = get_image_filename(params, root_dir)
        logging.debug("Checking image file %s", image_filename)
        qemu_img_cmd = virt_utils.get_path(
            root_dir, params.get("qemu_img_binary", "qemu-img"))
        image_is_qcow2 = params.get("image_format") == 'qcow2'
        if os.path.exists(image_filename) and image_is_qcow2:
            # Verifying if qemu-img supports 'check'
            q_result = utils.run(qemu_img_cmd, ignore_status=True)
            q_output = q_result.stdout
            check_img = True
            if not "check" in q_output:
                logging.error("qemu-img does not support 'check', "
                              "skipping check")
                check_img = False
            if not "info" in q_output:
                logging.error("qemu-img does not support 'info', "
                              "skipping check")
                check_img = False
            if check_img:
                try:
                    utils.system("%s info %s" % (qemu_img_cmd, image_filename))
                except error.CmdError:
                    logging.error("Error getting info from image %s",
                                  image_filename)

                cmd_result = utils.run("%s check %s" %
                                       (qemu_img_cmd, image_filename),
                                       ignore_status=True)
                # Error check, large chances of a non-fatal problem.
                # There are chances that bad data was skipped though
                if cmd_result.exit_status == 1:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    if params.get("backup_image_on_check_error",
                                  "no") == "yes":
                        backup_image(params, root_dir, 'backup', False)
                    raise error.TestWarn(
                        "qemu-img check error. Some bad data "
                        "in the image may have gone unnoticed")
                # Exit status 2 is data corruption for sure, so fail the test
                elif cmd_result.exit_status == 2:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    if params.get("backup_image_on_check_error",
                                  "no") == "yes":
                        backup_image(params, root_dir, 'backup', False)
                    raise VMImageCheckError(image_filename)
                # Leaked clusters, they are known to be harmless to data
                # integrity
                elif cmd_result.exit_status == 3:
                    raise error.TestWarn("Leaked clusters were noticed during "
                                         "image check. No data integrity "
                                         "problem was found though.")

                # Just handle normal operation
                if params.get("backup_image", "no") == "yes":
                    backup_image(params, root_dir, 'backup', True)

        else:
            if not os.path.exists(image_filename):
                logging.debug("Image file %s not found, skipping check",
                              image_filename)
            elif not image_is_qcow2:
                logging.debug("Image file %s not qcow2, skipping check",
                              image_filename)
Example #42
def run_file_transfer(test, params, env):
    """
    Test network file transfer between host and guest:

    1) Boot up a VM.
    2) Create a large file by dd on host.
    3) Copy this file from host to guest.
    4) Copy this file from guest to host.
    5) Check that the file transfers completed successfully.

    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))

    session = vm.wait_for_login(timeout=login_timeout)

    dir_name = test.tmpdir
    transfer_timeout = int(params.get("transfer_timeout"))
    transfer_type = params.get("transfer_type")
    tmp_dir = params.get("tmp_dir", "/tmp/")
    clean_cmd = params.get("clean_cmd", "rm -f")
    filesize = int(params.get("filesize", 4000))
    count = int(filesize / 10)
    if count == 0:
        count = 1
    cmd = "dd if=/dev/zero of=%s/a.out bs=10M count=%d" % (dir_name,
                                                           count)
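    # With bs=10M and count = filesize / 10, dd produces a file of roughly
    # 'filesize' MB (and at least 10 MB for small filesize values).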
    guest_path = tmp_dir + "b.out"

    try:
        logging.info("Creating %dMB file on host", filesize)
        utils.run(cmd)

        if transfer_type == "remote":
            logging.info("Transfering file host -> guest, timeout: %ss",
                         transfer_timeout)
            t_begin = time.time()
            vm.copy_files_to("%s/a.out" % dir_name, guest_path,
                             timeout=transfer_timeout)
            t_end = time.time()
            throughput = filesize / (t_end - t_begin)
            logging.info("File transfer host -> guest succeed, "
                         "estimated throughput: %.2fMB/s", throughput)

            logging.info("Transfering file guest -> host, timeout: %ss",
                         transfer_timeout)
            t_begin = time.time()
            vm.copy_files_from(guest_path, "%s/c.out" % dir_name,
                               timeout=transfer_timeout)
            t_end = time.time()
            throughput = filesize / (t_end - t_begin)
            logging.info("File transfer guest -> host succeed, "
                         "estimated throughput: %.2fMB/s", throughput)
        else:
            raise error.TestError("Unknown test file transfer mode %s" %
                                  transfer_type)

        for f in ['a.out', 'c.out']:
            p = os.path.join(dir_name, f)
            size = os.path.getsize(p)
            logging.debug('Size of %s: %sB', f, size)

        md5_orig = utils.hash_file("%s/a.out" % dir_name, method="md5")
        md5_new = utils.hash_file("%s/c.out" % dir_name, method="md5")

        if md5_orig != md5_new:
            raise error.TestFail("File changed after transfer host -> guest "
                                 "and guest -> host")

    finally:
        logging.info('Cleaning temp file on guest')
        clean_cmd += " %s" % guest_path
        session.cmd(clean_cmd)
        logging.info('Cleaning temp files on host')
        try:
            os.remove('%s/a.out' % dir_name)
            os.remove('%s/c.out' % dir_name)
        except OSError:
            pass
        session.close()
Example #43
0
 def find_exit():
     """Polling function for end of output."""
     interrupt_cmd = ('grep "interrupt to exit" %s | wc -l' %
                      self.fh.name)
     line_count = utils.run(interrupt_cmd).stdout.strip()
     return line_count != '0'
Example #44
0
    def run_once(self, options='', hasty=False):
        dep = 'glbench'
        dep_dir = os.path.join(self.autodir, 'deps', dep)
        self.job.install_pkg(dep, 'dep', dep_dir)

        options += self.blacklist

        # Run the test, saving is optional and helps with debugging
        # and reference image management. If unknown images are
        # encountered one can take them from the outdir and copy
        # them (after verification) into the reference image dir.
        exefile = os.path.join(self.autodir, 'deps/glbench/glbench')
        outdir = self.outputdir
        options += ' -save -outdir=' + outdir
        # Using the -hasty option we run only a subset of tests without waiting
        # for thermals to normalize. Test should complete in 15-20 seconds.
        if hasty:
            options += ' -hasty'

        cmd = '%s %s' % (exefile, options)
        summary = None
        try:
            if hasty:
                # On BVT the test will not monitor thermals so we will not verify its
                # correct status using PerfControl
                summary = utils.run(cmd,
                                    stderr_is_expected=False,
                                    stdout_tee=utils.TEE_TO_LOGS,
                                    stderr_tee=utils.TEE_TO_LOGS).stdout
            else:
                self.report_temperature_critical('temperature_critical')
                self.report_temperature('temperature_1_start')
                # Wrap the test run inside of a PerfControl instance to make machine
                # behavior more consistent.
                with perf.PerfControl() as pc:
                    if not pc.verify_is_valid():
                        raise error.TestFail('Failed: %s' %
                                             pc.get_error_reason())
                    self.report_temperature('temperature_2_before_test')

                    # Run the test. If it gets the CPU too hot pc should notice.
                    summary = utils.run(cmd,
                                        stderr_is_expected=False,
                                        stdout_tee=utils.TEE_TO_LOGS,
                                        stderr_tee=utils.TEE_TO_LOGS).stdout
                    if not pc.verify_is_valid():
                        raise error.TestFail('Failed: %s' %
                                             pc.get_error_reason())
        except error.CmdError:
            raise error.TestFail('Failed: CmdError running %s' % cmd)
        except error.CmdTimeoutError:
            raise error.TestFail('Failed: CmdTimeout running %s' % cmd)

        # Write a copy of stdout to help debug failures.
        results_path = os.path.join(self.outputdir, 'summary.txt')
        f = open(results_path, 'w+')
        f.write('# ---------------------------------------------------\n')
        f.write('# [' + cmd + ']\n')
        f.write(summary)
        f.write('\n# -------------------------------------------------\n')
        f.write('# [graphics_GLBench.py postprocessing]\n')

        # Analyze the output. Sample:
        ## board_id: NVIDIA Corporation - Quadro FX 380/PCI/SSE2
        ## Running: ../glbench -save -outdir=img
        #swap_swap = 221.36 us [swap_swap.pixmd5-20dbc...f9c700d2f.png]
        results = summary.splitlines()
        if not results:
            f.close()
            raise error.TestFail(
                'Failed: No output from test. Check /tmp/' +
                'test_that_latest/graphics_GLBench/summary.txt' +
                ' for details.')

        # The good images, the silenced and the zombie/recurring failures.
        reference_imagenames = self.load_imagenames(self.reference_images_file)
        knownbad_imagenames = self.load_imagenames(self.knownbad_images_file)
        fixedbad_imagenames = self.load_imagenames(self.fixedbad_images_file)

        # Check if we saw GLBench end as expected (without crashing).
        test_ended_normal = False
        for line in results:
            if line.strip().startswith('@TEST_END'):
                test_ended_normal = True

        # Analyze individual test results in summary.
        keyvals = {}
        failed_tests = {}
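        # Each result line is expected to look like (assumed format, based
        # on the sample above):
        # @RESULT: swap_swap = 221.36 us [swap_swap.pixmd5-20dbc...f9c700d2f.png]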
        for line in results:
            if not line.strip().startswith('@RESULT: '):
                continue
            keyval, remainder = line[9:].split('[')
            key, val = keyval.split('=')
            testname = key.strip()
            score, unit = val.split()
            testrating = float(score)
            imagefile = remainder.split(']')[0]

            higher = self.unit_higher_is_better.get(unit)
            if higher is None:
                raise error.TestFail('Failed: Unknown test unit "%s" for %s' %
                                     (unit, testname))

            if not hasty:
                # Prepend unit to test name to maintain backwards
                # compatibility with existing perf data.
                perf_value_name = '%s_%s' % (unit, testname)
                self.output_perf_value(description=perf_value_name,
                                       value=testrating,
                                       units=unit,
                                       higher_is_better=higher,
                                       graph=perf_value_name)
                # Add extra value to the graph distinguishing different boards.
                variant = utils.get_board_with_frequency_and_memory()
                desc = '%s-%s' % (perf_value_name, variant)
                self.output_perf_value(description=desc,
                                       value=testrating,
                                       units=unit,
                                       higher_is_better=higher,
                                       graph=perf_value_name)

            # Classify result image.
            if testrating == -1.0:
                # Tests that generate GL Errors.
                glerror = imagefile.split('=')[1]
                f.write('# GLError ' + glerror +
                        ' during test (perf set to -3.0)\n')
                keyvals[testname] = -3.0
                failed_tests[testname] = 'GLError'
            elif testrating == 0.0:
                # Tests for which glbench does not generate a meaningful perf score.
                f.write('# No score for test\n')
                keyvals[testname] = 0.0
            elif imagefile in fixedbad_imagenames:
                # We know the image looked bad at some point in time but we thought
                # it was fixed. Throw an exception as a reminder.
                keyvals[testname] = -2.0
                f.write('# fixedbad [' + imagefile +
                        '] (setting perf as -2.0)\n')
                failed_tests[testname] = imagefile
            elif imagefile in knownbad_imagenames:
                # We have triaged the failure and have filed a tracking bug,
                # so don't throw an exception reminding us there is a problem.
                keyvals[testname] = -1.0
                f.write('# knownbad [' + imagefile +
                        '] (setting perf as -1.0)\n')
                # This failure is whitelisted so don't add to failed_tests.
            elif imagefile in reference_imagenames:
                # Known good reference images (default).
                keyvals[testname] = testrating
            elif imagefile == 'none':
                # Tests that do not write images can't fail because of them.
                keyvals[testname] = testrating
            elif self.is_no_checksum_test(testname):
                # TODO(ihf): these really should not write any images
                keyvals[testname] = testrating
            else:
                # Completely unknown images. Raise a failure.
                keyvals[testname] = -2.0
                failed_tests[testname] = imagefile
                f.write('# unknown [' + imagefile +
                        '] (setting perf as -2.0)\n')
        f.close()
        if not hasty:
            self.report_temperature('temperature_3_after_test')
            self.write_perf_keyval(keyvals)

        # Raise exception if images don't match.
        if failed_tests:
            logging.info('Some images are not matching their reference in %s.',
                         self.reference_images_file)
            logging.info('Please verify that the output images are correct '
                         'and if so copy them to the reference directory.')
            raise error.TestFail(
                'Failed: Some images are not matching their '
                'references. Check /tmp/'
                'test_that_latest/graphics_GLBench/summary.txt'
                ' for details.')

        if not test_ended_normal:
            raise error.TestFail(
                'Failed: No end marker. Presumed crash/missing images.')
Example #45
0
 def cleanup(self):
     """Clean up tmp dirs created by the container."""
     # DeployConfigManager uses sudo to create some directories in the
     # container, so it's necessary to use sudo to clean up.
     utils.run('sudo rm -rf %s' % self.rootfs)
Example #46
0
File: npb.py Project: ghat/honor7x
    def run_once(self):
        """
        Run each benchmark twice, with different number of threads.

        A sanity check is made on each benchmark executed: the ratio
        between the times

        time_ratio = time_one_thrd / time_full_thrds

        has to be contained inside an envelope:

        upper_bound = full_thrds * (1 + (1/n_cpus))
        lower_bound = full_thrds * (1 - (1/n_cpus))

        Otherwise, we throw an exception (this test might be running under
        a virtual machine, and a sanity check failure might mean bugs in
        the SMP implementation).
        """
        os.chdir(self.srcdir)

        # get the tests to run
        test_list = self.tests.split()

        if len(test_list) == 0:
            raise error.TestError('No tests (benchmarks) provided. Exit.')

        for itest in test_list:
            itest_cmd = os.path.join('NPB3.3-OMP/bin/', itest)
            try:
                itest = utils.run(itest_cmd)
            except error.CmdError, e:
                # On failure itest is still the benchmark name string, so
                # log the exception and skip to the next benchmark.
                logging.error('NPB benchmark %s has failed: %s',
                              itest_cmd, e)
                self.n_fail += 1
                continue
            logging.debug(itest.stdout)

            # Get the number of threads that the test ran
            # (which is supposed to be equal to the number of system cores)
            m = re.search('Total threads\s*=\s*(.*)\n', itest.stdout)

            # Gather benchmark results
            ts = re.search('Time in seconds\s*=\s*(.*)\n', itest.stdout)
            mt = re.search('Mop/s total\s*=\s*(.*)\n', itest.stdout)
            mp = re.search('Mop/s/thread\s*=\s*(.*)\n', itest.stdout)

            time_seconds = float(ts.groups()[0])
            mops_total = float(mt.groups()[0])
            mops_per_thread = float(mp.groups()[0])

            logging.info('Test: %s', itest_cmd)
            logging.info('Time (s): %s', time_seconds)
            logging.info('Total operations executed (mops/s): %s', mops_total)
            logging.info('Total operations per thread (mops/s/thread): %s',
                         mops_per_thread)

            self.write_test_keyval({'test': itest_cmd})
            self.write_test_keyval({'time_seconds': time_seconds})
            self.write_test_keyval({'mops_total': mops_total})
            self.write_test_keyval({'mops_per_thread': mops_per_thread})

            # A little extra sanity check comes handy
            if int(m.groups()[0]) != utils.count_cpus():
                raise error.TestError("NPB test suite evaluated the number "
                                      "of threads incorrectly: System appears "
                                      "to have %s cores, but %s threads were "
                                      "executed." %
                                      (utils.count_cpus(), m.groups()[0]))

            # We will use this integer with floating point vars later.
            full_thrds = float(m.groups()[0])

            # Get the duration of the run with all threads.
            m = re.search('Time in seconds\s*=\s*(.*)\n', itest.stdout)
            time_full_thrds = float(m.groups()[0])

            # Repeat the execution with a single thread.
            itest_single_cmd = ''.join(['OMP_NUM_THREADS=1 ', itest_cmd])
            try:
                itest_single = utils.run(itest_single_cmd)
            except error.CmdError, e:
                # itest_single is unbound when the run fails, so log the
                # exception and skip the timing comparison.
                logging.error('NPB benchmark single thread %s has failed: '
                              '%s', itest_single_cmd, e)
                self.n_fail += 1
                continue

            m = re.search('Time in seconds\s*=\s*(.*)\n', itest_single.stdout)
            time_one_thrd = float(m.groups()[0])

            # check durations
            ratio = self.ratio
            time_ratio = float(time_one_thrd / time_full_thrds)
            upper_bound = full_thrds * (1 + ratio)
            lower_bound = full_thrds * (1 - ratio)
            logging.debug('Time ratio for %s: %s', itest_cmd, time_ratio)
            logging.debug('Upper bound: %s', upper_bound)
            logging.debug('Lower bound: %s', lower_bound)

            violates_upper_bound = time_ratio > upper_bound
            violates_lower_bound = time_ratio < lower_bound
            if violates_upper_bound or violates_lower_bound:
                logging.error('NPB benchmark %s failed sanity check '
                              '- time ratio outside bounds', itest_cmd)
                self.n_fail += 1
            else:
                logging.debug('NPB benchmark %s sanity check PASS', itest_cmd)
Example #47
0
    def run_tests_hasty(self, executable, test_cases):
        """Runs tests as quickly as possible.

        This function runs all the test cases, but does not isolate tests and
        may take shortcuts/not run all tests to provide maximum coverage at
        minimum runtime.

        @param executable: dEQP executable path.
        @param test_cases: List of dEQP test case strings.

        @return: dictionary of test results.
        """
        # TODO(ihf): It saves half the test time to use 32*32 but a few tests
        # fail as they need surfaces larger than 200*200.
        width = self._width
        height = self._height
        results = {}

        log_path = os.path.join(tempfile.gettempdir(),
                                '%s-logs' % self._filter)
        shutil.rmtree(log_path, ignore_errors=True)
        os.mkdir(log_path)

        # All tests combined less than 1h in hasty.
        batch_timeout = min(3600, self._timeout * self._hasty_batch_size)
        num_test_cases = len(test_cases)

        # We divide the test cases into several shards, but run them in
        # smaller batches. We start and end at multiples of batch_size
        # boundaries.
        shard_start = self._hasty_batch_size * (
            (self._shard_number *
             (num_test_cases / self._shard_count)) / self._hasty_batch_size)
        shard_end = self._hasty_batch_size * (
            ((self._shard_number + 1) *
             (num_test_cases / self._shard_count)) / self._hasty_batch_size)
            # The last shard will be slightly larger than the others. Extend
            # it to cover all test cases, avoiding rounding problems with the
            # integer arithmetic done to compute shard_start and shard_end.
        if self._shard_number + 1 == self._shard_count:
            shard_end = num_test_cases
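        # Worked example (hypothetical numbers): with 1000 test cases,
        # 4 shards and a batch size of 32, num_test_cases / shard_count is
        # 250, so shard 0 covers cases [0, 224) (250 rounded down to a
        # multiple of 32), and the last shard is extended to case 1000.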

        for batch in xrange(shard_start, shard_end, self._hasty_batch_size):
            batch_to = min(batch + self._hasty_batch_size, shard_end)
            batch_cases = '\n'.join(test_cases[batch:batch_to])
            command = ('%s '
                       '--deqp-stdin-caselist '
                       '--deqp-surface-type=fbo '
                       '--deqp-log-images=disable '
                       '--deqp-visibility=hidden '
                       '--deqp-watchdog=enable '
                       '--deqp-surface-width=%d '
                       '--deqp-surface-height=%d ' %
                       (executable, width, height))

            log_file = os.path.join(log_path,
                                    '%s_hasty_%d.log' % (self._filter, batch))

            command += '--deqp-log-filename=' + log_file
            logging.info('Running tests %d...%d out of %d:\n%s\n%s', batch + 1,
                         batch_to, num_test_cases, command, batch_cases)

            try:
                utils.run(command,
                          timeout=batch_timeout,
                          stderr_is_expected=False,
                          ignore_status=False,
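                          # Feed the batch's test case names to the dEQP
                          # binary on stdin (--deqp-stdin-caselist above).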
                          stdin=batch_cases,
                          stdout_tee=utils.TEE_TO_LOGS,
                          stderr_tee=utils.TEE_TO_LOGS)
            except Exception:
                pass
            # We are trying to handle all errors by parsing the log file.
            results = self._parse_test_results(log_file, results)
            logging.info(results)
        return results
Example #48
0
 def start_service(self):
     """
     Starts the NFS server.
     """
     utils.run(self.start_cmd)
Example #49
0
 def init_076_rbd_modprobe(self):
     for id_ in roles_of_type(self.my_roles, 'client'):
         if self.client_is_type(id_, 'rbd') and \
                 self.get_client_config(id_, 'rbd_kernel_mount'):
             utils.run('modprobe rbd')
             return
Example #50
0
 def teardown_test_case(self, config):
     if os.path.exists(AUTHORIZED_KEYS_BACKUP):
         # Restore authorized_keys from backup.
         utils.run('mv -f ' + AUTHORIZED_KEYS_BACKUP + ' ' +
                   AUTHORIZED_KEYS,
                   ignore_status=True)
Example #51
0
    def _find_device_ids(self, device_dir, input_type, name):
        """Find the fw_id and hw_id for the given device directory.

        Finding fw_id and hw_id is applicable only for touchpads,
        touchscreens, and styluses.

        @param device_dir: the device directory.
        @param input_type: string of input type.
        @param name: string of input name.

        @returns: firmware id, hardware id

        """
        fw_id, hw_id = None, None

        if not device_dir or input_type not in [
                'touchpad', 'touchscreen', 'stylus'
        ]:
            return fw_id, hw_id
        if input_type == 'stylus':
            return self._find_device_ids_for_styluses(device_dir, name)

        # Touch devices with custom drivers usually save this info as a file.
        fw_filenames = ['fw_version', 'firmware_version', 'firmware_id']
        for fw_filename in fw_filenames:
            fw_path = os.path.join(device_dir, fw_filename)
            if os.path.exists(fw_path):
                if fw_id:
                    logging.warning(
                        'Found new potential fw_id when previous '
                        'value was %s!', fw_id)
                fw_id = self._get_contents_of_file(fw_path)

        hw_filenames = ['hw_version', 'product_id', 'board_id']
        for hw_filename in hw_filenames:
            hw_path = os.path.join(device_dir, hw_filename)
            if os.path.exists(hw_path):
                if hw_id:
                    logging.warning(
                        'Found new potential hw_id when previous '
                        'value was %s!', hw_id)
                hw_id = self._get_contents_of_file(hw_path)

        # Hw_ids for Weida and 2nd gen Synaptics are different.
        if not hw_id:
            id_folder = os.path.abspath(os.path.join(device_dir, '..', 'id'))
            product_path = os.path.join(id_folder, 'product')
            vendor_path = os.path.join(id_folder, 'vendor')

            if os.path.isfile(product_path):
                product = self._get_contents_of_file(product_path)
                if name.startswith('WD'):  # Weida ts, e.g. sumo
                    if os.path.isfile(vendor_path):
                        vendor = self._get_contents_of_file(vendor_path)
                        hw_id = vendor + product
                else:  # Synaptics tp or ts, e.g. heli, lulu, setzer
                    hw_id = product

        if not fw_id:
            # Fw_ids for 2nd gen Synaptics can only be found via rmi4update.
            # See if any /dev/hidraw* link to this device's input event.
            input_name = self._find_input_name(device_dir, name)
            hidraws = glob.glob('/dev/hidraw*')
            for hidraw in hidraws:
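                # Map /dev/hidrawN to its /sys/class/hidraw/hidrawN node.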
                class_folder = hidraw.replace('dev', 'sys/class/hidraw')
                input_folder_path = os.path.join(class_folder, 'device',
                                                 'input', input_name)
                if os.path.exists(input_folder_path):
                    fw_id = utils.run('rmi4update -p -d %s' % hidraw,
                                      ignore_status=True).stdout.strip()
                    if fw_id == '':
                        fw_id = None
                    break

        return fw_id, hw_id
Example #52
0
 def cleanup(self, force_stop=False):
     error.context("Cleaning up test NFS share")
     utils.run("umount %s" % self.mnt_dir)
     utils.run("exportfs -u localhost:%s" % self.nfs_dir)
     if force_stop:
         self.stop_service()
Example #53
0
def run_vmstop(test, params, env):
    """
    KVM guest stop test:
    1) Log into a guest
    2) Copy a file into guest
    3) Stop guest
    4) Check the status through monitor
    5) Check the session
    6) Migrate the VM state to a file twice and compare the two files.

    @param test: kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    save_path = params.get("save_path", "/tmp")
    clean_save = params.get("clean_save") == "yes"
    save1 = os.path.join(save_path, "save1")
    save2 = os.path.join(save_path, "save2")

    guest_path = params.get("guest_path", "/tmp")
    file_size = params.get("file_size", "1000")

    try:
        utils.run("dd if=/dev/zero of=/tmp/file bs=1M count=%s" % file_size)
        # Transfer file from host to guest; we don't expect the transfer
        # to finish, it just serves as a kind of stress in the guest.
        bg = kvm_utils.Thread(vm.copy_files_to, ("/tmp/file", guest_path),
                              dict(verbose=True, timeout=60))
        logging.info("Start the background transfer")
        bg.start()

        try:
            # Wait for the transfer to start.
            time.sleep(5)
            logging.info("Stop the VM")
            vm.monitor.cmd("stop")

            # check with monitor
            logging.info("Check the status through monitor")
            if "paused" not in vm.monitor.info("status"):
                raise error.TestFail("Guest did not pause after sending stop")

            # check through session
            logging.info("Check the session")
            if session.is_responsive():
                raise error.TestFail("Session still alive after sending stop")

            # Check with the migration file
            logging.info("Save and check the state files")
            for p in [save1, save2]:
                vm.save_to_file(p)
                time.sleep(1)
                if not os.path.isfile(p):
                    raise error.TestFail("VM failed to save state file %s" % p)

            # Fail if we see deltas
            md5_save1 = utils.hash_file(save1)
            md5_save2 = utils.hash_file(save2)
            if md5_save1 != md5_save2:
                raise error.TestFail("The produced state files differ")
        finally:
            bg.join(suppress_exception=True)

    finally:
        session.close()
        if clean_save:
            logging.debug("Clean the state files")
            if os.path.isfile(save1):
                os.remove(save1)
            if os.path.isfile(save2):
                os.remove(save2)
        vm.monitor.cmd("cont")
Example #54
0
 def restart_service(self):
     """
     Restarts the NFS server.
     """
     utils.run(self.restart_cmd)
Example #55
0
    def run_once(self, check_link_speed=()):
        """
        Use rootdev to find the underlying block device even if the
        system booted to /dev/dm-0.
        """
        device = site_utils.get_root_device()

        def is_fixed(dev):
            """ Check the device is fixed.

            @param dev: device to check, i.e. 'sda'.
            """
            sysfs_path = '/sys/block/%s/removable' % dev
            return (os.path.exists(sysfs_path)
                    and open(sysfs_path).read().strip() == '0')

        # Catch device name like sda, mmcblk0, nvme0n1.
        device_re = re.compile(r'^/dev/([a-zA-Z0-9]+)$')
        dev = device_re.findall(device)
        if len(dev) != 1 or not is_fixed(dev[0]):
            raise error.TestFail('The main disk %s is not fixed' % dev)

        # If it is an mmcblk or nvme device, then it is an SSD.
        # Otherwise, run hdparm to check whether it is an SSD.
        if re.search("nvme", device):
            return

        if re.search("mmcblk", device):
            return

        hdparm = utils.run('/sbin/hdparm -I %s' % device)

        # Check whether the device is an SSD.
        match = re.search(r'Nominal Media Rotation Rate: (.+)$', hdparm.stdout,
                          re.MULTILINE)
        if match and match.group(1):
            if match.group(1) != 'Solid State Device':
                if utils.get_board() in self.boards_with_hdd:
                    return
                raise error.TestFail('The main disk is not an SSD, '
                                     'Rotation Rate: %s' % match.group(1))
        else:
            raise error.TestFail('Rotation Rate not reported from the device, '
                                 'unable to ensure it is an SSD')

        # Record the SSD device size (in MB) as a perf keyval.
        match = re.search("device size with M = 1000\*1000: (.+) MBytes",
                          hdparm.stdout, re.MULTILINE)
        if match and match.group(1):
            size = int(match.group(1))
            self.write_perf_keyval({"mb_ssd_device_size": size})
        else:
            raise error.TestFail('Device size info missing from the device')

        # Check supported link speed.
        #
        # check_link_speed is an empty tuple by default, which does not perform
        # link speed checking.  You can run the test while specifying
        # check_link_speed=('1.5Gb/s', '3.0Gb/s') to check the 2 signaling
        # speeds are both supported.
        for link_speed in check_link_speed:
            if not re.search(r'Gen. signaling speed \(%s\)' % link_speed,
                             hdparm.stdout, re.MULTILINE):
                raise error.TestFail('Link speed %s not supported' %
                                     link_speed)
Example #56
0
 def cleanup(self):
     """Unmount ram disk."""
     utils.run('umount %s' % self._RAMDISK)
     super(power_VideoPlayback, self).cleanup()
Example #57
0
    container_id = lxc.ContainerId.create(TEST_JOB_ID)
    container = setup_test(bucket, container_id, options.skip_cleanup)
    test_share(container)
    test_autoserv(container)
    if options.dut:
        test_ssh(container, options.dut)
    if options.devserver:
        test_ssh(container, options.devserver)
    # Packages are installed in TEST_SCRIPT, verify the packages are installed.
    test_package_install(container)
    logging.info('All tests passed.')


if __name__ == '__main__':
    options = parse_options()
    try:
        main(options)
    except:
        # If the cleanup code below raises additional errors, they obfuscate the
        # actual error in the test.  Highlight the error to aid in debugging.
        logging.exception('ERROR:\n%s', error.format_error())
        raise
    finally:
        if not options.skip_cleanup:
            logging.info('Cleaning up temporary directory %s.', TEMP_DIR)
            try:
                lxc.ContainerBucket(TEMP_DIR).destroy_all()
            finally:
                utils.run('sudo rm -rf "%s"' % TEMP_DIR)
Example #58
0
 def _kill_host_programs(kill_stress_cmd, kill_monitor_cmd):
     logging.info("Kill stress and monitor on host")
     utils.run(kill_stress_cmd, ignore_status=True)
     utils.run(kill_monitor_cmd, ignore_status=True)
Example #59
0
            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
                                               (mem / 200 * 50 * perf_ratio))
        logging.debug(virt_test_utils.get_memory_info([vm]))
        logging.info("Phase 2g: PASS")

        logging.debug("Cleaning up...")
        for i in range(0, max_alloc):
            lsessions[i].cmd_output("die()", 20)
        session.close()
        vm.destroy(gracefully=False)

    # Main test code
    logging.info("Starting phase 0: Initialization")
    new_ksm = False
    if os.path.exists("/sys/kernel/mm/ksm/run"):
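        # Tune ksmd: scan 5000 pages per pass, sleep 50 ms between passes,
        # then enable merging (run = 1).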
        utils.run("echo 50 > /sys/kernel/mm/ksm/sleep_millisecs")
        utils.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan")
        utils.run("echo 1 > /sys/kernel/mm/ksm/run")

        e_up = "/sys/kernel/mm/transparent_hugepage/enabled"
        e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
        if os.path.exists(e_up):
            utils.run("echo 'never' > %s" % e_up)
        if os.path.exists(e_rh):
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError, e:
Example #60
0
    def run_once(self, iteration=1, dev=''):
        """
        Read S.M.A.R.T attribute from target device

        @param iteration: number of times to read the SMART attributes
        @param dev:    target device
        """
        if dev == '':
            logging.info('Run rootdev to determine boot device')
            dev = utils.get_root_device()

        logging.info('dev: %s', dev)

        # Skip this test if dev is an eMMC device without raising an error
        if re.match('.*mmc.*', dev):
            logging.info('Target device is an eMMC device. Skip testing')
            self.write_perf_keyval({'device_model': 'eMMC'})
            return

        last_result = ''

        # Run multiple times to exercise the firmware path that retrieves
        # SMART values.
        for loop in range(1, iteration + 1):
            cmd = 'smartctl -a -f brief %s' % dev
            result = utils.run(cmd, ignore_status=True)
            exit_status = result.exit_status
            result_text = result.stdout
            result_lines = result_text.split('\n')

            # Log all lines if the line count differs from the previous
            # iteration; otherwise log only the changed lines.
            if result_text != last_result:
                logging.info('Iteration #%d', loop)
                last_result_lines = last_result.split('\n')
                if len(last_result_lines) != len(result_lines):
                    for line in result_lines:
                        logging.info(line)
                else:
                    for i, line in enumerate(result_lines):
                        if line != last_result_lines[i]:
                            logging.info(line)
                last_result = result_text

            # Ignore errors other than the first two bits (bit 0: command
            # line parse error; bit 1: device open failed).
            if exit_status & 0x3:
                # Error message should be in 4th line of the output
                msg = 'Test failed with error: %s' % result_lines[3]
                raise error.TestFail(msg)

        logging.info('smartctl exit status: 0x%x', exit_status)

        # find drive model
        lookup_table = {}
        pattern = re.compile(self._SMARTCTL_DEVICE_MODEL_PATTERN)
        for line in result_lines:
            if pattern.match(line):
                model = pattern.match(line).group('model')
                for known_model in self._SMARTCTL_LOOKUP_TABLE:
                    if model.startswith(known_model):
                        lookup_table = self._SMARTCTL_LOOKUP_TABLE[known_model]
                        break
                break
        else:
            raise error.TestFail('Cannot find drive model')

        # Example of smartctl output:
        # ID# ATTRIBUTE_NAME          FLAGS    VALUE WORST THRESH FAIL RAW_VALUE
        #  12 Power_Cycle_Count       -O----   100   100   000    -    204
        # Use the FLAGS field to find a valid attribute line.
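        # In each matching line, field[0] is the ID#, field[1] the
        # ATTRIBUTE_NAME, field[6] the FAIL column and field[7] the
        # RAW_VALUE.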
        pattern = re.compile(self._SMARTCTL_RESULT_PATTERN)
        keyval = {}
        fail = []
        for line in result_lines:
            if not pattern.match(line):
                continue
            field = line.split()

            id = int(field[0])
            if id in lookup_table:
                # The lookup table overrides the smartctl attribute name.
                key = lookup_table[id]
            else:
                key = field[1]  # ATTRIBUTE_NAME
                if key == 'Unknown_Attribute':
                    key = "Smart_Attribute_ID_%d" % id

            keyval[key] = field[7]  # RAW_VALUE

            # check for failing attribute
            if field[6] != '-':
                fail += [key]

        if len(keyval) == 0:
            raise error.TestFail(
                'Test failed with error: Can not parse smartctl keyval')

        if len(fail) > 0:
            keyval['fail'] = fail

        keyval['exit_status'] = exit_status
        keyval['device_model'] = model
        self.write_perf_keyval(keyval)