Example #1
    def unmount_force(self):
        """
        Kill all other jobs accessing this partition. Use fuser and ps to find
        all mounts on this mountpoint and unmount them.

        @return: true for success or false for any errors
        """

        logging.debug("Standard umount failed, will try forcing. Users:")
        try:
            cmd = 'fuser ' + self.get_mountpoint()
            logging.debug(cmd)
            fuser = utils.system_output(cmd)
            logging.debug(fuser)
            users = re.sub('.*:', '', fuser).split()
            for user in users:
                m = re.match(r'(\d+)(.*)', user)
                (pid, usage) = (m.group(1), m.group(2))
                try:
                    ps = utils.system_output('ps -p %s | sed 1d' % pid)
                    logging.debug('%s %s %s' % (usage, pid, ps))
                except Exception:
                    pass
            # With the users listed, force a single unmount of the device.
            utils.system('ls -l ' + self.device)
            umount_cmd = "umount -f " + self.device
            utils.system(umount_cmd)
            return True
        except error.CmdError:
            logging.debug('Umount_force failed for %s' % self.device)
            return False
Example #2
def get_git_branch(repository, branch, srcdir, commit=None, lbranch=None):
    """
    Retrieves a given git code repository.

    @param repository: Git repository URL
    """
    logging.info("Fetching git [REP '%s' BRANCH '%s' TAG '%s'] -> %s",
                 repository, branch, commit, srcdir)
    if not os.path.exists(srcdir):
        os.makedirs(srcdir)
    os.chdir(srcdir)

    if os.path.exists(".git"):
        utils.system("git reset --hard")
    else:
        utils.system("git init")

    if not lbranch:
        lbranch = branch

    utils.system("git fetch -q -f -u -t %s %s:%s" %
                 (repository, branch, lbranch))
    utils.system("git checkout %s" % lbranch)
    if commit:
        utils.system("git checkout %s" % commit)

    h = utils.system_output('git log --pretty=format:"%H" -1')
    desc = utils.system_output("git describe")
    logging.info("Commit hash for %s is %s (%s)" % (repository, h.strip(),
                                                    desc))
    return srcdir
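A minimal usage sketch for get_git_branch(); the repository URL, branch names and destination directory below are hypothetical placeholders, and the autotest utils/logging/os imports are assumed to be in place as in the snippet above:

# Fetch the remote 'master' branch of a (hypothetical) repository into
# /tmp/linux-src, tracking it locally as 'testing', with no extra commit pinned.
src = get_git_branch('git://git.example.org/linux.git', 'master',
                     '/tmp/linux-src', commit=None, lbranch='testing')
print 'repository checked out in %s' % src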
Example #3
    def execute(self, iterations = 1, workfile = 'workfile.short',
                    start = 1, end = 10, increment = 2,
                    extra_args = '', tmpdir = None):
        if not tmpdir:
            tmpdir = self.tmpdir

        # -f workfile
        # -s <number of users to start with>
        # -e <number of users to end with>
        # -i <number of users to increment>
        workfile = os.path.join('data', workfile)
        args = "-f %s -s %d -e %d -i %d" % (workfile, start, end, increment)
        config = os.path.join(self.srcdir, 'reaim.config')
        utils.system('cp -f %s/reaim.config %s' % (self.bindir, config))
        args += ' -c ./reaim.config'
        open(config, 'a+').write("DISKDIR %s\n" % tmpdir)
        os.chdir(self.srcdir)
        cmd = self.ldlib + ' ./reaim ' + args + ' ' + extra_args

        results = []

        profilers = self.job.profilers
        if not profilers.only():
            for i in range(iterations):
                results.append(utils.system_output(cmd, retain_output=True))

        # Do a profiling run if necessary
        if profilers.present():
            profilers.start(self)
            results.append(utils.system_output(cmd, retain_output=True))
            profilers.stop(self)
            profilers.report(self)

        self.__format_results("\n".join(results))
Example #4
def convert(package, destination_format):
    """\
    Convert packages with the 'alien' utility. If alien is not installed, it
    throws a NotImplementedError exception.
    returns: filename of the package generated.
    """
    try:
        os_dep.command('alien')
    except Exception:
        e_msg = 'Cannot convert to %s, alien not installed' % destination_format
        raise error.TestError(e_msg)

    # alien supports converting to many formats, but it is only worth mapping
    # the conversions for the package types implemented here.
    if destination_format == 'dpkg':
        deb_pattern = re.compile(r'[A-Za-z0-9_.-]*\.deb')
        conv_output = utils.system_output('alien --to-deb %s 2>/dev/null'
                                          % package)
        converted_package = re.findall(deb_pattern, conv_output)[0]
    elif destination_format == 'rpm':
        rpm_pattern = re.compile(r'[A-Za-z0-9_.-]*\.rpm')
        conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null'
                                          % package)
        converted_package = re.findall(rpm_pattern, conv_output)[0]
    else:
        e_msg = 'Conversion to format %s not implemented' % destination_format
        raise NotImplementedError(e_msg)

    print 'Package %s successfully converted to %s' % \
            (os.path.basename(package), os.path.basename(converted_package))
    return os.path.abspath(converted_package)
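A short usage sketch for convert(); the package path is a made-up example, and the autotest modules (utils, error, os_dep) are assumed to be importable as above:

# Convert a (hypothetical) local RPM into a Debian package via alien.
try:
    deb_path = convert('/tmp/foo-1.0-1.noarch.rpm', 'dpkg')
    print 'converted package written to %s' % deb_path
except NotImplementedError:
    print 'conversion to that format is not implemented'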
Example #5
    def run_once(self):
        logging.info("In run_once function.. ")
        logging.info("create pool of version 1.. ")
        disks = libzfs_common.get_free_disks()
        version = "1"
        status = libzpool.create(TESTPOOL, disks[0])
        if status != SUCCESS:
            raise error.TestFail("zpool create failed.. ")

        logging.info("create zvol on pool..")
        status = libzfs.create_zvol(TESTPOOL, "2G", TESTVOL)
        if status != SUCCESS:
            raise error.TestFail("cannot create zvol..")

        list_args = ["", "create -V", "create -V " + TESTPOOL,
                     "create -V " + TESTPOOL + "/" + TESTVOL + "@",
                     "create -V blah", "destroy"]

        logging.info("Try each ZFS volume sub-command without parameters "
                     "to make sure it returns an error.")
        for i in list_args:
            try:
                status = SUCCESS
                utils.system_output("zfs " + i)
            except error.CmdError:
                status = FAIL
            if status == FAIL:
                logging.info("Badly formed ZFS volume sub-commands fail "
                             "as expected.")
            else:
                raise error.TestFail("ZFS volume sub-commands succeeded "
                                     "unexpectedly.")
def pool_in_cache(pool, file):
    """Return SUCCESS if the pool name appears in the given cache file."""
    try:
        status = SUCCESS
        utils.system_output("strings " + file + " | grep -w " + pool)
    except error.CmdError:
        status = FAIL
    return status
Example #7
def pool_exists(pool):
    """Return SUCCESS if the named zpool exists, FAIL otherwise."""
    if pool == "":
        raise error.TestFail("Missing pool name..")
    try:
        status = SUCCESS
        utils.system_output("zpool list -H " + pool)
    except error.CmdError:
        status = FAIL
    return status
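A tiny sketch of how the helper above is typically used in the ZFS tests; TESTPOOL and SUCCESS are the module-level constants seen in the other examples:

if pool_exists(TESTPOOL) != SUCCESS:
    raise error.TestFail("pool %s does not exist.." % TESTPOOL)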
Example #8
def list_mount_devices():
    devices = []
    # list mounted filesystems
    for line in utils.system_output('mount').splitlines():
        devices.append(line.split()[0])
    # list mounted swap devices
    for line in utils.system_output('swapon -s').splitlines():
        if line.startswith('/'):        # skip header line
            devices.append(line.split()[0])
    return devices
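A small sketch using list_mount_devices() as a safety guard before destructive operations (the same pattern mkfs() follows in Example #15); the device path is a placeholder:

device = '/dev/sdb1'   # hypothetical device under test
if device in list_mount_devices():
    raise error.TestError('%s is mounted, refusing to reformat it' % device)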
Example #9
    def run_once(self, dev="", devices="", extra_args='', tmpdir=None):
        # @dev: The device against which the trace will be replayed.
        #       e.g. "sdb" or "md_d1"
        # @devices: A space-separated list of the underlying devices
        #    which make up dev, e.g. "sdb sdc". You only need to set
        #    devices if dev is an MD, LVM, or similar device;
        #    otherwise leave it as an empty string.

        if not tmpdir:
            tmpdir = self.tmpdir

        os.chdir(self.srcdir)

        alldevs = "-d /dev/" + dev
        alldnames = dev
        for d in devices.split():
            alldevs += " -d /dev/" + d
            alldnames += " " + d

        # convert the trace (assumed to be in this test's base
        # directory) into btreplay's required format
        #
        # TODO: The test currently halts here as there is no trace in the
        # test's base directory.
        cmd = "./btreplay/btrecord -d .. -D %s %s" % (tmpdir, dev)
        self.results.append(utils.system_output(cmd, retain_output=True))

        # time a replay that omits "thinktime" between requests
        # (by use of the -N flag)
        cmd = self.ldlib + " /usr/bin/time ./btreplay/btreplay -d "+\
              tmpdir+" -N -W "+dev+" "+extra_args+" 2>&1"
        self.results.append(utils.system_output(cmd, retain_output=True))

        # trace a replay that reproduces inter-request delays, and
        # analyse the trace with btt to determine the average request
        # completion latency
        utils.system("./blktrace -D %s %s >/dev/null &" % (tmpdir, alldevs))
        cmd = self.ldlib + " ./btreplay/btreplay -d %s -W %s %s" %\
              (tmpdir, dev, extra_args)
        self.results.append(utils.system_output(cmd, retain_output=True))
        utils.system("killall -INT blktrace")

        # wait until blktrace is really done
        slept = 0.0
        while utils.system("ps -C blktrace > /dev/null",
                           ignore_status=True) == 0:
            time.sleep(0.1)
            slept += 0.1
            if slept > 30.0:
                utils.system("killall -9 blktrace")
                raise error.TestError("blktrace failed to exit in 30 seconds")
        utils.system("./blkparse -q -D %s -d %s/trace.bin -O %s >/dev/null" %
                     (tmpdir, tmpdir, alldnames))
        cmd = "./btt/btt -i %s/trace.bin" % tmpdir
        self.results.append(utils.system_output(cmd, retain_output=True))
Example #10
def _dpkg_info(dpkg_package):
    """\
    Private function that returns a dictionary with information about a
    dpkg package file
    - type: Package management program that handles the file
    - system_support: If the package management program is installed on the
    system or not
    - source: If it is a source (True) or binary (False) package
    - version: The package version (or name), that is used to check against the
    package manager if the package is installed
    - arch: The architecture for which a binary package was built
    - installed: Whether the package is installed (True) on the system or not
    (False)
    """
    # We will make good use of what the file command has to tell us about the
    # package :)
    file_result = utils.system_output('file ' + dpkg_package)
    package_info = {}
    package_info['type'] = 'dpkg'
    # There's no single debian source package as is the case
    # with RPM
    package_info['source'] = False
    try:
        os_dep.command('dpkg')
        # Build the command strings that will be used to get package info
        # a_cmd - Command to determine package architecture
        # v_cmd - Command to determine package version
        # i_cmd - Command to determine if package is installed
        a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
        v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
        i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>/dev/null'

        package_info['system_support'] = True
        package_info['version'] = utils.system_output(v_cmd)
        package_info['arch'] = utils.system_output(a_cmd)
        # Checking if package is installed
        package_status = utils.system_output(i_cmd, ignore_status=True)
        not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
        dpkg_not_installed = re.search(not_inst_pattern, package_status)
        if dpkg_not_installed:
            package_info['installed'] = False
        else:
            package_info['installed'] = True

    except Exception:
        package_info['system_support'] = False
        package_info['installed'] = False
        # The output of file is not as generous for dpkg files as
        # it is with rpm files
        package_info['arch'] = 'Not Available'
        package_info['version'] = 'Not Available'

    return package_info
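A usage sketch for _dpkg_info(); the .deb path is hypothetical and only illustrates the dictionary the helper returns:

info = _dpkg_info('/tmp/hello_2.10-2_amd64.deb')   # hypothetical package file
logging.info('version=%s arch=%s installed=%s',
             info['version'], info['arch'], info['installed'])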
Example #11
    def install(self, tag='autotest', install_vmlinux=True):
        self.installed_as = tag

        self.image = None
        self.initrd = ''
        for rpm_pack in self.rpm_package:
            rpm_name = utils.system_output('rpm -qp ' + rpm_pack)

            # install
            utils.system('rpm -i --force ' + rpm_pack)

            # get file list
            files = utils.system_output('rpm -ql ' + rpm_name).splitlines()

            # search for vmlinuz
            for file in files:
                if file.startswith('/boot/vmlinuz'):
                    self.full_version = file[len('/boot/vmlinuz-'):]
                    self.image = file
                    self.rpm_flavour = rpm_name.split('-')[1]

                    # get version and release number
                    self.version, self.release = utils.system_output(
                            'rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" -q '
                            + rpm_name).splitlines()[0:2]

                    # prefer /boot/kernel-version before /boot/kernel
                    if self.full_version:
                        break

            # search for initrd
            for file in files:
                if file.startswith('/boot/init'):
                    self.initrd = file
                    # prefer /boot/initrd-version before /boot/initrd
                    if len(file) > len('/boot/initrd'):
                        break

        if self.image is None:
            errmsg = "specified rpm file(s) don't contain /boot/vmlinuz"
            raise error.TestError(errmsg)

        # install vmlinux
        if install_vmlinux:
            for rpm_pack in self.rpm_package:
                vmlinux = utils.system_output(
                        'rpm -q -l -p %s | grep /boot/vmlinux' % rpm_pack)
                utils.system('cd /; rpm2cpio %s | cpio -imuv .%s 2>&1'
                             % (rpm_pack, vmlinux))
                if not os.path.exists(vmlinux):
                    raise error.TestError('%s does not exist after installing '
                                          '%s' % (vmlinux, rpm_pack))
Example #12
 def check_installed(self, name):
     if os.path.isfile(name):
         n_cmd = (self.lowlevel_base_cmd + ' -f ' + name +
                  ' Package 2>/dev/null')
         name = utils.system_output(n_cmd)
     i_cmd = self.lowlevel_base_cmd + ' -s ' + name + ' 2>/dev/null'
     # Checking if package is installed
     package_status = utils.system_output(i_cmd, ignore_status=True)
     not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
     dpkg_not_installed = re.search(not_inst_pattern, package_status)
     if dpkg_not_installed:
         return False
     return True
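A sketch of a caller; dpkg_backend stands for an already-constructed instance of the class this method belongs to, which is an assumption made only for illustration:

# dpkg_backend is assumed to be an instance of the dpkg backend class above.
if dpkg_backend.check_installed('bash'):
    logging.info('bash is installed')
else:
    logging.info('bash is NOT installed')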
Example #13
 def initialize(self):
     """
     Gets path of kvm_stat and verifies if debugfs needs to be mounted.
     """
     self.stat_path = os_dep.command('kvm_stat')
     try:
         utils.system_output("%s --batch" % self.stat_path)
     except error.CmdError, e:
         if 'debugfs' in str(e):
             utils.system('mount -t debugfs debugfs /sys/kernel/debug')
         else:
             raise error.AutotestError('kvm_stat failed due to an '
                                       'unknown reason: %s' % str(e))
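For reference, a standalone sketch of the debugfs check this method relies on; it only assumes utils.system_output as used throughout these examples, and treats /sys/kernel/debug as the conventional mount point:

def debugfs_mounted():
    # 'mount' prints a line such as 'debugfs on /sys/kernel/debug type debugfs'
    # when debugfs is already mounted.
    return 'debugfs' in utils.system_output('mount', ignore_status=True)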
Example #14
def set_property(pool, pr, val):
    """Set property 'pr' to value 'val' on the given pool."""
    if pool == "":
        raise error.TestFail("cannot set property : Missing pool name..")
    if pr == "":
        raise error.TestFail("cannot set property : Missing property name..")
    if val == "":
        raise error.TestFail("cannot set property : Missing property value..")
    try:
        status = SUCCESS
        utils.system_output("zpool set " + pr + "=" + val + " " + pool)
    except error.CmdError:
        status = FAIL
    return status
Example #15
    def mkfs(self, fstype=None, args='', record=True):
        """
        Format a partition to filesystem type

        @param fstype: the filesystem type, e.g. "ext3", "ext2"
        @param args: arguments to be passed to mkfs command.
        @param record: if set, output result of mkfs operation to autotest
                output
        """

        if list_mount_devices().count(self.device):
            raise NameError('Attempted to format mounted device %s' %
                             self.device)

        if not fstype:
            if self.fstype:
                fstype = self.fstype
            else:
                fstype = 'ext2'

        if self.mkfs_flags:
            args += ' ' + self.mkfs_flags
        if fstype == 'xfs':
            args += ' -f'

        if self.loop:
            # BAH. Inconsistent mkfs syntax SUCKS.
            if fstype.startswith('ext'):
                args += ' -F'
            elif fstype == 'reiserfs':
                args += ' -f'

        # If there isn't already a '-t <type>' argument, add one.
        if not "-t" in args:
            args = "-t %s %s" % (fstype, args)

        args = args.strip()

        mkfs_cmd = "%s %s %s" % (self.mkfs_exec(fstype), args, self.device)

        sys.stdout.flush()
        try:
            # We throw away the output here - we only need it on error, in
            # which case it's in the exception
            utils.system_output("yes | %s" % mkfs_cmd)
        except error.CmdError, e:
            logging.error(e.result_obj)
            if record:
                self.job.record('FAIL', None, mkfs_cmd, error.format_error())
            raise
Example #16
    def get_sim_cps(self, est_size):
        '''
        Calculate the number of simultaneous copies of the uncompressed
        kernel tarball needed to make the system swap.

        @param est_size: Estimated size of the uncompressed linux tarball.
        '''
        mem_str = utils.system_output('grep MemTotal /proc/meminfo')
        mem = int(re.search(r'\d+', mem_str).group(0))
        mem = int(mem / 1024)

        # The general idea here is that we'll make enough copies of the
        # kernel tree to fill 1.5 times the physical RAM, to make sure the
        # system swaps, therefore reading and writing stuff to the disk. The
        # DMA reads and writes, together with the memory operations, make it
        # more likely to reveal failures in the memory subsystem.
        sim_cps = (1.5 * mem) / est_size

        if (mem % est_size) >= (est_size / 2):
            sim_cps += 1

        if (mem / 32) < 1:
            sim_cps += 1

        return int(sim_cps)
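To make the arithmetic above concrete, here is a standalone sketch of the same calculation with made-up numbers (4096 MB of RAM, a 700 MB uncompressed tarball); it mirrors the logic without reading /proc/meminfo:

def sim_cps_for(mem_mb, est_size_mb):
    # Mirror of get_sim_cps(): aim for roughly 1.5x physical RAM worth of copies.
    sim_cps = (1.5 * mem_mb) / est_size_mb
    if (mem_mb % est_size_mb) >= (est_size_mb / 2):
        sim_cps += 1
    if (mem_mb / 32) < 1:
        sim_cps += 1
    return int(sim_cps)

print sim_cps_for(4096, 700)   # 9 copies on a 4 GB machine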
Example #17
 def run_once(self, dir='.', nprocs=None, seconds=600, args=''):
     if not nprocs:
         nprocs = self.job.cpu_count()
     loadfile = os.path.join(self.srcdir, 'client.txt')
     cmd = '%s %s %s -D %s -c %s -t %d' % (self.dbench, nprocs, args,
                                           dir, loadfile, seconds)
     self.results = utils.system_output(cmd, retain_output=True)
Example #18
    def netperf(i=0):
        guest_ip = vm.get_address(i)
        logging.info("Netperf_%s: netserver %s" % (i, guest_ip))
        result_file = os.path.join(test.resultsdir, "output_%s_%s"
                                   % (test.iteration, i))
        list_fail = []
        result = open(result_file, "w")
        result.write("Netperf test results\n")

        for p in params.get("protocols").split():
            packet_size = params.get("packet_size", "1500")
            for size in packet_size.split():
                cmd = params.get("netperf_cmd") % (netperf_dir, p,
                                                   guest_ip, size)
                logging.info("Netperf_%s: protocol %s" % (i, p))
                try:
                    netperf_output = utils.system_output(cmd,
                                                         retain_output=True)
                    result.write("%s\n" % netperf_output)
                except error.CmdError:
                    logging.error("Test of protocol %s failed", p)
                    list_fail.append(p)

        result.close()
        if list_fail:
            raise error.TestFail("Some netperf tests failed: %s" %
                                 ", ".join(list_fail))
Example #19
def list_all():
    """Returns a list with the names of all currently installed packages."""
    support_info = os_support()
    installed_packages = []

    if support_info['rpm']:
        installed_packages += utils.system_output('rpm -qa').splitlines()

    if support_info['dpkg']:
        raw_list = utils.system_output('dpkg -l').splitlines()[5:]
        for line in raw_list:
            parts = line.split()
            if parts[0] == "ii":  # only grab "installed" packages
                installed_packages.append("%s-%s" % (parts[1], parts[2]))

    return installed_packages
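A usage sketch; it only assumes the module above (and its logging import) is available:

packages = list_all()
logging.info('%d packages installed', len(packages))
for pkg in packages[:5]:
    logging.info('for example: %s', pkg)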
Example #20
    def provides(self, name):
        """
        Searches for what provides a given file.

        @param name: File path.
        """
        p_cmd = self.base_command + ' what-provides ' + name
        list_provides = []
        try:
            p_output = utils.system_output(p_cmd).split('\n')[4:]
            for line in p_output:
                line = [a.strip() for a in line.split('|')]
                try:
                    state, pname, type, version, arch, repository = line
                    if pname not in list_provides:
                        list_provides.append(pname)
                except (ValueError, IndexError):
                    pass
            if len(list_provides) > 1:
                logging.warning('More than one package found, '
                                'choosing the first query result')
            if list_provides:
                logging.info("Package %s provides %s", list_provides[0], name)
                return list_provides[0]
            return None
        except Exception:
            return None
Example #21
    def _run_sub_test(self, test):
        os.chdir(self.srcdir)
        output = utils.system_output("./check %s" % test, ignore_status=True, retain_output=True)
        lines = output.split("\n")
        result_line = lines[-1]

        if self.NA_RE.match(result_line):
            detail_line = lines[-3]
            match = self.NA_DETAIL_RE.match(detail_line)
            if match is not None:
                error_msg = match.groups()[2]
            else:
                error_msg = "Test dependency failed, test not run"
            raise error.TestNAError(error_msg)

        elif self.FAILED_RE.match(result_line):
            raise error.TestError("Test error, check debug logs for complete " "test output")

        elif self.PASSED_RE.match(result_line):
            return

        else:
            raise error.TestError(
                "Could not assert test success or failure, " "assuming failure. Please check debug logs"
            )
Example #22
    def login(self, timeout=LOGIN_TIMEOUT):
        """
        Log into the hypervisor using required URIs .

        @timeout: Time in seconds that we will wait before giving up on logging
                into the host.
        @return: A ShellSession object.
        """
        if self.driver is None:
            uri = utils.system_output('%s uri' % self.virsh_exec)
        else:
            uri = "%s+ssh://%s@%s/system" % (self.driver, self.username,
                                             self.host)

        command = "%s --connect  %s" % (self.virsh_exec, uri)

        session = aexpect.ShellSession(command, linesep=self.linesep,
                                       prompt=self.prompt)

        if self.username is not None:
            try:
                virt_utils._remote_login(session, self.username, self.password,
                                         self.prompt, timeout)
            except aexpect.ShellError:
                session.close()
                session = None

        return session
Example #23
 def setup(self):
     brctl_output = utils.system_output("brctl show")
     if self.brname not in brctl_output:
         logging.debug("Configuring KVM test private bridge %s", self.brname)
         try:
             self._add_bridge()
         except:
             self._remove_bridge()
             raise
         try:
             self._bring_bridge_up()
         except:
             self._bring_bridge_down()
             self._remove_bridge()
             raise
         try:
             self._enable_nat()
         except:
             self._disable_nat()
             self._bring_bridge_down()
             self._remove_bridge()
             raise
         try:
             self._start_dhcp_server()
         except:
             self._stop_dhcp_server()
             self._disable_nat()
             self._bring_bridge_down()
             self._remove_bridge()
             raise
         self._verify_bridge()
Example #24
    def server_start(self, cpu_affinity):
        utils.system('killall netserver', ignore_status=True)
        cmd = self.server_prog
        if cpu_affinity:
            cmd = 'taskset %s %s' % (cpu_affinity, cmd)

        self.results.append(utils.system_output(cmd, retain_output=True))
Example #25
    def run_once(self, dir=None, extra_args='', user='******'):
        if not dir:
            dir = self.tmpdir

        args = '-d ' + dir + ' -u ' + user + ' ' + extra_args
        cmd = self.srcdir + '/bonnie++ ' + args

        self.results.append(utils.system_output(cmd, retain_output=True))
Example #26
    def run_once(self, test_name, fs_type):
        os.chdir(self.srcdir)

        if test_name == 'setup':
            return

        cmd = 'tests/run_one.sh -K -t %s -b 1000000 -D /mnt/image -l /mnt/lower -u /mnt/upper -f %s' % (test_name, fs_type)
        self.results = utils.system_output(cmd, retain_output=True)
Example #27
    def server_start(self):
        args = ''
        if self.udp:
            args += '-u '

        utils.system('killall -9 iperf', ignore_status=True)
        self.results.append(utils.system_output(self.server_path % args,
                                                retain_output=True))
Example #28
 def run_once(self, args=''):
     vars = 'UB_TMPDIR="%s" UB_RESULTDIR="%s"' % (self.tmpdir,
                                                  self.resultsdir)
     os.chdir(self.srcdir)
     self.report_data = utils.system_output(vars + ' ./Run ' + args)
     self.results_path = os.path.join(self.resultsdir,
                                      'raw_output_%s' % self.iteration)
     utils.open_write_close(self.results_path, self.report_data)
Example #29
    def run_once(self, test_name):
        scripts = os.path.join(self.srcdir, "scripts")
        os.chdir(scripts)

        if test_name == "setup":
            return

        cmd = "python ./%s -v" % test_name
        self.results = utils.system_output(cmd, retain_output=True)
Example #30
def get_property(pool, pr):
    """Return the current value of property 'pr' on the given pool."""
    if pool == "":
        raise error.TestFail("cannot get property : Missing pool name..")
    if pr == "":
        raise error.TestFail("cannot get property : Missing property name..")
    val = utils.system_output("zpool get " + pr + " " + pool +
                              " | awk '/" + pr + "/{print $3}'")
    val = re.sub('\n', "", val)
    return val
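A sketch tying the two zpool property helpers together (set_property() is defined in Example #14 above); the property name is just an example, and TESTPOOL/SUCCESS are the usual module constants:

# Flip autoexpand on a (hypothetical) test pool and read the value back.
if set_property(TESTPOOL, "autoexpand", "on") != SUCCESS:
    raise error.TestFail("could not set autoexpand on %s" % TESTPOOL)
logging.info("autoexpand is now %s", get_property(TESTPOOL, "autoexpand"))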
def is_adb_connected():
    """Return true if adb is connected to the container."""
    output = utils.system_output('adb get-state', ignore_status=True)
    logging.debug('adb get-state: %s', output)
    return output.strip() == 'device'
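A hedged sketch of how a test might poll is_adb_connected() rather than calling it once; the timeout and interval values are arbitrary choices:

import time

def wait_for_adb(timeout=30, interval=1):
    # Poll until 'adb get-state' reports 'device' or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if is_adb_connected():
            return True
        time.sleep(interval)
    return False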
def get_obb_mounter_pid():
    """Returns the PID of the OBB mounter."""
    return utils.system_output('pgrep -f -u root ^/usr/bin/arc-obb-mounter')
Example #33
 def get_regulatory_domains(self):
     """Get the list or regulatory domains in the DUT's database."""
     return utils.system_output('regdbdump %s | grep country | '
                                'sed -e s/^country.// -e s/:.*//' %
                                self.REGULATORY_DATABASE).split()
 def _IpTables(self, command):
     # Run, log, return output
     return utils.system_output('%s %s' % (self.IPTABLES, command),
                                retain_output=True)
Example #35
 def _ps(self, proc=constants.BROWSER):
     pscmd = 'ps -C %s -o pid --no-header | head -1' % proc
     return utils.system_output(pscmd)
Example #36
 def get_log_contents(self):
     """Return the logfiles from the chroot."""
     return utils.system_output("head -10000 %s" %
                                self.chroot_path("var/log/*"))
Example #37
    def __make_libvirt_command(self, name=None, params=None, root_dir=None):
        """
        Generate a libvirt command line. All parameters are optional. If a
        parameter is not supplied, the corresponding value stored in the
        class attributes is used.

        @param name: The name of the object
        @param params: A dict containing VM params
        @param root_dir: Base directory for relative filenames

        @note: The params dict should contain:
               mem -- memory size in MBs
               cdrom -- ISO filename to use with the qemu -cdrom parameter
               extra_params -- a string to append to the qemu command
               shell_port -- port of the remote shell daemon on the guest
               (SSH, Telnet or the home-made Remote Shell Server)
               shell_client -- client program to use for connecting to the
               remote shell daemon on the guest (ssh, telnet or nc)
               x11_display -- if specified, the DISPLAY environment variable
               will be set to this value for the qemu process (useful for
               SDL rendering)
               images -- a list of image object names, separated by spaces
               nics -- a list of NIC object names, separated by spaces

               For each image in images:
               drive_format -- string to pass as 'if' parameter for this
               image (e.g. ide, scsi)
               image_snapshot -- if yes, pass 'snapshot=on' to qemu for
               this image
               image_boot -- if yes, pass 'boot=on' to qemu for this image
               In addition, all parameters required by get_image_filename.

               For each NIC in nics:
               nic_model -- string to pass as 'model' parameter for this
               NIC (e.g. e1000)
        """

        # helper function for command line option wrappers
        def has_option(help, option):
            return bool(re.search(r"--%s" % option, help, re.MULTILINE))

        # Wrappers for all supported libvirt command line parameters.
        # This is meant to allow support for multiple libvirt versions.
        # Each of these functions receives the output of 'libvirt --help' as a
        # parameter, and should add the requested command line option
        # accordingly.

        def add_name(help, name):
            return " --name '%s'" % name

        def add_hvm_or_pv(help, hvm_or_pv):
            if hvm_or_pv == "hvm":
                return " --hvm --accelerate"
            elif hvm_or_pv == "pv":
                return " --paravirt"
            else:
                logging.warning("Unknown virt type hvm_or_pv, using default.")
                return ""

        def add_mem(help, mem):
            return " --ram=%s" % mem

        def add_check_cpu(help):
            if has_option(help, "check-cpu"):
                return " --check-cpu"
            else:
                return ""

        def add_smp(help, smp):
            return " --vcpu=%s" % smp

        def add_location(help, location):
            if has_option(help, "location"):
                return " --location %s" % location
            else:
                return ""

        def add_cdrom(help, filename, index=None):
            if has_option(help, "cdrom"):
                return " --cdrom %s" % filename
            else:
                return ""

        def add_pxe(help):
            if has_option(help, "pxe"):
                return " --pxe"
            else:
                return ""

        def add_drive(help,
                      filename,
                      pool=None,
                      vol=None,
                      device=None,
                      bus=None,
                      perms=None,
                      size=None,
                      sparse=False,
                      cache=None,
                      format=None):
            cmd = " --disk"
            if filename:
                cmd += " path=%s" % filename
            elif pool:
                if vol:
                    cmd += " vol=%s/%s" % (pool, vol)
                else:
                    cmd += " pool=%s" % pool
            if device:
                cmd += ",device=%s" % device
            if bus:
                cmd += ",bus=%s" % bus
            if perms:
                cmd += ",%s" % perms
            if size:
                cmd += ",size=%s" % size.rstrip("Gg")
            if sparse:
                cmd += ",sparse=false"
            if format:
                cmd += ",format=%s" % format
            return cmd

        def add_floppy(help, filename):
            return " --disk path=%s,device=floppy,ro" % filename

        def add_vnc(help, vnc_port):
            return " --vnc --vncport=%d" % (vnc_port)

        def add_vnclisten(help, vnclisten):
            return " --vnclisten=%s " % (vnclisten)

        def add_sdl(help):
            if has_option(help, "sdl"):
                return " --sdl"
            else:
                return ""

        def add_nographic(help):
            return " --nographics"

        def add_video(help, video_device):
            if has_option(help, "video"):
                return " --video=%s" % (video_device)
            else:
                return ""

        def add_uuid(help, uuid):
            if has_option(help, "uuid"):
                return " --uuid %s" % uuid
            else:
                return ""

        def add_os_type(help, os_type):
            if has_option(help, "os-type"):
                return " --os-type %s" % os_type
            else:
                return ""

        def add_os_variant(help, os_variant):
            if has_option(help, "os-variant"):
                return " --os-variant %s" % os_variant
            else:
                return ""

        def add_pcidevice(help, pci_device):
            if has_option(help, "host-device"):
                return " --host-device %s" % pci_device
            else:
                return ""

        def add_soundhw(help, sound_device):
            if has_option(help, "soundhw"):
                return " --soundhw %s" % sound_device
            else:
                return ""

        def add_serial(help, filename):
            if has_option(help, "serial"):
                return "  --serial file,path=%s --serial pty" % filename
            else:
                return ""

        def add_kernel_cmdline(help, cmdline):
            return " -append %s" % cmdline

        def add_connect_uri(help, uri):
            if has_option(help, "connect"):
                return " --connect=%s" % uri
            else:
                return ""

        # End of command line option wrappers

        if name is None:
            name = self.name
        if params is None:
            params = self.params
        if root_dir is None:
            root_dir = self.root_dir

        # Clone this VM using the new params
        vm = self.clone(name, params, root_dir, copy_state=True)

        virt_install_binary = virt_utils.get_path(
            root_dir, params.get("virt_install_binary", "virt-install"))

        help = utils.system_output("%s --help" % virt_install_binary)

        # Start constructing the qemu command
        virt_install_cmd = ""
        # Set the X11 display parameter if requested
        if params.get("x11_display"):
            virt_install_cmd += "DISPLAY=%s " % params.get("x11_display")
        # Add the qemu binary
        virt_install_cmd += virt_install_binary

        # set connect uri
        virt_install_cmd += add_connect_uri(help, self.connect_uri)

        # hvm or pv specified by libvirt switch (pv used by Xen only)
        hvm_or_pv = params.get("hvm_or_pv")
        if hvm_or_pv:
            virt_install_cmd += add_hvm_or_pv(help, hvm_or_pv)

        # Add the VM's name
        virt_install_cmd += add_name(help, name)

        mem = params.get("mem")
        if mem:
            virt_install_cmd += add_mem(help, mem)

        # TODO: should we do the check before we call ? negative case ?
        check_cpu = params.get("use_check_cpu")
        if check_cpu:
            virt_install_cmd += add_check_cpu(help)

        smp = params.get("smp")
        if smp:
            virt_install_cmd += add_smp(help, smp)

        # libvirt expects --location <path>/images/pxeboot/<vmlinuz|initrd>
        location = None
        if params.get("medium") == 'url':
            location = params.get('url')

        elif params.get("medium") == 'kernel_initrd':
            # directory location of kernel/initrd pair (directory layout must
            # be in format libvirt will recognize)
            location = params.get("image_dir")

        elif params.get("medium") == 'nfs':
            location = "nfs:%s:%s" % (params.get("nfs_server"),
                                      params.get("nfs_dir"))

        elif params.get("medium") == 'cdrom':
            if params.get("use_libvirt_cdrom_switch") == 'yes':
                virt_install_cmd += add_cdrom(help, params.get("cdrom_cd1"))
            elif ((self.driver_type == self.LIBVIRT_XEN)
                  and (params.get('hvm_or_pv') == 'hvm')):
                virt_install_cmd += add_cdrom(help,
                                              params.get("cdrom_unattended"))
            else:
                # Fake images/pxeboot using relative symlinks
                # Assumes kernel and initrd were copied to same dir
                # TODO: This and corresponding add_cdrom() in unattended_install test
                #       should be much cleaner.
                location = os.path.dirname(params.get("kernel"))
                try:
                    os.symlink(".", os.path.join(location, "images"))
                    os.symlink(".", os.path.join(location, "pxeboot"))
                except OSError:
                    pass  # ignore if already exists

        if location:
            virt_install_cmd += add_location(help, location)

        if params.get("display") == "vnc":
            if params.get("vnc_port"):
                vm.vnc_port = int(params.get("vnc_port"))
            virt_install_cmd += add_vnc(help, vm.vnc_port)
            if params.get("vnclisten"):
                vm.vnclisten = params.get("vnclisten")
            virt_install_cmd += add_vnclisten(help, vm.vnclisten)
        elif params.get("display") == "sdl":
            virt_install_cmd += add_sdl(help)
        elif params.get("display") == "nographic":
            virt_install_cmd += add_nographic(help)

        video_device = params.get("video_device")
        if video_device:
            virt_install_cmd += add_video(help, video_device)

        sound_device = params.get("sound_device")
        if sound_device:
            virt_install_cmd += add_soundhw(help, sound_device)

        # if none is given a random UUID will be generated by libvirt
        if params.get("uuid"):
            virt_install_cmd += add_uuid(help, params.get("uuid"))

        # selectable OS type
        if params.get("use_os_type") == "yes":
            virt_install_cmd += add_os_type(help, params.get("os_type"))

        # selectable OS variant
        if params.get("use_os_variant") == "yes":
            virt_install_cmd += add_os_variant(help, params.get("os_variant"))

        # If the PCI assignment step went OK, add each one of the PCI assigned
        # devices to the command line.
        if self.pci_devices:
            for pci_id in self.pci_devices:
                virt_install_cmd += add_pcidevice(help, pci_id)

        for image_name in params.objects("images"):
            image_params = params.object_params(image_name)
            filename = virt_vm.get_image_filename(image_params, root_dir)
            if image_params.get("use_storage_pool") == "yes":
                filename = None
            if image_params.get("boot_drive") == "no":
                continue
            virt_install_cmd += add_drive(help, filename,
                                          image_params.get("image_pool"),
                                          image_params.get("image_vol"),
                                          image_params.get("image_device"),
                                          image_params.get("image_bus"),
                                          image_params.get("image_perms"),
                                          image_params.get("image_size"),
                                          image_params.get("drive_sparse"),
                                          image_params.get("drive_cache"),
                                          image_params.get("image_format"))

        if self.driver_type == self.LIBVIRT_QEMU:
            for cdrom in params.objects("cdroms"):
                cdrom_params = params.object_params(cdrom)
                iso = cdrom_params.get("cdrom")
                if params.get("use_libvirt_cdrom_switch") == 'yes':
                    # we don't want to skip the winutils iso
                    if not cdrom == 'winutils':
                        logging.debug(
                            "Using --cdrom instead of --disk for install")
                        logging.debug("Skipping CDROM:%s:%s", cdrom, iso)
                        continue
                if params.get("medium") == 'cdrom_no_kernel_initrd':
                    if iso == params.get("cdrom_cd1"):
                        logging.debug("Using cdrom or url for install")
                        logging.debug("Skipping CDROM: %s", iso)
                        continue

                if iso:
                    virt_install_cmd += add_drive(
                        help, virt_utils.get_path(root_dir, iso),
                        image_params.get("iso_image_pool"),
                        image_params.get("iso_image_vol"), 'cdrom', None, None,
                        None, None, None, None)

        # We may want to add {floppy_otps} parameter for -fda
        # {fat:floppy:}/path/. However vvfat is not usually recommended.
        floppy = params.get("floppy")
        if floppy:
            floppy = virt_utils.get_path(root_dir, floppy)
            virt_install_cmd += add_drive(help, floppy, None, None, 'floppy',
                                          None, None, None, None, None, None)

        # FIXME: for now in the pilot always add mac address to virt-install
        vlan = 0
        mac = vm.get_mac_address(vlan)
        if mac:
            virt_install_cmd += " --mac %s" % mac
            self.nic_mac = mac

        if self.driver_type == self.LIBVIRT_XEN:
            virt_install_cmd += (" --network=%s" % params.get("virsh_network"))
        elif self.driver_type == self.LIBVIRT_QEMU:
            virt_install_cmd += (
                " --network=%s,model=%s" %
                (params.get("virsh_network"), params.get("nic_model")))

        if params.get("use_no_reboot") == "yes":
            virt_install_cmd += " --noreboot"

        if params.get("use_autostart") == "yes":
            virt_install_cmd += " --autostart"

        if params.get("virt_install_debug") == "yes":
            virt_install_cmd += " --debug"

        # bz still open, not fully functional yet
        if params.get("use_virt_install_wait") == "yes":
            virt_install_cmd += (" --wait %s" %
                                 params.get("virt_install_wait_time"))

        kernel_params = params.get("kernel_params")
        if kernel_params:
            virt_install_cmd += " --extra-args '%s'" % kernel_params

        virt_install_cmd += " --noautoconsole"

        return virt_install_cmd
Example #38
 def _is_android_booted():
     output = utils.system_output(
         'android-sh -c "getprop sys.boot_completed"', ignore_status=True)
     return output.strip() == '1'
Example #39
            try:
                session.cmd_output(dd_cmd, timeout=360)
            except aexpect.ShellCmdError, e:
                return failure
        else:
            tcpdump_cmd += " and dst %s" % guest_ip
            copy_files_from = vm.copy_files_to
            try:
                utils.system(dd_cmd)
            except error.CmdError, e:
                return failure

        # only capture the new tcp port after offload setup
        original_tcp_ports = re.findall(
            "tcp.*:(\d+).*%s" % guest_ip,
            utils.system_output("/bin/netstat -nap"))
        for i in original_tcp_ports:
            tcpdump_cmd += " and not port %s" % i
        logging.debug("Listen using command: %s", tcpdump_cmd)
        session2.sendline(tcpdump_cmd)
        if not virt_utils.wait_for(
                lambda: session.cmd_status("pgrep tcpdump") == 0, 30):
            return (False, "Tcpdump process wasn't launched")

        logging.info("Start to transfer file")
        try:
            copy_files_from(filename, filename)
        except virt_utils.SCPError, e:
            return (False, "File transfer failed (%s)" % e)
        logging.info("Transfer file completed")
        session.cmd("killall tcpdump")
Example #40
 def run_once(self, dir = None, nprocs = None, args = ''):
     cmd = self.srcdir + '/tests/pfm_tests' + args
     # self.results.append(utils.system_output(cmd, retain_output=True))
     if 'FAIL' in utils.system_output(cmd, retain_output=True):
         raise error.TestError('some perfmon tests failed')
    def run_screenshot_comparison_test(self):
        """
        Template method to run screenshot comparison tests for ui pieces.

        1. Set up test dirs.
        2. Create folder name
        3. Download golden image.
        4. Capture test image.
        5. Compare images locally, if FAIL upload to remote for analysis later.
        6. Clean up test dirs.

        """

        img_comp_conf_path = os.path.join(ui_TestBase.AUTOTEST_CROS_UI_DIR,
                                          ui_TestBase.IMG_COMP_CONF_FILE)

        img_comp_factory = image_comparison_factory.ImageComparisonFactory(
            img_comp_conf_path)

        golden_image_local_dir = os.path.join(ui_TestBase.WORKING_DIR,
                                              'golden_images')

        file_utils.make_leaf_dir(golden_image_local_dir)

        filename = '%s.png' % self.tagged_testname

        golden_image_remote_path = os.path.join(
            ui_TestBase.REMOTE_DIR, 'ui',
            lsbrelease_utils.get_chrome_milestone(), self.folder_name,
            filename)

        golden_image_local_path = os.path.join(golden_image_local_dir,
                                               filename)

        test_image_filepath = os.path.join(ui_TestBase.WORKING_DIR, filename)

        try:
            file_utils.download_file(golden_image_remote_path,
                                     golden_image_local_path)
        except urllib2.HTTPError as e:
            warn = "No screenshot found for {0} on milestone {1}. ".format(
                self.tagged_testname, lsbrelease_utils.get_chrome_milestone())
            warn += e.msg
            raise error.TestWarn(warn)

        self.capture_screenshot(test_image_filepath)

        comparer = img_comp_factory.make_pdiff_comparer()
        comp_res = comparer.compare(golden_image_local_path,
                                    test_image_filepath)

        if comp_res.diff_pixel_count > img_comp_factory.pixel_thres:
            publisher = img_comp_factory.make_imagediff_publisher(
                self.resultsdir)

            # get chrome version
            version_string = utils.system_output(
                constants.CHROME_VERSION_COMMAND, ignore_status=True)
            version_string = utils.parse_chrome_version(version_string)[0]

            # tags for publishing
            tags = {
                'testname': self.tagged_testname,
                'chromeos_version': utils.get_chromeos_release_version(),
                'chrome_version': version_string,
                'board': utils.get_board(),
                'date': datetime.date.today().strftime("%m/%d/%y"),
                'diff_pixels': comp_res.diff_pixel_count
            }

            publisher.publish(golden_image_local_path, test_image_filepath,
                              comp_res.pdiff_image_path, tags)

            raise error.TestFail('Test Failed. Please see image comparison '
                                 'result by opening index.html from the '
                                 'results directory.')

        file_utils.rm_dir_if_exists(ui_TestBase.WORKING_DIR)
 def _ps(self, proc=constants.BROWSER):
     """Grab the oldest pid for process |proc|."""
     pscmd = 'ps -C %s -o pid --no-header | head -1' % proc
     return utils.system_output(pscmd)
    def run_once(self, run_time_sec=60):
        if run_time_sec < 10:
            raise error.TestFail('Must run for at least 10 seconds')

        with chrome.Chrome():
            # Audio loop time should be significantly shorter than
            # |run_time_sec| time, so that the total playback time doesn't
            # exceed it by much.
            audio_loop_time_sec = min(10, run_time_sec / 10 + 0.5)

            # Set a low audio volume to avoid annoying people during tests.
            audio_helper.set_volume_levels(10, 100)

            # Start a subprocess that uses dbus-monitor to listen for suspend
            # announcements from powerd and writes the output to a log.
            dbus_log_fd, dbus_log_name = tempfile.mkstemp()
            os.unlink(dbus_log_name)
            dbus_log = os.fdopen(dbus_log_fd)
            dbus_proc = subprocess.Popen(
                'dbus-monitor --monitor --system ' +
                '"type=\'signal\',interface=\'org.chromium.PowerManager\',' +
                'member=\'SuspendImminent\'"',
                shell=True,
                stdout=dbus_log)

            # Start playing audio file.
            self._enable_audio_playback = True
            thread = threading.Thread(target=self._play_audio,
                                      args=(audio_loop_time_sec, ))
            thread.start()

            # Restart powerd with timeouts for quick idle events.
            gap_ms = run_time_sec * 1000 / 4
            dim_ms = min(10000, gap_ms)
            off_ms = min(20000, gap_ms * 2)
            suspend_ms = min(30000, gap_ms * 3)
            prefs = {
                'disable_idle_suspend': 0,
                'ignore_external_policy': 1,
                'plugged_dim_ms': dim_ms,
                'plugged_off_ms': off_ms,
                'plugged_suspend_ms': suspend_ms,
                'unplugged_dim_ms': dim_ms,
                'unplugged_off_ms': off_ms,
                'unplugged_suspend_ms': suspend_ms
            }
            self._pref_change = power_utils.PowerPrefChanger(prefs)

            # Set an alarm to wake up the system in case the audio detector
            # fails and the system suspends.
            alarm_time = rtc.get_seconds() + run_time_sec
            rtc.set_wake_alarm(alarm_time)

            time.sleep(run_time_sec)

            # Stop powerd to avoid suspending when the audio stops.
            utils.system_output('stop powerd')

            # Stop audio and wait for the audio thread to terminate.
            self._enable_audio_playback = False
            thread.join(timeout=(audio_loop_time_sec * 2))
            if thread.is_alive():
                logging.error('Audio thread did not terminate at end of test.')

            # Check the D-Bus log to make sure that no suspend took place.
            # dbus-monitor logs messages about its initial connection to the bus
            # in addition to the signals that we asked it for, so look for the
            # signal name in its output.
            dbus_proc.kill()
            dbus_log.seek(0)
            if 'SuspendImminent' in dbus_log.read():
                err_str = 'System suspended while audio was playing.'
                raise error.TestFail(err_str)
    def run_once(self):
        with chrome.Chrome(logged_in=self._logged_in) as cr:
            username = (cr.username if self._logged_in
                                    else cryptohome.GUEST_USER_NAME)

            """Check permissions within cryptohome for anything too permissive.
            """
            passes = []

            homepath = "/home/chronos"
            passes.append(self.check_owner_mode(homepath, "chronos", 0755))

            user_mountpt = cryptohome.user_path(username)
            passes.append(self.check_owner_mode(user_mountpt, "chronos",
                                                self._HOMEDIR_MODE))

            # TODO(benchan): Refactor the following code to use some helper
            # functions instead of find commands.

            # An array of shell commands, each representing a test that
            # passes if it emits no output. The first test is the main one.
            # In general, writable by anyone else is bad, as is owned by
            # anyone else. Any exceptions to that are pruned out of the
            # first test and checked individually by subsequent tests.
            cmds = [
                ('find -L "%s" -path "%s" -o '
                 # Avoid false-positives on SingletonLock, SingletonCookie, etc.
                 ' \\( -name "Singleton*" -a -type l \\) -o '
                 ' -path "%s/user" -prune -o '
                 ' -path "%s/Downloads" -prune -o '
                 ' -path "%s/flimflam" -prune -o '
                 ' -path "%s/shill" -prune -o '
                 ' -path "%s/.chaps" -prune -o '
                 ' -path "%s/u-*" -prune -o '
                 ' -path "%s/crash" -prune -o '
                 ' \\( -perm /022 -o \\! -user chronos \\) -ls') %
                (homepath, homepath, homepath, user_mountpt, user_mountpt,
                user_mountpt, user_mountpt, homepath, homepath),
                # /home/chronos/user and /home/chronos/user/Downloads are owned
                # by the chronos-access group and with a group execute
                # permission.
                'find -L "%s" -maxdepth 0 \\( \\! -perm 710 '
                '-o \\! -user chronos -o \\! -group chronos-access \\) -ls' %
                user_mountpt,
                'find -L "%s/Downloads" -maxdepth 0 \\( \\! -perm 710 '
                '-o \\! -user chronos -o \\! -group chronos-access \\) -ls' %
                user_mountpt,
                'find -L "%s/flimflam" \\( -perm /077 -o \\! -user root \\) -ls'
                % user_mountpt,
                'find -L "%s/shill" \\( -perm /077 -o \\! -user root \\) -ls' %
                user_mountpt,
                'find -L "%s/.chaps -name auth_data_salt -prune -o '
                '\\! -user chaps -o \\! -group chronos-access -o -perm /027 -ls'
                % user_mountpt,
                'find -L "%s/.chaps -name auth_data_salt -a '
                '\\( \\! -user root -o -perm /077 \\) -ls' % user_mountpt,
            ]

            for cmd in cmds:
                cmd_output = utils.system_output(cmd, ignore_status=True)
                if cmd_output:
                    passes.append(False)
                    logging.error(cmd_output)

            # This next section only applies if we have a real vault mounted
            # (ie, not a BWSI tmpfs).
            if cryptohome.is_permanent_vault_mounted(username):
                # Also check the permissions of the underlying vault and
                # supporting directory structure.
                mountpath = cryptohome.get_mounted_vault_path(username)

                # On ecryptfs backend, there's a 'vault' directory storing the
                # encrypted data. If it exists, check its ownership as well.
                vaultpath = os.path.join(mountpath, '../vault')
                if os.path.exists(vaultpath):
                    passes.append(self.check_owner_mode(vaultpath,
                                                        "root", 0700))
                passes.append(self.check_owner_mode(mountpath, "root", 0700))
                passes.append(self.check_owner_mode(mountpath + "/../master.0",
                                                    "root", 0600))
                passes.append(self.check_owner_mode(mountpath + "/../",
                                                    "root", 0700))
                passes.append(self.check_owner_mode(mountpath + "/../../",
                                                    "root", 0700))

            if False in passes:
                raise error.TestFail(
                    'Bad permissions found on cryptohome files')
    def run_once(self, rootdir="/", args=[]):
        """
        Do a find for all the ELF files on the system.
        For each one, test for compiler options that should have been used
        when compiling the file.

        For missing compiler options, print the files.
        """

        parser = OptionParser()
        parser.add_option('--hardfp',
                          dest='enable_hardfp',
                          default=False,
                          action='store_true',
                          help='Whether to check for hardfp binaries.')
        (options, args) = parser.parse_args(args)

        option_sets = []

        libc_glob = "/lib/libc-[0-9]*"

        readelf_cmd = glob.glob("/usr/local/*/binutils-bin/*/readelf")[0]

        # We do not test binaries if they are built with Address Sanitizer
        # because it is a separate testing tool.
        no_asan_used = utils.system_output("%s -s "
                                           "/opt/google/chrome/chrome | "
                                           "egrep -q \"__asan_init\" || "
                                           "echo no ASAN" % readelf_cmd)
        if not no_asan_used:
            logging.debug("ASAN detected on /opt/google/chrome/chrome. "
                          "Will skip all checks.")
            return

        # Check that gold was used to build binaries.
        # TODO(jorgelo): re-enable this check once crbug.com/417912 is fixed.
        # gold_cmd = ("%s -S {} 2>&1 | "
        #             "egrep -q \".note.gnu.gold-ve\"" % readelf_cmd)
        # gold_find_options = ""
        # if utils.get_cpu_arch() == "arm":
        #     # gold is only enabled for Chrome on ARM.
        #     gold_find_options = "-path \"/opt/google/chrome/chrome\""
        # gold_whitelist = os.path.join(self.bindir, "gold_whitelist")
        # option_sets.append(self.create_and_filter("gold",
        #                                           gold_cmd,
        #                                           gold_whitelist,
        #                                           gold_find_options))

        # Verify non-static binaries have BIND_NOW in dynamic section.
        now_cmd = ("(%s {} | grep -q statically) ||"
                   "%s -d {} 2>&1 | "
                   "egrep -q \"BIND_NOW\"" % (FILE_CMD, readelf_cmd))
        now_whitelist = os.path.join(self.bindir, "now_whitelist")
        option_sets.append(self.create_and_filter("-Wl,-z,now",
                                                  now_cmd,
                                                  now_whitelist))

        # Verify non-static binaries have RELRO program header.
        relro_cmd = ("(%s {} | grep -q statically) ||"
                     "%s -l {} 2>&1 | "
                     "egrep -q \"GNU_RELRO\"" % (FILE_CMD, readelf_cmd))
        relro_whitelist = os.path.join(self.bindir, "relro_whitelist")
        option_sets.append(self.create_and_filter("-Wl,-z,relro",
                                                  relro_cmd,
                                                  relro_whitelist))

        # Verify non-static binaries are dynamic (built PIE).
        pie_cmd = ("(%s {} | grep -q statically) ||"
                   "%s -l {} 2>&1 | "
                   "egrep -q \"Elf file type is DYN\"" % (FILE_CMD,
                                                          readelf_cmd))
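        # (readelf reports a PIE as 'Elf file type is DYN'; statically linked
        # binaries are excluded first via the file(1) check.)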
        pie_whitelist = os.path.join(self.bindir, "pie_whitelist")
        option_sets.append(self.create_and_filter("-fPIE",
                                                  pie_cmd,
                                                  pie_whitelist))

        # Verify ELFs don't include TEXTRELs.
        # FIXME: Remove the i?86 filter after the bug is fixed.
        # crbug.com/686926
        if (utils.get_current_kernel_arch() not in
                ('i%d86' % i for i in xrange(3,7))):
            textrel_cmd = ("(%s {} | grep -q statically) ||"
                           "%s -d {} 2>&1 | "
                           "(egrep -q \"0x0+16..TEXTREL\"; [ $? -ne 0 ])"
                           % (FILE_CMD, readelf_cmd))
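            # (DT_TEXTREL (tag 0x16) means the text segment needs relocations
            # and must be writable at load time; the trailing '[ $? -ne 0 ]'
            # inverts the egrep, so the command succeeds only when no TEXTREL
            # entry is present.)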
            textrel_whitelist = os.path.join(self.bindir, "textrel_whitelist")
            option_sets.append(self.create_and_filter("TEXTREL",
                                                      textrel_cmd,
                                                      textrel_whitelist))

        # Verify all binaries have non-exec STACK program header.
        stack_cmd = ("%s -lW {} 2>&1 | "
                     "egrep -q \"GNU_STACK.*RW \"" % readelf_cmd)
        stack_whitelist = os.path.join(self.bindir, "stack_whitelist")
        option_sets.append(self.create_and_filter("Executable Stack",
                                                  stack_cmd,
                                                  stack_whitelist))

        # Verify all binaries have W^X LOAD program headers.
        loadwx_cmd = ("%s -lW {} 2>&1 | "
                      "grep \"LOAD\" | egrep -v \"(RW |R E)\" | "
                      "wc -l | grep -q \"^0$\"" % readelf_cmd)
        loadwx_whitelist = os.path.join(self.bindir, "loadwx_whitelist")
        option_sets.append(self.create_and_filter("LOAD Writable and Exec",
                                                  loadwx_cmd,
                                                  loadwx_whitelist))

        # Verify ARM binaries are all using VFP registers.
        if (options.enable_hardfp and utils.get_cpu_arch() == 'arm'):
            hardfp_cmd = ("%s -A {} 2>&1 | "
                          "egrep -q \"Tag_ABI_VFP_args: VFP registers\"" %
                          readelf_cmd)
            hardfp_whitelist = os.path.join(self.bindir, "hardfp_whitelist")
            option_sets.append(self.create_and_filter("hardfp", hardfp_cmd,
                                                      hardfp_whitelist))

        fail_msg = ""

        # There is currently no way to clear binary prebuilts for all devs.
        # Thus, when a new check is added to this test, the test might fail
        # for users whose old prebuilts were not compiled with the expected
        # options.
        fail_summaries = []
        full_msg = "Test results:"
        num_fails = 0
        for cos in option_sets:
            if len(cos.filtered_set):
                num_fails += 1
                fail_msg += cos.get_fail_message() + "\n"
                fail_summaries.append(cos.get_fail_summary_message())
            full_msg += str(cos) + "\n\n"
        fail_summary_msg = ", ".join(fail_summaries)

        if fail_msg:
            logging.error(fail_msg)
        logging.debug(full_msg)
        if num_fails:
            raise error.TestFail(fail_summary_msg)
Example #46
0
 def list_all(self):
     """
     List all installed packages.
     """
     installed_packages = utils.system_output('rpm -qa').splitlines()
     return installed_packages
Example #47
0
def get_disk_list(std_mounts_only=True):
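    """
    Build a list of dictionaries describing the disk partitions listed in
    /proc/partitions: device name, mount point, tunable device path,
    filesystem type/options and mount status. If std_mounts_only is True,
    partitions mounted somewhere other than their standard mount point
    (or not mounted read-write) are skipped.
    """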

    # Get hold of the currently mounted file systems
    mounts = utils.system_output('mount').splitlines()

    # Grab all the interesting disk partition names from /proc/partitions,
    # and build up the table of drives present in the system.
    hd_list = []
    hd_regexp = re.compile("([hs]d[a-z]+3)$")

    partfile = open(_DISKPART_FILE)
    for partline in partfile:
        parts = partline.strip().split()
        if len(parts) != 4 or partline.startswith('major'):
            continue

        # Get hold of the partition name
        partname = parts[3]

        # The partition name better end with a digit
        if not partname[-1:].isdigit():
            continue

        # Process any site-specific filters on the partition name
        if not fd_mgr.use_partition(partname):
            continue

        # We need to know the IDE/SATA/... device name for setting tunables
        tunepath = fd_mgr.map_drive_name(partname)

        # Check whether the device is mounted (and how)
        mstat = 0
        fstype = ''
        fsopts = ''
        fsmkfs = '?'

        # Prepare the full device path for matching
        chkdev = '/dev/' + partname

        # If the partition is mounted, we'll record the mount point
        mountpt = None

        for mln in mounts:

            splt = mln.split()

            # Typical 'mount' output line looks like this (indices
            # for the split() result shown below):
            #
            #    <device> on <mount_point> type <fstp> <options>
            #    0        1  2             3    4      5
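            #    e.g. '/dev/sda3 on / type ext4 (rw,relatime)' yields
            #    splt[0]='/dev/sda3', splt[2]='/', splt[4]='ext4',
            #    splt[5]='(rw,relatime)'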

            if splt[0] == chkdev:

                # Make sure the mount point looks reasonable
                mountpt = fd_mgr.check_mount_point(partname, splt[2])
                if not mountpt:
                    mstat = -1
                    break

                # Grab the file system type and mount options
                fstype = splt[4]
                fsopts = splt[5]

                # Check for something other than a r/w mount
                if fsopts[:3] != '(rw':
                    mstat = -1
                    break

                # The drive is mounted at the 'normal' mount point
                mstat = 1

        # Does the caller only want to allow 'standard' mount points?
        if std_mounts_only and mstat < 0:
            continue

        # Was this partition mounted at all?
        if not mountpt:
            # Ask the client where we should mount this partition
            mountpt = fd_mgr.check_mount_point(partname, None)
            if not mountpt:
                # Client doesn't know where to mount partition - ignore it
                continue

        # Looks like we have a valid disk drive, add it to the list
        hd_list.append({
            'device': partname,
            'mountpt': mountpt,
            'tunable': tunepath,
            'fs_type': fstype,
            'fs_opts': fsopts,
            'fs_mkfs': fsmkfs,
            'mounted': mstat
        })

    return hd_list
Example #48
0
 def exists(self):
     """Checks if the service is present in systemd configuration."""
     cmd = 'systemctl show -p ActiveState %s.service' % self._service_name
     output = utils.system_output(cmd, ignore_status=True).strip()
     return output == 'ActiveState=active'
Example #49
0
def _pidsof(exe_name):
    """Returns the PIDs of processes with the given name as a list."""
    output = utils.system_output('pidof %s' % exe_name,
                                 ignore_status=True).strip()
    return [int(pid) for pid in output.split()]
Example #50
0
    def __init__(self,
                 logdir,
                 method=sys_power.do_suspend,
                 throw=False,
                 device_times=False,
                 suspend_state=''):
        """
        Prepare environment for suspending.
        @param suspend_state: Suspend state to enter into. It can be
                              'mem' or 'freeze' or an empty string. If
                              the suspend state is an empty string,
                              system suspends to the default pref.
        """
        self.disconnect_3G_time = 0
        self.successes = []
        self.failures = []
        self._logdir = logdir
        self._suspend = method
        self._throw = throw
        self._reset_pm_print_times = False
        self._restart_tlsdated = False
        self._log_file = None
        self._suspend_state = suspend_state
        if device_times:
            self.device_times = []

        # Stop tlsdated so that we (and hwclock) have /dev/rtc to ourselves.
        if utils.system_output('initctl status tlsdated').find('start') != -1:
            utils.system('initctl stop tlsdated')
            self._restart_tlsdated = True
            # give process's file descriptors time to asynchronously tear down
            time.sleep(0.1)

        # prime powerd_suspend RTC timestamp saving and make sure hwclock works
        utils.open_write_close(self.HWCLOCK_FILE, '')
        hwclock_output = utils.system_output('hwclock -r --verbose --utc',
                                             ignore_status=True)
        if not re.search('Using.*rtc interface to.*clock', hwclock_output):
            raise error.TestError('hwclock cannot find rtc: ' + hwclock_output)

        # activate device suspend timing debug output
        if hasattr(self, 'device_times'):
            if not int(utils.read_one_line('/sys/power/pm_print_times')):
                self._set_pm_print_times(True)
                self._reset_pm_print_times = True

        # Shut down 3G to remove its variability from suspend time measurements
        flim = flimflam.FlimFlam()
        service = flim.FindCellularService(0)
        if service:
            logging.info('Found 3G interface, disconnecting.')
            start_time = time.time()
            (success, status) = flim.DisconnectService(service=service,
                                                       wait_timeout=60)
            if success:
                logging.info('3G disconnected successfully.')
                self.disconnect_3G_time = time.time() - start_time
            else:
                logging.error('Could not disconnect: %s.', status)
                self.disconnect_3G_time = -1

        self._configure_suspend_state()
Example #51
0
def list_mount_points():
    mountpoints = []
    for line in utils.system_output('mount').splitlines():
        mountpoints.append(line.split()[2])
    return mountpoints
Example #52
0
def main():
    # Call the original parser.
    parse.main()

    # Results directory should be the last argument passed in.
    results_dir = sys.argv[-1]

    # Load the Chrome OS source tree location.
    cros_src_dir = global_config.global_config.get_config_value('CROS',
                                                                'source_tree',
                                                                default='')

    # We want the standard Autotest parser to keep working even if we haven't
    # been setup properly.
    if not cros_src_dir:
        tko_utils.dprint(
            'Unable to load required components for site parser. Falling back'
            ' to default parser.')
        return

    # Load ResultCollector from the Chrome OS source tree.
    sys.path.append(
        os.path.join(cros_src_dir, 'src/platform/crostestutils/utils_py'))
    from generate_test_report import ResultCollector

    # Collect results using the standard Chrome OS test report generator. Doing
    # so allows us to use the same crash whitelist and reporting standards that
    # the VM-based test instances use.
    # TODO(scottz): Reevaluate this code usage. crosbug.com/35282
    results = ResultCollector().RecursivelyCollectResults(results_dir)
    # We don't care about successful tests. We only want failed or crashing.
    # Note: list(results) generates a copy of the list, so it's safe to remove
    # items while iterating.
    for test_status in list(results):
        if test_status['crashes']:
            continue
        elif test_status['status'] == 'PASS':
            results.remove(test_status)

    # Filter results and collect logs. If we can't find a log for the test, skip
    # it. The Emailer will fill in the blanks using Database data later.
    filtered_results = {}
    for test_dict in results:
        result_log = ''
        test_name = os.path.basename(test_dict['testdir'])
        error = os.path.join(test_dict['testdir'], 'debug',
                             '%s.ERROR' % test_name)

        # If the error log doesn't exist, we don't care about this test.
        if not os.path.isfile(error):
            continue

        # Parse failure reason for this test.
        for t, r in parse_reason(test_dict['testdir']).iteritems():
            # Server tests may have subtests which will each have their own
            # reason, so display the test name for the subtest in that case.
            if t != test_name:
                result_log += '%s: ' % t
            result_log += '%s\n\n' % r.strip()

        # Trim result_log to the last _STATUS_LOG_LIMIT lines.
        short_result_log = '\n'.join(
            result_log.splitlines()[-1 * _STATUS_LOG_LIMIT:]).strip()

        # Let the reader know we've trimmed the log.
        if short_result_log != result_log.strip():
            short_result_log = (
                '[...displaying only the last %d status log lines...]\n%s' %
                (_STATUS_LOG_LIMIT, short_result_log))

        # Pull out only the last _ERROR_LOG_LIMIT lines of the file.
        short_log = utils.system_output('tail -n %d %s' %
                                        (_ERROR_LOG_LIMIT, error))

        # Let the reader know we've trimmed the log.
        if len(short_log.splitlines()) == _ERROR_LOG_LIMIT:
            short_log = (
                '[...displaying only the last %d error log lines...]\n%s' %
                (_ERROR_LOG_LIMIT, short_log))

        filtered_results[test_name] = test_dict
        filtered_results[test_name]['log'] = '%s\n\n%s' % (short_result_log,
                                                           short_log)

    # Generate JSON dump of results. Store in results dir.
    with open(os.path.join(results_dir, _JSON_REPORT_FILE), 'w') as json_file:
        json.dump(filtered_results, json_file)
Example #53
0
def i2c_detect(bus, addr):
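    """Probes a single I2C address on the given bus and returns the output."""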
    full_cmd = 'i2cdetect -y %d 0x%x 0x%x' % (bus, addr, addr)
    result = utils.system_output(full_cmd)
    logging.debug('Command: %s', full_cmd)
    logging.debug('Result: %s', result)
    return result
Example #54
0
 def _check_minidump_stackwalk(self, minidump_path, basename,
                               from_crash_reporter):
     stack = utils.system_output('/usr/bin/minidump_stackwalk %s %s' %
                                 (minidump_path, self._symbol_dir))
     self._verify_stack(stack, basename, from_crash_reporter)
Example #55
0
def run_qemu_io_blkdebug(test, params, env):
    """
    Run qemu-io blkdebug tests:
    1. Create image with given parameters
    2. Write the blkdebug config file
    3. Try to operate on the image with qemu-io and get the error message
    4. Get the error message from perror by error number set in config file
    5. Compare the error message

    @param test:   kvm test object
    @param params: Dictionary with the test parameters
    @param env:    Dictionary with test environment.
    """
    tmp_dir = params.get("tmp_dir", "/tmp")
    blkdebug_cfg = virt_utils.get_path(tmp_dir, params.get("blkdebug_cfg",
                                                            "blkdebug.cfg"))
    err_command = params.get("err_command")
    err_event = params.get("err_event")
    errn_list = re.split("\s+", params.get("errn_list").strip())
    re_std_msg = params.get("re_std_msg")
    test_timeout = int(params.get("test_timeout", "60"))
    pre_err_commands = params.get("pre_err_commands")
    image = params.get("images")
    blkdebug_default = params.get("blkdebug_default")

    error.context("Create image", logging.info)
    image_name = virt_vm.create_image(params.object_params(image), test.bindir)

    template_name = virt_utils.get_path(test.virtdir, blkdebug_default)
    template = ConfigParser.ConfigParser()
    template.read(template_name)

    for errn in errn_list:
        log_filename = virt_utils.get_path(test.outputdir,
                                           "qemu-io-log-%s" % errn)
        error.context("Write the blkdebug config file", logging.info)
        template.set("inject-error", "event", '"%s"' % err_event)
        template.set("inject-error", "errno", '"%s"' % errn)

        with open(blkdebug_cfg, 'w') as blkdebug:
            template.write(blkdebug)
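        # The file written above should roughly look like (assuming the
        # default template only carries the inject-error section):
        #   [inject-error]
        #   event = "<err_event>"
        #   errno = "<errno>"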

        error.context("Operate in qemu-io to trigger the error", logging.info)
        session = qemu_io.QemuIOShellSession(test, params, image_name,
                                             blkdebug_cfg=blkdebug_cfg,
                                             log_filename=log_filename)
        if pre_err_commands:
            for cmd in re.split(",", pre_err_commands.strip()):
                session.cmd_output(cmd, timeout=test_timeout)

        output = session.cmd_output(err_command, timeout=test_timeout)
        error.context("Get error message from command perror", logging.info)
        perror_cmd = "perror %s" % errn
        std_msg = utils.system_output(perror_cmd)
        std_msg = re.findall(re_std_msg, std_msg)
        if std_msg:
            std_msg = std_msg[0]
        else:
            std_msg = ""
            logging.warning("Can not find error message from perror")

        session.close()
        error.context("Compare the error message", logging.info)
        if std_msg:
            if std_msg in output:
                logging.info("Error message is correct in qemu-io")
            else:
                fail_log = "The error message is mismatch:"
                fail_log += "qemu-io reports: '%s'," % output
                fail_log += "perror reports: '%s'" % std_msg
                raise error.TestFail(fail_log)
        else:
            logging.warning("Can not find error message from perror."
                            " The output from qemu-io is %s" % output)
Example #56
0
def run_softlockup(test, params, env):
    """
    Soft lockup/drift test with stress.

    1) Boot up a VM.
    2) Build stress on host and guest.
    3) Run heartbeat with the given options on the server (host) and the
       client (guest).
    4) Run for a relatively long time, e.g. 12, 18 or 24 hours.
    5) Output the test result and observe drift.

    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    stress_setup_cmd = params.get("stress_setup_cmd")
    stress_cmd = params.get("stress_cmd")
    server_setup_cmd = params.get("server_setup_cmd")
    drift_cmd = params.get("drift_cmd")
    kill_stress_cmd = params.get("kill_stress_cmd")
    kill_monitor_cmd = params.get("kill_monitor_cmd")

    threshold = int(params.get("stress_threshold"))
    monitor_log_file_server = params.get("monitor_log_file_server")
    monitor_log_file_client = params.get("monitor_log_file_client")
    test_length = int(3600 * float(params.get("test_length")))
    monitor_port = int(params.get("monitor_port"))

    vm = env.get_vm(params["main_vm"])
    login_timeout = int(params.get("login_timeout", 360))
    stress_dir = os.path.join(os.environ['AUTODIR'], "tests/stress")
    monitor_dir = os.path.join(test.bindir, 'deps')

    def _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd):
        logging.info("Kill stress and monitor on guest")
        try:
            session.cmd(kill_stress_cmd)
        except Exception:
            pass
        try:
            session.cmd(kill_monitor_cmd)
        except Exception:
            pass

    def _kill_host_programs(kill_stress_cmd, kill_monitor_cmd):
        logging.info("Kill stress and monitor on host")
        utils.run(kill_stress_cmd, ignore_status=True)
        utils.run(kill_monitor_cmd, ignore_status=True)

    def host():
        logging.info("Setup monitor server on host")
        # Kill previous instances of the host load programs, if any
        _kill_host_programs(kill_stress_cmd, kill_monitor_cmd)
        # Cleanup previous log instances
        if os.path.isfile(monitor_log_file_server):
            os.remove(monitor_log_file_server)
        # Opening firewall ports on host
        utils.run("iptables -F", ignore_status=True)

        # Run heartbeat on host
        utils.run(
            server_setup_cmd %
            (monitor_dir, threshold, monitor_log_file_server, monitor_port))

        logging.info("Build stress on host")
        # Uncompress and build stress on host
        utils.run(stress_setup_cmd % stress_dir)

        logging.info("Run stress on host")
        # stress_threads = 2 * n_cpus
        threads_host = 2 * utils.count_cpus()
        # Run stress test on host
        utils.run(stress_cmd % (stress_dir, threads_host))

    def guest():
        try:
            host_ip = socket.gethostbyname(socket.gethostname())
        except socket.error:
            try:
                # Hackish, but works well on standalone (laptop) setups
                # with access to the internet. If this fails, well, then
                # not much else can be done...
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                s.connect(("redhat.com", 80))
                host_ip = s.getsockname()[0]
            except socket.error, (value, e):
                raise error.TestError("Could not determine host IP: %d %s" %
                                      (value, e))

        # Now, starting the guest
        vm.verify_alive()
        session = vm.wait_for_login(timeout=login_timeout)

        # Kill previous instances of the load programs, if any
        _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd)
        # Clean up previous log instances
        session.cmd("rm -f %s" % monitor_log_file_client)

        # Opening firewall ports on guest
        try:
            session.cmd("iptables -F")
        except Exception:
            pass

        # Get required files and copy them from host to guest
        monitor_path = os.path.join(test.virtdir, 'deps', 'heartbeat_slu.py')
        stress_path = os.path.join(os.environ['AUTODIR'], "tests", "stress",
                                   "stress-1.0.4.tar.gz")
        vm.copy_files_to(monitor_path, "/tmp")
        vm.copy_files_to(stress_path, "/tmp")

        logging.info("Setup monitor client on guest")
        # Start heartbeat on guest
        session.cmd(
            params.get("client_setup_cmd") %
            ("/tmp", host_ip, monitor_log_file_client, monitor_port))

        logging.info("Build stress on guest")
        # Uncompress and build stress on guest
        session.cmd(stress_setup_cmd % "/tmp", timeout=200)

        logging.info("Run stress on guest")
        # stress_threads = 2 * n_vcpus
        threads_guest = 2 * int(params.get("smp", 1))
        # Run stress test on guest
        session.cmd(stress_cmd % ("/tmp", threads_guest))

        # Wait and report
        logging.debug("Wait for %d s", test_length)
        time.sleep(test_length)

        # Kill instances of the load programs on both guest and host
        _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd)
        _kill_host_programs(kill_stress_cmd, kill_monitor_cmd)

        # Collect drift
        drift = utils.system_output(drift_cmd % monitor_log_file_server)
        logging.info("Drift noticed: %s", drift)
Example #57
0
    def run_once(self):
        # Cache the architecture to avoid redundant execs to "uname".
        arch = utils.get_arch()
        userspace_arch = utils.get_arch_userspace()

        # Report the full uname for anyone reading logs.
        logging.info('Running %s kernel, %s userspace: %s', arch,
                     userspace_arch, utils.system_output('uname -a'))

        # Load the list of kernel config variables.
        config = kernel_config.KernelConfig()
        config.initialize()

        # Adjust for kernel-version-specific changes
        kernel_ver = os.uname()[2]
        if utils.compare_versions(kernel_ver, "3.10") >= 0:
            for entry in self.IS_EXCLUSIVE:
                if entry['regex'] == 'BINFMT_':
                    entry['builtin'].append('BINFMT_SCRIPT')

        if utils.compare_versions(kernel_ver, "3.14") >= 0:
            self.IS_MODULE.append('TEST_ASYNC_DRIVER_PROBE')
            for entry in self.IS_EXCLUSIVE:
                if entry['regex'] == 'BINFMT_':
                    entry['builtin'].append('BINFMT_MISC')

        if utils.compare_versions(kernel_ver, "3.18") >= 0:
            for entry in self.IS_EXCLUSIVE:
                if entry['regex'] == '.*_FS$':
                    entry['builtin'].append('SND_PROC_FS')

        if utils.compare_versions(kernel_ver, "4.4") < 0:
            for entry in self.IS_EXCLUSIVE:
                if entry['regex'] == '.*_FS$':
                    entry['builtin'].append('EXT4_USE_FOR_EXT23')

        # Run the static checks.
        map(config.has_builtin, self.IS_BUILTIN)
        map(config.has_module, self.IS_MODULE)
        map(config.is_enabled, self.IS_ENABLED)
        map(config.is_missing, self.IS_MISSING)
        map(config.is_exclusive, self.IS_EXCLUSIVE)

        # Run the dynamic checks.

        # Security; NULL-address hole should be as large as possible.
        # Upstream kernel recommends 64k, which should be large enough that
        # dereferencing nearly any structure member through a NULL pointer
        # still lands inside the protected hole.
        wanted = '65536'
        if self.is_arm_family(arch):
            # ... except on ARM where it shouldn't be larger than 32k due
            # to historical ELF load location.
            wanted = '32768'
        config.has_value('DEFAULT_MMAP_MIN_ADDR', [wanted])

        # Security; make sure NX page table bits are usable.
        if self.is_x86_family(arch):
            if arch == "i386":
                config.has_builtin('X86_PAE')
            else:
                config.has_builtin('X86_64')

        # Security; marks data segments as RO/NX, text as RO.
        if (arch == 'armv7l'
                and utils.compare_versions(kernel_ver, "3.8") < 0):
            config.is_missing('DEBUG_RODATA')
            config.is_missing('DEBUG_SET_MODULE_RONX')
        else:
            config.has_builtin('DEBUG_RODATA')
            config.has_builtin('DEBUG_SET_MODULE_RONX')

            if arch == 'aarch64':
                config.has_builtin('DEBUG_ALIGN_RODATA')

        # NaCl; allow mprotect+PROT_EXEC on noexec mapped files.
        config.has_value('MMAP_NOEXEC_TAINT', ['0'])

        # Kernel: make sure port 0xED is the one used for I/O delay
        if self.is_x86_family(arch):
            config.has_builtin('IO_DELAY_0XED')
            needed = config.get('CONFIG_IO_DELAY_TYPE_0XED', None)
            config.has_value('DEFAULT_IO_DELAY_TYPE', [needed])

        # Raise a failure if anything unexpected was seen.
        if len(config.failures()):
            raise error.TestFail((", ".join(config.failures())))
Example #58
0
def run_cmd(cmd):
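    """Runs cmd with stderr merged into stdout, ignoring a non-zero exit."""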
    return utils.system_output(cmd + ' 2>&1',
                               retain_output=True,
                               ignore_status=True)
Example #59
0
 def run_once(self):
     probe_results = utils.system_output('gooftool probe')
     logging.info(probe_results)
     return
Example #60
0
 def server_start(self, args):
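     """Runs the server command built from args and records its output."""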
     cmd = self.server_path % args
     self.results.append(utils.system_output(cmd, retain_output=True))