def reboot_setup(self):
     # save the partition list and mount points, as well as the cpu count
     partition_list = partition_lib.get_partition_list(self,
                                                       exclude_swap=False)
     mount_info = partition_lib.get_mount_info(partition_list)
     self._state.set('client', 'mount_info', mount_info)
     self._state.set('client', 'cpu_count', utils.count_cpus())
Example #2
0
    def build(self, make_opts = '', logfile = '', extraversion='autotest'):
        """build the kernel

        make_opts
                additional options to make, if any
        logfile
                path for the build log; defaults to <log_dir>/kernel_build
                (NOTE(review): computed here but not otherwise used in this
                method — confirm a callee relies on it)
        extraversion
                appended to the kernel version via self.extraversion();
                skipped when empty
        """
        # Fail early if the build toolchain is missing.
        os_dep.commands('gcc', 'make')
        if logfile == '':
            logfile = os.path.join(self.log_dir, 'kernel_build')
        os.chdir(self.build_dir)
        if extraversion:
            self.extraversion(extraversion)
        self.set_cross_cc()
        # setup_config_file(config_file, config_overrides)

        # Not needed on 2.6, but hard to tell -- handle failure
        utils.system('make dep', ignore_status=True)
        # Oversubscribe make jobs (2x CPUs) to keep the build pipeline busy.
        threads = 2 * utils.count_cpus()
        build_string = 'make -j %d %s %s' % (threads, make_opts,
                                     self.build_target)
                                # eg make bzImage, or make zImage
        print build_string
        utils.system(build_string)
        # Only build modules when the current .config actually enables any.
        if kernel_config.modules_needed('.config'):
            utils.system('make -j %d modules' % (threads))

        kernel_version = self.get_kernel_build_ver()
        # Strip the '-autotest' suffix so the logged version matches upstream.
        kernel_version = re.sub('-autotest', '', kernel_version)
        self.logfile.write('BUILD VERSION: %s\n' % kernel_version)

        # Preserve System.map with the results for later symbol lookups.
        utils.force_copy(self.build_dir+'/System.map',
                                  self.results_dir)
 def run_once(self):
     """Run 'openssl speed' for AES-256-CBC across all CPUs and export
     the parsed performance statistics."""
     cpu_total = bin_utils.count_cpus()
     logging.debug('Running using all cpus: %d' % cpu_total)
     raw_output = self.openssl_speed('aes-256-cbc', '-multi %d' % cpu_total)
     self.update_stats(self.parse_results(raw_output))
     self.export_stats()
Example #4
0
    def _build(self):
        """Build KVM kernel modules (when a source dir is configured) and
        then the KVM userspace tools."""
        make_jobs = utils.count_cpus()
        cfg = './configure'
        module_build_steps = []
        if self.kmod_srcdir:
            # Dedicated kvm-kmod source tree.
            logging.info('Building KVM modules')
            os.chdir(self.kmod_srcdir)
            module_build_steps = [cfg, 'make clean',
                                  'make sync LINUX=%s' % self.kernel_srcdir,
                                  'make']
        elif self.kernel_srcdir:
            # Modules shipped inside the userspace tree.
            logging.info('Building KVM modules')
            os.chdir(self.userspace_srcdir)
            cfg += ' --kerneldir=%s' % self.host_kernel_srcdir
            module_build_steps = [cfg, 'make clean',
                                  'make -C kernel LINUX=%s sync'
                                  % self.kernel_srcdir]

        for build_step in module_build_steps:
            utils.run(build_step)

        logging.info('Building KVM userspace code')
        os.chdir(self.userspace_srcdir)
        cfg += ' --prefix=%s' % self.prefix
        if "--disable-strip" in self.configure_options:
            cfg += ' --disable-strip'
        if self.extra_configure_options:
            cfg += ' %s' % self.extra_configure_options
        utils.system(cfg)
        utils.system('make clean')
        utils.system('make -j %s' % make_jobs)
Example #5
0
    def setup(self,
              tarball='oprofile-0.9.4.tar.bz2',
              local=None,
              *args,
              **dargs):
        """Build oprofile from the bundled tarball, falling back to any
        locally installed copy when the source build fails.

        local: True = use the system copy, skip the build; False = a
        failed build is fatal; None = best-effort fallback.
        """
        if local == True:
            # Explicitly told to use the local install; nothing to build.
            return

        try:
            self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
            utils.extract_tarball_to_dir(self.tarball, self.srcdir)
            os.chdir(self.srcdir)

            utils.system('patch -p1 < %s'
                         % os.path.join(self.bindir, "oprofile-69455.patch"))
            utils.configure('--with-kernel-support --prefix=' + self.srcdir)
            utils.make('-j %d' % utils.count_cpus())
            utils.make('install')
        except Exception:
            # Build from source failed; a preinstalled copy may still work.
            have_opcontrol = os.path.exists('/usr/bin/opcontrol')
            have_opreport = os.path.exists('/usr/bin/opreport')
            if local == False or not have_opcontrol or not have_opreport:
                raise error.AutotestError('No oprofile available')
        else:
            # The build succeeded, so prefer the freshly built binaries.
            self._pick_binaries(True)
Example #6
0
    def run_once(self, args = ''):
        """Run the stress tool; the default arguments spawn twice as many
        CPU, IO, VM and disk workers as there are CPUs, for 60 seconds."""
        if not args:
            workers = 2 * utils.count_cpus()
            args = '-c %d -i %d -m %d -d %d -t 60 -v' % \
                    (workers, workers, workers, workers)

        utils.system('%s/src/stress %s' % (self.srcdir, args))
Example #7
0
File: job.py Project: ceph/autotest
 def reboot_setup(self):
     """Snapshot mount info and the CPU count before rebooting so the
     post-reboot check can detect configuration changes."""
     partitions = partition_lib.get_partition_list(self, exclude_swap=False)
     mount_info = partition_lib.get_mount_info(partitions)
     self._state.set('client', 'mount_info', mount_info)
     self._state.set('client', 'cpu_count', utils.count_cpus())
Example #8
0
    def setup(self, tarball='oprofile-0.9.4.tar.bz2', local=None,
              *args, **dargs):
        """Build and install oprofile from source.

        @param tarball: oprofile source tarball bundled with the test.
        @param local: if True, skip the build and use the system copy;
            if False, a failed build is fatal; if None, fall back to the
            system copy only when the build fails.
        @raise error.AutotestError: if no usable oprofile is available.
        """
        if local == True:
            return

        try:
            self.tarball = utils.unmap_url(self.bindir, tarball,
                                                    self.tmpdir)
            utils.extract_tarball_to_dir(self.tarball, self.srcdir)
            os.chdir(self.srcdir)

            patch = os.path.join(self.bindir,"oprofile-69455.patch")
            utils.system('patch -p1 < %s' % patch)
            utils.system('./configure --with-kernel-support --prefix=' + \
                                                    self.srcdir)
            utils.system('make -j %d' % utils.count_cpus())
            utils.system('make install')
        # Was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit; catch Exception so those still propagate.
        except Exception:
            # Build from source failed.
            # But maybe can still use the local copy
            local_opcontrol = os.path.exists('/usr/bin/opcontrol')
            local_opreport = os.path.exists('/usr/bin/opreport')
            if local == False or not local_opcontrol or not local_opreport:
                raise error.AutotestError('No oprofile available')
        else:
            # if we managed to build, try again to pick binaries
            self._pick_binaries(True)
Example #9
0
    def run_once(self, db_type = 'pgsql', build = 1, \
                    num_threads = None, max_time = 60, \
                    read_only = 0, args = ''):
        """Run sysbench against either PostgreSQL or MySQL.

        @param db_type: 'pgsql' or 'mysql'.
        @param build: whether to (re)build the database first.
        @param num_threads: worker thread count; defaults to the host's
            CPU count, determined at call time.
        @param max_time: benchmark duration limit in seconds.
        @param read_only: run a read-only workload if true.
        @param args: extra arguments passed through to sysbench.
        @raise error.TestError: if commands cannot run as the db user.
        """
        # The old signature used num_threads=utils.count_cpus(), which is
        # evaluated once at import time; resolve the default here so it
        # always reflects the current host.
        if num_threads is None:
            num_threads = utils.count_cpus()
        plib = os.path.join(self.autodir, 'deps/pgsql/pgsql/lib')
        mlib = os.path.join(self.autodir, 'deps/mysql/mysql/lib/mysql')
        ld_path = utils.prepend_path(plib,
            utils.environ('LD_LIBRARY_PATH'))
        ld_path = utils.prepend_path(mlib, ld_path)
        os.environ['LD_LIBRARY_PATH'] = ld_path

        # The databases don't want to run as root so run them as nobody
        self.dbuser = '******'
        self.dbuid = pwd.getpwnam(self.dbuser)[2]
        self.sudo = 'sudo -u ' + self.dbuser + ' '

        # Check for nobody user; catch Exception rather than a bare
        # except so KeyboardInterrupt/SystemExit still propagate.
        try:
            utils.system(self.sudo + '/bin/true')
        except Exception:
            raise error.TestError('Unable to run as nobody')

        if (db_type == 'pgsql'):
            self.execute_pgsql(build, num_threads, max_time, read_only, args)
        elif (db_type == 'mysql'):
            self.execute_mysql(build, num_threads, max_time, read_only, args)
Example #10
0
def setup(tarball, topdir):
    """Download (if needed), build and install MySQL under
    <topdir>/mysql, then initialize its database."""
    src_path = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('http://mirror.x10.com/mirror/mysql/Downloads/MySQL-5.0/mysql-5.0.45.tar.gz', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(src_path)
    utils.configure('--prefix=%s/mysql --enable-thread-safe-client' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    # MySQL doesn't create this directory (database logs and files live
    # here) on its own, so create it; tolerate a pre-existing one.
    try:
        os.mkdir(topdir + '/mysql/var')
    except Exception:
        pass

    # Initialize the database.
    utils.system('%s/mysql/bin/mysql_install_db' % topdir)

    os.chdir(topdir)
Example #11
0
def setup(tarball, topdir):
    """Fetch, build and install MySQL 5.0.45 under <topdir>/mysql and
    run mysql_install_db to initialize it."""
    mysql_url = ('http://mirror.x10.com/mirror/mysql/Downloads/MySQL-5.0/'
                 'mysql-5.0.45.tar.gz')
    build_dir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file(mysql_url, tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(build_dir)
    utils.configure('--prefix=%s/mysql --enable-thread-safe-client' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    # MySQL does not create its var directory (database logs/files) on
    # its own; ignore the error if it already exists.
    try:
        os.mkdir(topdir + '/mysql/var')
    except Exception:
        pass

    # Initialize the database.
    utils.system('%s/mysql/bin/mysql_install_db' % topdir)

    os.chdir(topdir)
Example #12
0
    def _build(self):
        """Build optional KVM kernel modules, then the userspace tools."""
        make_jobs = utils.count_cpus()
        cfg = './configure'
        if self.kmod_srcdir:
            # Standalone kvm-kmod tree.
            logging.info('Building KVM modules')
            os.chdir(self.kmod_srcdir)
            kmod_steps = [cfg,
                          'make clean',
                          'make sync LINUX=%s' % self.kernel_srcdir,
                          'make']
        elif self.kernel_srcdir:
            # Modules built from within the userspace tree.
            logging.info('Building KVM modules')
            os.chdir(self.userspace_srcdir)
            cfg += ' --kerneldir=%s' % self.host_kernel_srcdir
            kmod_steps = [cfg,
                          'make clean',
                          'make -C kernel LINUX=%s sync' % self.kernel_srcdir]
        else:
            kmod_steps = []

        for kmod_step in kmod_steps:
            utils.run(kmod_step)

        logging.info('Building KVM userspace code')
        os.chdir(self.userspace_srcdir)
        cfg += ' --prefix=%s' % self.prefix
        if "--disable-strip" in self.configure_options:
            cfg += ' --disable-strip'
        if self.extra_configure_options:
            cfg += ' %s' % self.extra_configure_options
        for user_step in (cfg, 'make clean', 'make -j %s' % make_jobs):
            utils.system(user_step)
Example #13
0
    def _build(self):
        """Build the KVM kernel modules, tolerating build failure.

        Sets self.modules_build_succeed to True only if every module
        build step completed; a CmdError is logged rather than raised.
        """
        make_jobs = utils.count_cpus()  # NOTE(review): unused in this method
        cfg = './configure'
        self.modules_build_succeed = False
        if self.kmod_srcdir:
            # Standalone kvm-kmod source tree.
            logging.info('Building KVM modules')
            os.chdir(self.kmod_srcdir)
            module_build_steps = [cfg,
                                  'make clean',
                                  'make sync LINUX=%s' % self.kernel_srcdir,
                                  'make']
        elif self.kernel_srcdir:
            # Modules inside the userspace tree, synced against the given
            # kernel source directory.
            logging.info('Building KVM modules')
            os.chdir(self.userspace_srcdir)
            cfg += ' --kerneldir=%s' % self.host_kernel_srcdir
            module_build_steps = [cfg,
                            'make clean',
                            'make -C kernel LINUX=%s sync' % self.kernel_srcdir]
        else:
            # No module sources configured; nothing to build.
            module_build_steps = []

        try:
            if module_build_steps:
                for step in module_build_steps:
                    utils.run(step)
                self.modules_build_succeed = True
        except error.CmdError, e:
            # Non-fatal: callers can inspect self.modules_build_succeed.
            logging.error("KVM modules build failed to build: %s" % e)
Example #14
0
File: npb.py Project: ceph/autotest
 def initialize(self, tests=''):
     """Reset the failure counter, store the test list for run_once()
     and precompute the 1/n_cpus ratio."""
     self.n_fail = 0      # failure counter
     self.tests = tests   # parameters for run_once()
     # Ratio between 1 and the number of CPUs of the system.
     self.ratio = 1.0 / utils.count_cpus()
     logging.debug('Ratio (1/n_cpus) found for this system: %s' % self.ratio)
Example #15
0
 def initialize(self, tests=''):
     """Prepare run state: zero the failure counter, keep the requested
     tests and compute the per-CPU ratio used later."""
     self.n_fail = 0
     self.tests = tests
     # Ratio between 1 and the number of CPUs of the system.
     ncpus = utils.count_cpus()
     self.ratio = 1.0 / ncpus
     logging.debug('Ratio (1/n_cpus) found for this system: %s' %
                   self.ratio)
Example #16
0
    def _pull_code(self):
        """
        Retrieves code from git repositories.

        Prepares GitRepo objects (with their build step lists) for
        spice-protocol, spice and the KVM userspace tree, then fetches
        and patches every repo that has a configured source.

        @raise error.TestError: if the user git repository is not set.
        """
        params = self.params  # NOTE(review): assigned but unused here
        make_jobs = utils.count_cpus()
        cfg = 'PKG_CONFIG_PATH="%s/lib/pkgconfig:%s/share/pkgconfig" ./configure' % (
            self.prefix, self.prefix)

        self.spice_protocol = GitRepo(
            installer=self,
            prefix='spice_protocol',
            srcdir='spice-protocol',
            build_steps=[
                './autogen.sh',
                './configure --prefix=%s' % self.prefix, 'make clean',
                'make -j %s' % (make_jobs), 'make install'
            ])

        self.spice = GitRepo(
            installer=self,
            prefix='spice',
            srcdir='spice',
            build_steps=[
                'PKG_CONFIG_PATH="%s/lib/pkgconfig:%s/share/pkgconfig" CXXFLAGS=-Wl,--add-needed ./autogen.sh --prefix=%s'
                % (self.prefix, self.prefix, self.prefix), 'make clean',
                'make -j %s' % (make_jobs), 'make install'
            ])

        self.userspace = GitRepo(installer=self,
                                 prefix='user',
                                 repo_param='user_git_repo',
                                 srcdir='kvm_userspace')

        # Inspect the userspace configure script to learn which options
        # this checkout supports.
        p = os.path.join(self.userspace.srcdir, 'configure')
        self.configure_options = virt_installer.check_configure_options(p)

        cfg = cfg + ' --prefix=%s' % self.prefix
        if "--disable-strip" in self.configure_options:
            cfg += ' --disable-strip'
        if self.extra_configure_options:
            cfg += ' %s' % self.extra_configure_options

        self.userspace.build_steps = [
            cfg, 'make clean', 'make -j %s' % make_jobs
        ]

        if not self.userspace.repo:
            message = "KVM user git repository path not specified"
            logging.error(message)
            raise error.TestError(message)

        # spice repos are optional; skip any without a configured repo.
        for repo in [self.userspace, self.spice_protocol, self.spice]:
            if not repo.repo:
                continue
            repo.fetch_and_patch()
Example #17
0
def setup(topdir):
    """Configure, build and install systemtap into <topdir>/systemtap."""
    build_dir = os.path.join(topdir, 'src')

    os.chdir(build_dir)

    utils.configure('--with-elfutils=elfutils '
                    '--prefix=%s/systemtap' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #18
0
    def setup(self, tarball='ltp-full-20120104.bz2'):
        """Unpack LTP, configure it into a private bin dir and build it."""
        src_tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(src_tarball, self.srcdir)
        os.chdir(self.srcdir)
        ltpbin_dir = os.path.join(self.srcdir, 'bin')
        os.mkdir(ltpbin_dir)

        # saves having lex installed
        utils.system('cp ../scan.c pan/')
        utils.configure('--prefix=%s' % ltpbin_dir)
        utils.make('-j %d all' % utils.count_cpus())
        # 'yes n' declines the interactive IDCHECK prompt during install.
        utils.system('yes n | make SKIP_IDCHECK=1 install')
Example #19
0
def setup(tarball, topdir):
    """Fetch, build and install PostgreSQL 8.3.1 under <topdir>/pgsql."""
    src_path = os.path.join(topdir, "src")
    if not os.path.exists(tarball):
        utils.get_file("ftp://ftp.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2", tarball)
    utils.extract_tarball_to_dir(tarball, "src")
    os.chdir(src_path)
    # Debug build without the optional readline/zlib support.
    utils.configure("--without-readline --without-zlib --enable-debug "
                    "--prefix=%s/pgsql" % topdir)
    utils.make("-j %d" % utils.count_cpus())
    utils.make("install")

    os.chdir(topdir)
Example #20
0
def setup(topdir):
    """Build systemtap from <topdir>/src (using the bundled elfutils)
    and install it into <topdir>/systemtap."""
    os.chdir(os.path.join(topdir, 'src'))

    utils.configure('--with-elfutils=elfutils '
                    '--prefix=%s/systemtap' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
    def verify_msr(self, match_list):
        """
        Verify MSR registers on every CPU.

        @param match_list: match list
        @return: total number of mismatches found
        """
        error_count = 0
        # max(..., 1) checks at least CPU 0 even if detection reports 0.
        for cpu_id in xrange(max(utils.count_cpus(), 1)):
            self._cpu_id = cpu_id
            error_count += self._verify_registers('msr', self._read_msr,
                                                  match_list)
        return error_count
Example #22
0
    def setup(self, tarball = 'ltp-full-20120104.bz2'):
        """Extract and build LTP, installing into <srcdir>/bin."""
        tarball_path = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball_path, self.srcdir)
        os.chdir(self.srcdir)
        install_dir = os.path.join(self.srcdir, 'bin')
        os.mkdir(install_dir)

        utils.system('cp ../scan.c pan/')   # saves having lex installed
        utils.configure('--prefix=%s' % install_dir)
        utils.make('-j %d all' % utils.count_cpus())
        # Answer 'n' to the install-time ID check prompts.
        utils.system('yes n | make SKIP_IDCHECK=1 install')
    def test_cpu(self):
        """Test the CPU configuration.

        @return: (errors, warning) strings accumulated from the checks
            on core count, CPU model name and CPU flags.
        """
        errors = ''
        warning = ''
        # Call utils.count_cpus() once; it was previously called twice
        # (condition and message) for the same value.
        cpu_count = utils.count_cpus()
        if self.cpu_cores != cpu_count:
            errors += 'Expecting %d CPU cores but found %d cores\n' % (
                self.cpu_cores, cpu_count)

        for cpu_info in utils.get_cpuinfo():
            if self.cpu_arch not in cpu_info['model name']:
                errors += 'Expecting %s CPU but found %s' % (
                    self.cpu_arch, cpu_info['model name'])

            flags = sorted(cpu_info['flags'].split(' '))
            if flags != self.cpu_flags:
                # TODO(pwang): convert warning to error once VM got better
                # infra support.
                warning += 'Expecting CPU flags %s but found %s\n' % (
                    self.cpu_flags, flags)
        return errors, warning
Example #24
0
def setup(tarball, topdir):
    """Fetch, build and install PostgreSQL 8.3.1 into <topdir>/pgsql."""
    src_path = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('ftp://ftp.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(src_path)
    build_cmds = [
        './configure --without-readline --without-zlib --enable-debug --prefix=%s/pgsql' % topdir,
        'make -j %d' % utils.count_cpus(),
        'make install',
    ]
    for cmd in build_cmds:
        utils.system(cmd)

    os.chdir(topdir)
Example #25
0
 def __build(self):
     """Build KVM modules and userspace from the configured source trees."""
     if self.kmod_srcdir:
         # External module tree: build modules there first, then userspace.
         logging.info('Building KVM modules')
         os.chdir(self.kmod_srcdir)
         for cmd in ['./configure',
                     'make clean',
                     'make sync LINUX=%s' % self.kernel_srcdir,
                     'make -j %s' % utils.count_cpus()]:
             utils.system(cmd)
         logging.info('Building KVM userspace code')
         os.chdir(self.userspace_srcdir)
         for cmd in ['./configure --prefix=%s' % self.prefix,
                     'make clean',
                     'make -j %s' % utils.count_cpus()]:
             utils.system(cmd)
     else:
         # In-tree modules: configure userspace, sync the kernel bits,
         # then build everything in one pass.
         os.chdir(self.userspace_srcdir)
         utils.system('./configure --prefix=%s' % self.prefix)
         logging.info('Building KVM modules')
         utils.system('make clean')
         utils.system('make -C kernel LINUX=%s sync' % self.kernel_srcdir)
         logging.info('Building KVM userspace code')
         utils.system('make -j %s' % utils.count_cpus())
Example #26
0
 def _build(self):
     """Configure and build KVM userspace (qemu) in self.srcdir."""
     make_jobs = utils.count_cpus()
     os.chdir(self.srcdir)
     # For testing purposes, it's better to build qemu binaries with
     # debugging symbols, so we can extract more meaningful stack traces.
     configure_cmd = "./configure --prefix=%s" % self.prefix
     if "--disable-strip" in self.configure_options:
         configure_cmd += " --disable-strip"
     logging.info("Building KVM")
     for build_cmd in (configure_cmd, "make clean",
                       "make -j %s" % make_jobs):
         utils.system(build_cmd)
Example #27
0
 def _build(self):
     """Build KVM/qemu userspace, keeping debug symbols in the binaries."""
     make_jobs = utils.count_cpus()
     os.chdir(self.srcdir)
     # Keep debugging symbols so extracted stack traces stay meaningful.
     cfg = "./configure --prefix=%s" % self.prefix
     if "--disable-strip" in self.configure_options:
         cfg += " --disable-strip"
     logging.info("Building KVM")
     utils.system(cfg)
     utils.system("make clean")
     utils.system("make -j %s" % make_jobs)
Example #28
0
    def __build(self):
        """Configure and build KVM from the checked-out repository.

        repo_type 1 is the kvm-userspace layout (BIOS binaries are staged
        into qemu/pc-bios when missing); repo_type 2 is the merged tree.
        """
        os.chdir(self.srcdir)
        cfg = "./configure --prefix=%s" % self.prefix
        # Default to an empty step list so an unrecognized repo_type no
        # longer raises a NameError at the loop below.
        steps = []
        if self.repo_type == 1:
            steps = [cfg, "make clean", "make -j %s" % utils.count_cpus()]
            if not os.path.exists('qemu/pc-bios/bios.bin'):
                # Freshly cloned tree: build and stage the BIOS blobs.
                steps.append("make -C bios")
                steps.append("make -C extboot")
                steps.append("cp -f bios/BIOS-bochs-latest"
                             " qemu/pc-bios/bios.bin")
                steps.append("cp -f vgabios/VGABIOS-lgpl-latest.bin"
                             " qemu/pc-bios/vgabios.bin")
                steps.append("cp -f vgabios/VGABIOS-lgpl-latest.cirrus.bin"
                             " qemu/pc-bios/vgabios-cirrus.bin")
                steps.append("cp -f extboot/extboot.bin"
                             " qemu/pc-bios/extboot.bin")
        elif self.repo_type == 2:
            steps = [cfg, "make clean", "make -j %s" % utils.count_cpus()]

        logging.info("Building KVM")
        for step in steps:
            utils.system(step)
Example #29
0
    def setup(self, tarball="sysbench-0.4.8.tar.bz2"):
        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball, self.srcdir)
        self.job.setup_dep(["pgsql", "mysql"])

        os.chdir(self.srcdir)

        pgsql_dir = os.path.join(self.autodir, "deps/pgsql/pgsql")
        mysql_dir = os.path.join(self.autodir, "deps/mysql/mysql")

        # configure wants to get at pg_config, so add its path
        utils.system("PATH=%s/bin:$PATH ./configure --with-mysql=%s --with-pgsql" % (pgsql_dir, mysql_dir))
        utils.make("-j %d" % utils.count_cpus())
Example #30
0
    def _check_post_reboot(self, subdir, running_id=None):
        """
        Function to perform post boot checks such as if the system configuration
        has changed across reboots (specifically, CPUs and partitions).

        Compares the current mount info and CPU count against the values
        recorded before the reboot (under the 'client' state namespace).
        A mismatch is fatal only when the CLIENT.abort_on_mismatch config
        option is set; otherwise it is logged as a warning.

        @param subdir: The subdir to use in the job.record call.
        @param running_id: An optional running_id to include in the reboot
            failure log message

        @raise JobError: Raised if the current configuration does not match the
            pre-reboot configuration.
        """
        abort_on_mismatch = GLOBAL_CONFIG.get_config_value('CLIENT',
                                                           'abort_on_mismatch',
                                                           type=bool,
                                                           default=False)
        # check to see if any partitions have changed
        partition_list = partition_lib.get_partition_list(self,
                                                          exclude_swap=False)
        mount_info = partition_lib.get_mount_info(partition_list)
        old_mount_info = self._state.get('client', 'mount_info')
        if mount_info != old_mount_info:
            # Set difference in both directions shows what appeared and
            # what disappeared across the reboot.
            new_entries = mount_info - old_mount_info
            old_entries = old_mount_info - mount_info
            description = ("mounted partitions are different after reboot "
                           "(old entries: %s, new entries: %s)" %
                           (old_entries, new_entries))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir,
                                            "reboot.verify_config",
                                            description,
                                            running_id=running_id)
                raise error.JobError("Reboot failed: %s" % description)
            else:
                logging.warning(description)

        # check to see if any CPUs have changed
        cpu_count = utils.count_cpus()
        old_count = self._state.get('client', 'cpu_count')
        if cpu_count != old_count:
            description = ('Number of CPUs changed after reboot '
                           '(old count: %d, new count: %d)' %
                           (old_count, cpu_count))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir,
                                            'reboot.verify_config',
                                            description,
                                            running_id=running_id)
                raise error.JobError('Reboot failed: %s' % description)
            else:
                logging.warning(description)
Example #31
0
def setup(tarball_systemtap, tarball_elfutils, topdir):
    """Unpack systemtap and elfutils, then build systemtap (with the
    bundled elfutils) into <topdir>/systemtap."""
    src_path = os.path.join(topdir, "src")

    utils.extract_tarball_to_dir(tarball_systemtap, "src")
    utils.extract_tarball_to_dir(tarball_elfutils, "elfutils")
    # systemtap's configure expects elfutils inside its source tree.
    shutil.move("elfutils", "src")

    os.chdir(src_path)

    utils.system("./configure --with-elfutils=elfutils "
                 "--prefix=%s/systemtap" % topdir)
    utils.system("make -j %d" % utils.count_cpus())
    utils.system("make install")

    os.chdir(topdir)
Example #32
0
    def setup(self, tarball = 'sysbench-0.4.8.tar.bz2'):
        """Build sysbench from the bundled tarball, linked against the
        pgsql and mysql deps."""
        local_tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(local_tarball, self.srcdir)
        self.job.setup_dep(['pgsql', 'mysql'])

        os.chdir(self.srcdir)

        pg_root = os.path.join(self.autodir, 'deps/pgsql/pgsql')
        my_root = os.path.join(self.autodir, 'deps/mysql/mysql')

        # configure wants to get at pg_config, so add its path
        utils.system(
            'PATH=%s/bin:$PATH ./configure --with-mysql=%s --with-pgsql'
            % (pg_root, my_root))
        utils.system('make -j %d' % utils.count_cpus())
Example #33
0
def setup(tarball, topdir):
    """Download, build and install PostgreSQL 8.3.1 under <topdir>/pgsql."""
    pg_url = ('ftp://ftp-archives.postgresql.org/pub/source/v8.3.1/'
              'postgresql-8.3.1.tar.bz2')
    src_path = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file(pg_url, tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(src_path)
    # Debug-enabled build with readline and zlib disabled.
    utils.configure(
        '--without-readline --without-zlib --enable-debug --prefix=%s/pgsql' %
        topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #34
0
    def setup(self, tarball='sysbench-0.4.8.tar.bz2'):
        """Unpack and compile sysbench against the pgsql/mysql deps."""
        src_tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(src_tarball, self.srcdir)
        self.job.setup_dep(['pgsql', 'mysql'])

        os.chdir(self.srcdir)

        pgsql_root = os.path.join(self.autodir, 'deps/pgsql/pgsql')
        mysql_root = os.path.join(self.autodir, 'deps/mysql/mysql')

        # configure wants to get at pg_config, so add its path
        utils.system(
            'PATH=%s/bin:$PATH ./configure --with-mysql=%s --with-pgsql' %
            (pgsql_root, mysql_root))
        utils.make('-j %d' % utils.count_cpus())
Example #35
0
    def setup(self, tarball='ltp-full-20120104.bz2'):
        """Extract LTP, apply the local patch set and build it into
        <srcdir>/bin."""
        tarball_path = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball_path, self.srcdir)
        os.chdir(self.srcdir)
        ltpbin_dir = os.path.join(self.srcdir, 'bin')
        os.mkdir(ltpbin_dir)

        # Apply the local fixes shipped next to the test, in order.
        for patch_name in ('getdents', 'cpuid', 'kill-ipc', 'genpow',
                           'sysctl'):
            utils.system('patch -p1 < ../patches/%s.patch' % patch_name)
        utils.make('autotools')
        utils.configure('--prefix=%s' % ltpbin_dir)
        utils.make('-j %d all' % utils.count_cpus())
        # Decline the interactive ID check during install.
        utils.system('yes n | make SKIP_IDCHECK=1 install')
Example #36
0
def setup(tarball, topdir):
    """Download (if needed), build and install pgpool-II under
    <topdir>/pgpool, linked against the pgsql dep."""
    # FIXME - Waiting to be able to specify dependency.
    #self.job.setup_dep(['pgsql'])
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('http://pgfoundry.org/frs/download.php/1083/pgpool-II-1.0.1.tar.gz', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    # FIXEME - Waiting to be able to use self.autodir instead of
    # os.environ['AUTODIR']
    # Use the utils.configure/utils.make helpers for consistency with
    # the other dep setup scripts instead of raw utils.system calls.
    utils.configure('--prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql'
                    % (topdir, os.environ['AUTODIR']))
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #37
0
    def _check_post_reboot(self, subdir, running_id=None):
        """
        Function to perform post boot checks such as if the system configuration
        has changed across reboots (specifically, CPUs and partitions).

        Current mount info and CPU count are compared with the values
        saved in the 'client' state namespace before the reboot. A
        mismatch is fatal only when CLIENT.abort_on_mismatch is set;
        otherwise it is logged as a warning.

        @param subdir: The subdir to use in the job.record call.
        @param running_id: An optional running_id to include in the reboot
            failure log message

        @raise JobError: Raised if the current configuration does not match the
            pre-reboot configuration.
        """
        abort_on_mismatch = GLOBAL_CONFIG.get_config_value('CLIENT',
                                                           'abort_on_mismatch',
                                                           type=bool,
                                                           default=False)
        # check to see if any partitions have changed
        partition_list = partition_lib.get_partition_list(self,
                                                          exclude_swap=False)
        mount_info = partition_lib.get_mount_info(partition_list)
        old_mount_info = self._state.get('client', 'mount_info')
        if mount_info != old_mount_info:
            # Set difference in both directions: entries that appeared
            # and entries that vanished across the reboot.
            new_entries = mount_info - old_mount_info
            old_entries = old_mount_info - mount_info
            description = ("mounted partitions are different after reboot "
                           "(old entries: %s, new entries: %s)" %
                           (old_entries, new_entries))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir, "reboot.verify_config",
                                            description, running_id=running_id)
                raise error.JobError("Reboot failed: %s" % description)
            else:
                logging.warning(description)

        # check to see if any CPUs have changed
        cpu_count = utils.count_cpus()
        old_count = self._state.get('client', 'cpu_count')
        if cpu_count != old_count:
            description = ('Number of CPUs changed after reboot '
                           '(old count: %d, new count: %d)' %
                           (old_count, cpu_count))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir, 'reboot.verify_config',
                                            description, running_id=running_id)
                raise error.JobError('Reboot failed: %s' % description)
            else:
                logging.warning(description)
Example #38
0
    def _cg_test_shares(self):
        """Sample the cgroup task's stats before and after loading the
        system with competing tasks.

        @return: list of two stat samples (before-load, after-load).
        """
        self._cg_set_shares(2)
        pid = self._cg_start_task()
        stats = [self._parse_pid_stats(pid)]

        # Oversubscribe the CPUs with tasks started outside the cgroup.
        for _ in xrange(utils.count_cpus() * 2 + 1):
            self._cg_start_task(in_cgroup=False)

        time.sleep(self._MIN_SECS)

        stats.append(self._parse_pid_stats(pid))

        self._cg_stop_tasks()
        return stats
Example #39
0
    def _pull_code(self):
        """
        Retrieves code from git repositories.

        Builds GitRepo descriptors (with their build step lists) for
        spice-protocol, spice and the KVM userspace tree, then fetches
        and patches every repo that has a configured source.

        @raise error.TestError: if the user git repository is not set.
        """
        params = self.params  # NOTE(review): assigned but unused here
        make_jobs = utils.count_cpus()
        cfg = 'PKG_CONFIG_PATH="%s/lib/pkgconfig:%s/share/pkgconfig" ./configure' % (
            self.prefix, self.prefix)

        self.spice_protocol = GitRepo(installer=self, prefix='spice_protocol',
            srcdir='spice-protocol',
            build_steps= ['./autogen.sh',
                          './configure --prefix=%s' % self.prefix,
                          'make clean',
                          'make -j %s' % (make_jobs),
                          'make install'])

        self.spice = GitRepo(installer=self, prefix='spice', srcdir='spice',
            build_steps= ['PKG_CONFIG_PATH="%s/lib/pkgconfig:%s/share/pkgconfig" CXXFLAGS=-Wl,--add-needed ./autogen.sh --prefix=%s' % (self.prefix, self.prefix, self.prefix),
                          'make clean',
                          'make -j %s' % (make_jobs),
                          'make install'])

        self.userspace = GitRepo(installer=self, prefix='user',
            repo_param='user_git_repo', srcdir='kvm_userspace')

        # Inspect the userspace configure script to learn which options
        # this checkout supports.
        p = os.path.join(self.userspace.srcdir, 'configure')
        self.configure_options = virt_installer.check_configure_options(p)

        cfg = cfg + ' --prefix=%s' % self.prefix
        if "--disable-strip" in self.configure_options:
            cfg += ' --disable-strip'
        if self.extra_configure_options:
            cfg += ' %s' % self.extra_configure_options

        self.userspace.build_steps=[cfg, 'make clean', 'make -j %s' % make_jobs]

        if not self.userspace.repo:
            message = "KVM user git repository path not specified"
            logging.error(message)
            raise error.TestError(message)

        # The spice repos are optional; skip any without a configured repo.
        for repo in [self.userspace, self.spice_protocol, self.spice]:
            if not repo.repo:
                continue
            repo.fetch_and_patch()
Example #40
0
def setup(tarball, topdir):
    """Download, unpack and build pgpool-II, installing it under topdir."""
    # FIXME - Waiting to be able to specify dependency.
    #self.job.setup_dep(['pgsql'])
    if not os.path.exists(tarball):
        download_url = ('http://pgfoundry.org/frs/download.php/1083/'
                        'pgpool-II-1.0.1.tar.gz')
        utils.get_file(download_url, tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    build_dir = os.path.join(topdir, 'src')
    os.chdir(build_dir)
    # FIXEME - Waiting to be able to use self.autodir instead of
    # os.environ['AUTODIR']
    configure_flags = ('--prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql'
                       % (topdir, os.environ['AUTODIR']))
    utils.configure(configure_flags)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #41
0
    def verify_graphics_i915_min_clock(self):
        """ On i915 systems, check that we get into the lowest clock frequency;
        idle before doing so, and retry every second for 20 seconds."""
        logging.info('Running verify_graphics_i915_min_clock')

        # TODO(benzh): enable once crbug.com/719040 is fixed.
        if self._gpu_type == 'baytrail' and utils.count_cpus() == 4:
            logging.info('Waived min clock check due to crbug.com/719040')
            return ''

        # Only applicable on x86_64, and pinetrail is excluded.
        if (utils.get_cpu_soc_family() != 'x86_64'
                or self._gpu_type == 'pinetrail'):
            return ''

        param_path = self.get_valid_path(CLOCK_PATHS)
        if not param_path:
            return 'CLOCK_PATHS not found.'

        def at_min_clock(line):
            # The clock file format differs per board, so accept either
            # variant. Rather than listing each board's minimum clock, use
            # 650MHz, the max of the per-board minimums.
            for pattern in (r'CAGF: (.*)MHz', r'current GPU freq: (.*) MHz'):
                match = re.search(pattern, line)
                if match and int(match.group(1)) <= 650:
                    return True
            return False

        # Poll every quarter second, up to 80 attempts (20 seconds total).
        for _ in range(80):
            time.sleep(0.25)
            with open(param_path, 'r') as delayinfo_file:
                if any(at_min_clock(line) for line in delayinfo_file):
                    return ''

        return self.handle_error('Did not see the min i915 clock. ',
                                 param_path)
Example #42
0
class ltp(test.test):
    """Client test that builds and installs the Linux Test Project suite."""
    version = 6

    def _import_site_config(self):
        """
        Load optional site-specific configuration.

        If a site_config.py exists next to this file, execute it and pick up
        its 'ignore_tests' list into self.site_ignore_tests; otherwise default
        to an empty list.
        """
        site_config_path = os.path.join(os.path.dirname(__file__),
                                        'site_config.py')
        if os.path.exists(site_config_path):
            # for some reason __import__ with full path does not work within
            # autotest, although it works just fine on the same client machine
            # in the python interactive shell or separate testcases
            # NOTE(review): execfile() into a function scope is not guaranteed
            # to expose new names via locals().get() -- verify 'ignore_tests'
            # is actually picked up here.
            execfile(site_config_path)
            self.site_ignore_tests = locals().get('ignore_tests', [])
        else:
            self.site_ignore_tests = []


    def initialize(self):
        """Load site config and require a working gcc on the client."""
        self._import_site_config()
        self.job.require_gcc()


    # http://prdownloads.sourceforge.net/ltp/ltp-full-20091231.tgz
    def setup(self, tarball = 'ltp-full-20091231.tar.bz2'):
        """
        Unpack, patch and build LTP, installing into <srcdir>/bin.

        @param tarball: LTP source tarball name/URL (resolved via unmap_url).
        """
        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball, self.srcdir)
        os.chdir(self.srcdir)
        ltpbin_dir = os.path.join(self.srcdir, 'bin')
        os.mkdir(ltpbin_dir)

        utils.system('patch -p1 < ../ltp.patch')

        # comment the capability tests if we fail to load the capability module
        try:
            utils.system('modprobe capability')
        except error.CmdError, detail:
            utils.system('patch -p1 < ../ltp_capability.patch')

        utils.system('cp ../scan.c pan/')   # saves having lex installed
        utils.make('autotools')
        utils.configure('--prefix=%s' % ltpbin_dir)
        # Parallel build across all CPUs.
        utils.make('-j %d all' % utils.count_cpus())
        # 'yes n' declines the interactive prompts during install.
        utils.system('yes n | make SKIP_IDCHECK=1 install')
Example #43
0
    def initialize(self):
        """
        Sanity-check CPU hotplug support and exercise offline/online once
        for every hotpluggable online CPU.

        @raise error.TestNAError: On single-CPU machines (nothing to unplug).
        """
        # Check if the kernel supports cpu hotplug
        if utils.running_config():
            utils.check_for_kernel_feature('HOTPLUG_CPU')

        # With a single online CPU there is nothing to hotplug.
        if utils.count_cpus() == 1:
            raise error.TestNAError(
                'Single CPU online detected, test not supported.')

        # Have a simple and quick check first, FIX me please.
        utils.system('dmesg -c > /dev/null')
        for cpu in utils.cpu_online_map():
            control = '/sys/devices/system/cpu/cpu%s/online' % cpu
            if not os.path.isfile(control):
                continue
            # Take the CPU offline, then bring it back online, draining
            # dmesg after each transition.
            for state in ('0', '1'):
                utils.system('echo %s > ' % state + control, 1)
                utils.system('dmesg -c')
                time.sleep(3)
Example #44
0
    def __build(self):
        """
        Build the KVM modules and userspace code.

        Two source layouts are supported: a separate kvm-kmod tree
        (self.kmod_srcdir set) built against self.kernel_srcdir, or a
        combined userspace tree that also builds the kernel module.
        """
        # Number of concurrent build tasks
        make_jobs = utils.count_cpus()
        if self.kmod_srcdir:
            logging.info('Building KVM modules')
            os.chdir(self.kmod_srcdir)
            utils.system('./configure')
            utils.system('make clean')
            utils.system('make sync LINUX=%s' % self.kernel_srcdir)
            utils.system('make -j %s' % make_jobs)

            logging.info('Building KVM userspace code')
            os.chdir(self.userspace_srcdir)
            cfg = './configure --prefix=%s' % self.prefix
            if "--disable-strip" in self.configure_options:
                cfg += ' --disable-strip'
            if self.extra_configure_options:
                # BUG FIX: was "cfg = ' %s' % ...", which replaced the whole
                # command line (dropping configure, prefix and strip flags)
                # instead of appending, unlike the else branch below.
                cfg += ' %s' % self.extra_configure_options
            utils.system(cfg)
            utils.system('make clean')
            utils.system('make -j %s' % make_jobs)
        else:
            logging.info('Building KVM modules')
            os.chdir(self.userspace_srcdir)
            cfg = './configure --kerneldir=%s' % self.host_kernel_srcdir
            utils.system(cfg)
            utils.system('make clean')
            utils.system('make -j %s -C kernel LINUX=%s sync' %
                         (make_jobs, self.kernel_srcdir))

            logging.info('Building KVM userspace code')
            # This build method (no kvm-kmod) requires that we execute
            # configure again, but now let's use the full command line.
            cfg += ' --prefix=%s' % self.prefix
            if "--disable-strip" in self.configure_options:
                cfg += ' --disable-strip'
            if self.extra_configure_options:
                cfg += ' %s' % self.extra_configure_options
            steps = [cfg, 'make -j %s' % make_jobs]
            for step in steps:
                utils.system(step)
Example #45
0
    def host():
        """Set up the heartbeat monitor and start the stress load on the host."""
        logging.info("Setup monitor server on host")
        # Kill previous instances of the host load programs, if any
        _kill_host_programs(kill_stress_cmd, kill_monitor_cmd)
        # Cleanup previous log instances
        if os.path.isfile(monitor_log_file_server):
            os.remove(monitor_log_file_server)
        # Opening firewall ports on host
        utils.run("iptables -F", ignore_status=True)

        # Run heartbeat on host
        monitor_cmd = server_setup_cmd % (monitor_dir, threshold,
                                          monitor_log_file_server,
                                          monitor_port)
        utils.run(monitor_cmd)

        logging.info("Build stress on host")
        # Uncompress and build stress on host
        utils.run(stress_setup_cmd % stress_dir)

        logging.info("Run stress on host")
        # Use two stress workers for every CPU on the host.
        threads_host = utils.count_cpus() * 2
        utils.run(stress_cmd % (stress_dir, threads_host))
Example #46
0
    def run_once(self, db_type="pgsql", build=1, num_threads=None,
                 max_time=60, read_only=0, args=""):
        """
        Run the database benchmark as the 'nobody' user.

        @param db_type: "pgsql" or "mysql".
        @param build: Whether to build the database server first.
        @param num_threads: Number of client threads; defaults to the number
            of CPUs on this machine (resolved at run time).
        @param max_time: Maximum run time in seconds.
        @param read_only: Run in read-only mode if true.
        @param args: Extra arguments passed through to the benchmark.
        @raise error.TestError: If commands cannot be run as the db user.
        """
        # BUG FIX: the default was 'num_threads=utils.count_cpus()', which is
        # evaluated once at import time; resolve it lazily per call instead.
        if num_threads is None:
            num_threads = utils.count_cpus()
        plib = os.path.join(self.autodir, "deps/pgsql/pgsql/lib")
        mlib = os.path.join(self.autodir, "deps/mysql/mysql/lib/mysql")
        ld_path = utils.prepend_path(plib, utils.environ("LD_LIBRARY_PATH"))
        ld_path = utils.prepend_path(mlib, ld_path)
        os.environ["LD_LIBRARY_PATH"] = ld_path

        # The databases don't want to run as root so run them as nobody
        self.dbuser = "******"
        self.dbuid = pwd.getpwnam(self.dbuser)[2]
        self.sudo = "sudo -u " + self.dbuser + " "

        # Check for nobody user
        try:
            utils.system(self.sudo + "/bin/true")
        except Exception:
            raise error.TestError("Unable to run as nobody")

        if db_type == "pgsql":
            self.execute_pgsql(build, num_threads, max_time, read_only, args)
        elif db_type == "mysql":
            self.execute_mysql(build, num_threads, max_time, read_only, args)
Example #47
0
    def _check_post_reboot(self, subdir, running_id=None):
        """
        Function to perform post boot checks such as if the system configuration
        has changed across reboots (specifically, CPUs and partitions).

        @param subdir: The subdir to use in the job.record call.
        @param running_id: An optional running_id to include in the reboot
            failure log message

        @raise JobError: Raised if the current configuration does not match the
            pre-reboot configuration.
        """
        def fail(description):
            # Record the mismatch, then abort the job.
            self._record_reboot_failure(subdir, "reboot.verify_config",
                                        description, running_id=running_id)
            raise error.JobError("Reboot failed: %s" % description)

        # Compare the mounted-partition set against the pre-reboot snapshot.
        partitions = partition_lib.get_partition_list(self, exclude_swap=False)
        mount_info = partition_lib.get_mount_info(partitions)
        old_mount_info = self._state.get("client", "mount_info")
        if mount_info != old_mount_info:
            fail("mounted partitions are different after reboot "
                 "(old entries: %s, new entries: %s)"
                 % (old_mount_info - mount_info, mount_info - old_mount_info))

        # Compare the CPU count against the pre-reboot snapshot.
        old_count = self._state.get("client", "cpu_count")
        cpu_count = utils.count_cpus()
        if cpu_count != old_count:
            fail("Number of CPUs changed after reboot "
                 "(old count: %d, new count: %d)" % (old_count, cpu_count))
Example #48
0
    def run_once(self, args='', stress_length=60):
        """
        Run the 'stress' load generator.

        @param args: Command line for stress; if empty, a load is auto-sized
            from the CPU count, free memory and free disk space.
        @param stress_length: Seconds to run when auto-sizing (--timeout).
        """
        if not args:
            # Two workers of each class per detected CPU.
            workers = 2 * utils.count_cpus()

            # The default 256M per memory worker could OOM the machine, so
            # size each worker from free RAM plus half of the free swap.
            kb_free = (utils.freememtotal() +
                       utils.read_from_meminfo('SwapFree') / 2)
            vm_bytes = (kb_free * 1024) / workers

            # Cap disk usage at 90% of the free space under srcdir so the
            # hdd workers cannot fill the filesystem.
            free_disk = utils.freespace(self.srcdir)
            hdd_bytes = 1024 ** 2
            if (0.9 * free_disk) < hdd_bytes * workers:
                hdd_bytes = (0.9 * free_disk) / workers

            args = ' '.join([
                '--cpu %d' % workers,            # sqrt() spinners
                '--io %d' % workers,             # sync() spinners
                '--vm %d' % workers,             # malloc()/free() spinners
                '--vm-bytes %d' % vm_bytes,      # memory used per vm worker
                '--hdd %d' % workers,            # write()/unlink() spinners
                '--hdd-bytes %d' % hdd_bytes,    # file size per hdd worker
                '--timeout %d' % stress_length,  # total run time
                '--verbose',
            ])

        utils.system(self.srcdir + '/src/stress ' + args)
Example #49
0
    def run_once(self, args = '', stress_length=60):
        """
        Run the 'stress' workload.

        @param args: Explicit stress command line; when empty, the workload
            is sized automatically from CPU count, memory and disk space.
        @param stress_length: --timeout value in seconds for the auto-sized
            workload.
        """
        if not args:
            # Two workers of each kind per CPU.
            n_workers = 2 * utils.count_cpus()

            # Each memory worker defaults to 256M, which can OOM the box;
            # instead budget free RAM plus half the free swap across workers.
            total_kb = (utils.freememtotal() +
                        utils.read_from_meminfo('SwapFree') / 2)
            mem_per_worker = (total_kb * 1024) / n_workers

            # Limit total disk usage to 90% of the free space available
            # under the autotest source directory.
            disk_free = utils.freespace(self.srcdir)
            bytes_per_file = 1024 ** 2
            if bytes_per_file * n_workers > 0.9 * disk_free:
                bytes_per_file = (0.9 * disk_free) / n_workers

            # --cpu: sqrt() spinners; --io: sync() spinners;
            # --vm/--vm-bytes: memory workers and their size;
            # --hdd/--hdd-bytes: disk workers and their file size.
            args = ('--cpu %d --io %d --vm %d --vm-bytes %d '
                    '--hdd %d --hdd-bytes %d --timeout %d --verbose'
                    % (n_workers, n_workers, n_workers, mem_per_worker,
                       n_workers, bytes_per_file, stress_length))

        utils.system(self.srcdir + '/src/stress ' + args)
Example #50
0
    def run_once(self, db_type='pgsql', build=1, num_threads=None,
                 max_time=60, read_only=0, args=''):
        """
        Run the database benchmark as the 'nobody' user.

        @param db_type: 'pgsql' or 'mysql'.
        @param build: Whether to build the database server first.
        @param num_threads: Number of client threads; defaults to the number
            of CPUs on this machine (resolved at run time).
        @param max_time: Maximum run time in seconds.
        @param read_only: Run in read-only mode if true.
        @param args: Extra arguments passed through to the benchmark.
        @raise error.TestError: If commands cannot be run as the db user.
        """
        # BUG FIX: the default was 'num_threads=utils.count_cpus()', which is
        # evaluated once at import time; resolve it lazily per call instead.
        if num_threads is None:
            num_threads = utils.count_cpus()
        plib = os.path.join(self.autodir, 'deps/pgsql/pgsql/lib')
        mlib = os.path.join(self.autodir, 'deps/mysql/mysql/lib/mysql')
        ld_path = utils.prepend_path(plib, utils.environ('LD_LIBRARY_PATH'))
        ld_path = utils.prepend_path(mlib, ld_path)
        os.environ['LD_LIBRARY_PATH'] = ld_path

        # The databases don't want to run as root so run them as nobody
        self.dbuser = '******'
        self.dbuid = pwd.getpwnam(self.dbuser)[2]
        self.sudo = 'sudo -u ' + self.dbuser + ' '

        # Check for nobody user
        try:
            utils.system(self.sudo + '/bin/true')
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise error.TestError('Unable to run as nobody')

        if db_type == 'pgsql':
            self.execute_pgsql(build, num_threads, max_time, read_only, args)
        elif db_type == 'mysql':
            self.execute_mysql(build, num_threads, max_time, read_only, args)
Example #51
0
    def host():
        """Prepare the host side: monitor server, stress build, stress run."""
        logging.info("Setup monitor server on host")
        # Stop any leftover load/monitor processes from a previous run.
        _kill_host_programs(kill_stress_cmd, kill_monitor_cmd)
        # Start from a clean monitor log.
        if os.path.isfile(monitor_log_file_server):
            os.remove(monitor_log_file_server)
        # Flush firewall rules so the monitor port is reachable.
        utils.run("iptables -F", ignore_status=True)

        # Launch the heartbeat monitor server.
        utils.run(server_setup_cmd
                  % (monitor_dir, threshold,
                     monitor_log_file_server, monitor_port))

        logging.info("Build stress on host")
        # Unpack and compile the stress tool.
        utils.run(stress_setup_cmd % stress_dir)

        logging.info("Run stress on host")
        # Two stress threads per host CPU.
        n_threads = 2 * utils.count_cpus()
        utils.run(stress_cmd % (stress_dir, n_threads))
Example #52
0
    def execute(self, testdir = None, iterations = 10000):
        """
        Run bash-shared-mapping with two background usemem memory hogs.

        @param testdir: Directory to run in (defaults to self.tmpdir).
        @param iterations: Iteration count passed to bash-shared-mapping.
        """
        if not testdir:
            testdir = self.tmpdir
        os.chdir(testdir)
        mapping_file = os.path.join(testdir, 'foo')
        # Both bash-shared-mapping and each usemem use 3/4 of total memory.
        kilobytes = (3 * utils.memtotal()) / 4

        # Spawn two background 'usemem -m <megabytes>' processes.
        usemem = os.path.join(self.srcdir, 'usemem')
        usemem_args = ('usemem', '-N', '-m', '%d' % (kilobytes / 1024))
        pids = [os.spawnv(os.P_NOWAIT, usemem, usemem_args)
                for _ in (0, 1)]

        cmd = "%s/bash-shared-mapping %s %d -t %d -n %d" % \
                        (self.srcdir, mapping_file, kilobytes,
                         utils.count_cpus(), iterations)
        os.system(cmd)

        # The usemem hogs run forever; kill them once the mapping test ends.
        for pid in pids:
            os.kill(pid, signal.SIGKILL)
Example #53
0
    def run_once(self):
        """
        Run each benchmark twice, with different number of threads.

        A sanity check is made on each benchmark executed:
        The ratio between the times
        time_ratio = time_one_thrd / time_full_thrds

        Has to be contained inside an envelope:
        upper_bound = full_thrds * (1 + ratio)
        lower_bound = full_thrds * (1 - ratio)

        where ratio is self.ratio. Otherwise, we throw an exception (this
        test might be running under a virtual machine and sanity check
        failure might mean bugs on smp implementation).
        """
        os.chdir(self.srcdir)

        # get the tests to run
        test_list = self.tests.split()

        if len(test_list) == 0:
            raise error.TestError('No tests (benchmarks) provided. Exit.')

        for itest in test_list:
            itest_cmd = os.path.join('NPB3.3-OMP/bin/', itest)
            try:
                result = utils.run(itest_cmd)
            except Exception:
                # BUG FIX: the original logged 'itest.stdout' here, but on
                # failure 'itest' was still the test-name string (no .stdout
                # attribute) and execution fell through to use it anyway.
                # Log the failure and skip this benchmark instead.
                logging.error('NPB benchmark %s has failed.', itest_cmd)
                self.n_fail += 1
                continue
            logging.debug(result.stdout)

            # Get the number of threads that the test ran
            # (which is supposed to be equal to the number of system cores)
            m = re.search('Total threads\s*=\s*(.*)\n', result.stdout)

            # Gather benchmark results
            ts = re.search('Time in seconds\s*=\s*(.*)\n', result.stdout)
            mt = re.search('Mop/s total\s*=\s*(.*)\n', result.stdout)
            mp = re.search('Mop/s/thread\s*=\s*(.*)\n', result.stdout)

            time_seconds = float(ts.groups()[0])
            mops_total = float(mt.groups()[0])
            mops_per_thread = float(mp.groups()[0])

            logging.info('Test: %s', itest_cmd)
            logging.info('Time (s): %s', time_seconds)
            logging.info('Total operations executed (mops/s): %s', mops_total)
            logging.info('Total operations per thread (mops/s/thread): %s',
                         mops_per_thread)

            self.write_test_keyval({'test': itest_cmd})
            self.write_test_keyval({'time_seconds': time_seconds})
            self.write_test_keyval({'mops_total': mops_total})
            self.write_test_keyval({'mops_per_thread': mops_per_thread})

            # A little extra sanity check comes handy
            if int(m.groups()[0]) != utils.count_cpus():
                # BUG FIX: the message contained %s placeholders that were
                # never filled in; supply the actual values.
                raise error.TestError("NPB test suite evaluated the number "
                                      "of threads incorrectly: System appears "
                                      "to have %s cores, but %s threads were "
                                      "executed." %
                                      (utils.count_cpus(), m.groups()[0]))

            # We will use this integer with float point vars later.
            full_thrds = float(m.groups()[0])

            # Duration of the full-thread run (already parsed above).
            time_full_thrds = time_seconds

            # repeat the execution with single thread.
            itest_single_cmd = ''.join(['OMP_NUM_THREADS=1 ', itest_cmd])
            try:
                itest_single = utils.run(itest_single_cmd)
            except Exception:
                # BUG FIX: on failure 'itest_single' was unbound, so logging
                # its stdout raised NameError; skip the comparison instead.
                logging.error('NPB benchmark single thread %s has failed.',
                              itest_single_cmd)
                self.n_fail += 1
                continue

            m = re.search('Time in seconds\s*=\s*(.*)\n', itest_single.stdout)
            time_one_thrd = float(m.groups()[0])

            # check durations
            ratio = self.ratio
            time_ratio = float(time_one_thrd / time_full_thrds)
            upper_bound = full_thrds * (1 + ratio)
            lower_bound = full_thrds * (1 - ratio)
            logging.debug('Time ratio for %s: %s', itest_cmd, time_ratio)
            logging.debug('Upper bound: %s', upper_bound)
            logging.debug('Lower bound: %s', lower_bound)

            if not lower_bound <= time_ratio <= upper_bound:
                logging.error('NPB benchmark %s failed sanity check '
                              '- time ratio outside bounds', itest_cmd)
                self.n_fail += 1
            else:
                logging.debug('NPB benchmark %s sanity check PASS', itest_cmd)
 def cpu_count(self):
     """Return the total number of CPUs reported by autotest's utils."""
     return utils.count_cpus()  # use total system count
Example #55
0
    def build(self, make_opts = '', logfile = '', extraversion='autotest'):
        """build xen

        Builds the xen hypervisor and user-space tools, then builds (or
        reuses) a xen-ified dom0 kernel job and logs the resulting versions.

        make_opts
                additional options to make, if any
        logfile
                build log destination; defaults to <log_dir>/xen_build
        extraversion
                suffix used for XEN_EXTRAVERSION (default 'autotest')
        """
        self.log('running build')
        # Both gcc and make must be available before starting.
        os_dep.commands('gcc', 'make')
        # build xen with extraversion flag
        os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
        if logfile == '':
            logfile = os.path.join(self.log_dir, 'xen_build')
        os.chdir(self.build_dir)
        self.log('log_dir: %s ' % self.log_dir)
        self.job.logging.tee_redirect_debug_dir(self.log_dir, log_name=logfile)

        # build xen hypervisor and user-space tools
        targets = ['xen', 'tools']
        # Two make jobs per CPU to keep the build pipeline saturated.
        threads = 2 * utils.count_cpus()
        for t in targets:
            build_string = 'make -j %d %s %s' % (threads, make_opts, t)
            self.log('build_string: %s' % build_string)
            utils.system(build_string)

        # make a kernel job out of the kernel from the xen src if one isn't provided
        if self.kjob is None:
            # get xen kernel tree ready
            self.log("prep-ing xen'ified kernel source tree")
            utils.system('make prep-kernels')

            v = self.get_xen_kernel_build_ver()
            self.log('building xen kernel version: %s' % v)

            # build xen-ified kernel in xen tree
            kernel_base_tree = os.path.join(self.build_dir, \
                    'linux-%s' % self.get_xen_kernel_build_ver())

            self.log('kernel_base_tree = %s' % kernel_base_tree)
            # fix up XENGUEST value in EXTRAVERSION; we can't have
            # files with '$(XENGEUST)' in the name, =(
            self.fix_up_xen_kernel_makefile(kernel_base_tree)

            # make the kernel job
            self.kjob = self.job.kernel(kernel_base_tree)

            # hardcoding dom0 config (no modules for testing, yay!)
            # FIXME: probe host to determine which config to pick
            c = self.build_dir + '/buildconfigs/linux-defconfig_xen0_x86_32'
            self.log('using kernel config: %s ' % c)
            self.kjob.config(c)

            # Xen's kernel tree sucks; doesn't use bzImage, but vmlinux
            self.kjob.set_build_target('vmlinuz')

            # also, the vmlinuz is not out in arch/*/boot, ARGH! more hackery
            self.kjob.set_build_image(self.job.tmpdir + '/build/linux/vmlinuz')

        self.kjob.build()

        self.job.logging.restore()

        xen_version = self.get_xen_build_ver()
        self.log('BUILD VERSION: Xen: %s Kernel:%s' % \
                        (xen_version, self.kjob.get_kernel_build_ver()))
    def run_once(self,
                 seconds=60,
                 free_memory_fraction=0.95,
                 wait_secs=0,
                 disk_thread=True):
        '''
        Run stressapptest over most of free memory, one memory-copy thread
        per CPU and (optionally) two disk threads.

        Args:
          seconds: how long to run stressapptest.
          free_memory_fraction: Fraction of free memory (as determined by
            utils.freememtotal()) to use.
          wait_secs: time to wait in seconds before executing stressapptest.
          disk_thread: also stress disk using -f argument of stressapptest.
        '''
        assert 0 < free_memory_fraction < 1

        # Give parallel tests time to settle their memory usage so that
        # stressapptest does not claim too much memory.
        if wait_secs:
            time.sleep(wait_secs)

        # Allow shmem access to all of memory -- used for 32 bit access to
        # > 1.4G, where the virtual address space prevents direct mapping.
        utils.run('mount -o remount,size=100% /dev/shm')

        cpus = max(utils.count_cpus(), 1)
        mbytes = max(int(utils.freememtotal() * free_memory_fraction / 1024),
                     512)
        # Even with shmem, ftruncate still caps us at 2G on 32 bit systems.
        if sys.maxsize < 2**32 and mbytes > 2047:
            mbytes = 2047

        # Use as much memory as possible (without OOMing or starving the
        # kernel) so the maximum amount of memory gets tested. 60 seconds
        # gives several passes and patterns over each location; 4 hours makes
        # an effective run-in test for lower-frequency errors. One memory
        # copy thread per CPU saturates the memory bus while keeping every
        # CPU busy; -W (SSE copy/checksum) drives memory harder and also
        # stresses the CPU.
        flags = [' -M %d' % mbytes,   # megabytes to test
                 ' -s %d' % seconds,  # seconds to run
                 ' -m %d' % cpus,     # memory copy threads
                 ' -W']               # SSE optimization in memory threads
        # Disk threads exercise the south bridge and SATA path and may
        # surface SSD or disk cache problems; two threads keep multiple
        # transactions outstanding where supported.
        if disk_thread:
            flags.append(' -f sat.diskthread.a')
            flags.append(' -f sat.diskthread.b')
        args = ''.join(flags)

        if utils.get_board() == 'link':
            args += memory_channel_args_snb_bdw(
                [['U1', 'U2', 'U3', 'U4'],
                 ['U6', 'U5', 'U7', 'U8']])  # yes, U6 is actually before U5

        if utils.get_board() == 'samus':
            args += memory_channel_args_snb_bdw([['U11', 'U12'],
                                                 ['U13', 'U14']])

        # 'stressapptest' is provided by dev-util/stressapptest, pre-installed
        # in test images.
        sat = utils.run('stressapptest' + args)
        logging.debug(sat.stdout)
        if not re.search('Status: PASS', sat.stdout):
            raise error.TestFail(sat.stdout)