Example #1
 def download(self):
     '''
     Copies patch files from remote locations to the source directory
     '''
     for patch in self.patches:
         utils.get_file(
             patch, os.path.join(self.source_dir, os.path.basename(patch)))
Example #2
def auto_kernel(job, path, subdir, tmp_dir, build_dir, leave=False):
    """
    Create a kernel object, dynamically selecting the appropriate class to use
    based on the path provided.
    """
    kernel_paths = [preprocess_path(path)]
    if kernel_paths[0].endswith('.list'):
        # Fetch the list of packages to install
        kernel_list = os.path.join(tmp_dir, 'kernel.list')
        utils.get_file(kernel_paths[0], kernel_list)
        kernel_paths = [p.strip() for p in open(kernel_list).readlines()]

    if kernel_paths[0].endswith('.rpm'):
        rpm_paths = []
        for kernel_path in kernel_paths:
            if os.path.exists(kernel_path):
                rpm_paths.append(kernel_path)
            else:
                # Fetch the rpm into the job's packages directory and pass it to
                # rpm_kernel
                rpm_name = os.path.basename(kernel_path)

                # If the preprocessed path (kernel_path) is only a name then
                # search for the kernel in all the repositories, else fetch the
                # kernel from that specific path.
                job.pkgmgr.fetch_pkg(rpm_name, os.path.join(job.pkgdir, rpm_name),
                                     repo_url=os.path.dirname(kernel_path))

                rpm_paths.append(os.path.join(job.pkgdir, rpm_name))
        return rpm_kernel_vendor(job, rpm_paths, subdir)
    else:
        if len(kernel_paths) > 1:
            raise error.TestError("don't know what to do with more than one non-rpm kernel file")
        return kernel(job, kernel_paths[0], subdir, tmp_dir, build_dir, leave)
Example #3
    def _get(self, url, dst):
        '''
        Download a given file to a destination path.

        This is a wrapper around utils.get_file() that will keep trying to
        download the file from the URL for the time defined in the
        RETRY_TIMEOUT class attribute, in step intervals defined in the
        RETRY_STEP class attribute.

        :param url: Uniform Resource Locator of the source file
        :param dst: Destination path
        :raise: class `KojiDownloadError`
        '''
        success = False
        last_error = ""
        end_time = time.time() + self.RETRY_TIMEOUT

        while time.time() < end_time:
            try:
                utils.get_file(url, dst)
                success = True
                break
            except Exception, e:
                last_error = str(e)
                logging.error("Download failed: %s", last_error)
                logging.error("Retrying after %s seconds...",
                              self.RETRY_STEP)
                if os.path.isfile(dst):
                    os.unlink(dst)
                time.sleep(self.RETRY_STEP)
Example #4
    def run_once(self, options="", testlist=""):
        """
        Passes the appropriate parameters to the testsuite.

        # Usage: $0 [options] [testlist]
        # check options
        #     -raw                test raw (default)
        #     -cow                test cow
        #     -qcow               test qcow
        #     -qcow2              test qcow2
        #     -vpc                test vpc
        #     -vmdk               test vmdk
        #     -qed                test qed
        #     -xdiff              graphical mode diff
        #     -nocache            use O_DIRECT on backing file
        #     -misalign           misalign memory allocations
        #     -n                  show me, do not run tests
        #     -T                  output timestamps
        #     -r                  randomize test order
        #
        # testlist options
        #     -g group[,group...] include tests from these groups
        #     -x group[,group...] exclude tests from these groups
        #     NNN                 include test NNN
        #     NNN-NNN             include test range (eg. 012-021)

        :param qemu_path: Optional qemu install path.
        :param options: Options accepted by the testsuite.
        :param testlist: List of tests that will be executed (by default, all
                testcases will be executed).
        """
        os.chdir(self.srcdir)
        test_dir = os.path.join(self.srcdir, "scratch")
        if not os.path.exists(test_dir):
            os.mkdir(test_dir)
        cmd = "./check"
        if options:
            cmd += " " + options
        if testlist:
            cmd += " " + testlist

        try:
            try:
                result = utils.system(cmd)
            except error.CmdError, e:
                failed_cases = re.findall("Failures: (\d+)", str(e))
                for num in failed_cases:
                    failed_name = num + ".out.bad"
                    src = os.path.join(self.srcdir, failed_name)
                    dest = os.path.join(self.resultsdir, failed_name)
                    utils.get_file(src, dest)
                if failed_cases:
                    e_msg = "Qemu-iotests failed. Failed cases: %s" % failed_cases
                else:
                    e_msg = "Qemu-iotests failed"
                raise error.TestFail(e_msg)
        finally:
            src = os.path.join(self.srcdir, "check.log")
            dest = os.path.join(self.resultsdir, "check.log")
            utils.get_file(src, dest)
Example #5
def auto_kernel(job, path, subdir, tmp_dir, build_dir, leave=False):
    """
    Create a kernel object, dynamically selecting the appropriate class to use
    based on the path provided.
    """
    kernel_paths = [preprocess_path(path)]
    if kernel_paths[0].endswith('.list'):
        # Fetch the list of packages to install
        kernel_list = os.path.join(tmp_dir, 'kernel.list')
        utils.get_file(kernel_paths[0], kernel_list)
        kernel_paths = [p.strip() for p in open(kernel_list).readlines()]

    if kernel_paths[0].endswith('.rpm'):
        rpm_paths = []
        for kernel_path in kernel_paths:
            if os.path.exists(kernel_path):
                rpm_paths.append(kernel_path)
            else:
                # Fetch the rpm into the job's packages directory and pass it to
                # rpm_kernel
                rpm_name = os.path.basename(kernel_path)

                # If the preprocessed path (kernel_path) is only a name then
                # search for the kernel in all the repositories, else fetch the
                # kernel from that specific path.
                job.pkgmgr.fetch_pkg(rpm_name, os.path.join(job.pkgdir, rpm_name),
                                     repo_url=os.path.dirname(kernel_path))

                rpm_paths.append(os.path.join(job.pkgdir, rpm_name))
        return rpm_kernel_vendor(job, rpm_paths, subdir)
    else:
        if len(kernel_paths) > 1:
            raise error.TestError("don't know what to do with more than one non-rpm kernel file")
        return kernel(job, kernel_paths[0], subdir, tmp_dir, build_dir, leave)
Example #6
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file(
            'http://downloads.mysql.com/archives/mysql-5.0/mysql-5.0.45.tar.gz',
            tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure('--prefix=%s/mysql --enable-thread-safe-client' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    #
    # MySQL doesn't create this directory on its own.
    # This is where database logs and files are created.
    #
    try:
        os.mkdir(topdir + '/mysql/var')
    except Exception:
        pass
    #
    # Initialize the database.
    #
    utils.system('%s/mysql/bin/mysql_install_db' % topdir)

    os.chdir(topdir)
Example #7
    def _get(self, url, dst):
        '''
        Download a given file to a destination path.

        This is a wrapper around utils.get_file() that will keep trying to
        download the file from the URL for the time defined in the
        RETRY_TIMEOUT class attribute, in step intervals defined in the
        RETRY_STEP class attribute.

        :param url: Uniform Resource Locator of the source file
        :param dst: Destination path
        :raise: class `KojiDownloadError`
        '''
        success = False
        last_error = ""
        end_time = time.time() + self.RETRY_TIMEOUT

        while time.time() < end_time:
            try:
                utils.get_file(url, dst)
                success = True
                break
            except Exception, e:
                last_error = str(e)
                logging.error("Download failed: %s", last_error)
                logging.error("Retrying after %s seconds...",
                              self.RETRY_STEP)
                if os.path.isfile(dst):
                    os.unlink(dst)
                time.sleep(self.RETRY_STEP)
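Both _get snippets above end at the bottom of the retry loop; the docstring says a KojiDownloadError is raised when the download never succeeds, but that final check is not part of the excerpt. A minimal standalone sketch of the same retry pattern follows. The fetch_with_retry name, the DownloadError class, the RETRY_TIMEOUT/RETRY_STEP values and the use of urlretrieve as a stand-in for utils.get_file are assumptions made for illustration, not the project's API.

import logging
import os
import time

try:
    from urllib.request import urlretrieve      # Python 3
except ImportError:
    from urllib import urlretrieve               # Python 2

RETRY_TIMEOUT = 300     # assumed total retry budget, in seconds
RETRY_STEP = 10         # assumed pause between attempts, in seconds


class DownloadError(Exception):
    """Raised when the file cannot be fetched within RETRY_TIMEOUT."""


def fetch_with_retry(url, dst):
    # Keep retrying until the deadline, removing partial files between tries.
    last_error = ""
    end_time = time.time() + RETRY_TIMEOUT
    while time.time() < end_time:
        try:
            urlretrieve(url, dst)       # stand-in for utils.get_file(url, dst)
            return dst
        except Exception as e:
            last_error = str(e)
            logging.error("Download failed: %s", last_error)
            logging.error("Retrying after %s seconds...", RETRY_STEP)
            if os.path.isfile(dst):
                os.unlink(dst)          # drop any partial download
            time.sleep(RETRY_STEP)
    raise DownloadError("could not fetch %s: %s" % (url, last_error))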
Example #8
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('http://downloads.mysql.com/archives/mysql-5.0/mysql-5.0.45.tar.gz', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure('--prefix=%s/mysql --enable-thread-safe-client'
                    % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    #
    # MySQL doesn't create this directory on its own.
    # This is where database logs and files are created.
    #
    try:
        os.mkdir(topdir + '/mysql/var')
    except Exception:
        pass
    #
    # Initialize the database.
    #
    utils.system('%s/mysql/bin/mysql_install_db' % topdir)

    os.chdir(topdir)
Example #9
 def download(self):
     '''
     Copies patch files from remote locations to the source directory
     '''
     for patch in self.patches:
         utils.get_file(patch, os.path.join(self.source_dir,
                                            os.path.basename(patch)))
Example #10
    def __init__(self,
                 job,
                 build_dir,
                 config_dir,
                 orig_file,
                 overrides,
                 defconfig=False,
                 name=None,
                 make=None):
        self.build_dir = build_dir
        self.config_dir = config_dir
        self.orig_config = os.path.join(config_dir, 'config.orig')
        self.running_config = utils.running_config()

        # 1. Get original config file
        self.build_config = os.path.join(build_dir, '.config')
        if (orig_file == '' and not defconfig
                and not make):  # use user default
            s = job.config_get("kernel.default_config_set")
            defconf = None
            if s and name:
                defconf = config_by_name(name, s)
            if not defconf:
                defconf = job.config_get("kernel.default_config")
            if defconf:
                orig_file = defconf
            else:
                if self.running_config is not None:
                    orig_file = self.running_config
        if (orig_file == '' and not make and defconfig):  # use defconfig
            make = 'defconfig'
        if (orig_file == '' and make):  # use the config command
            logging.debug("using %s to configure kernel" % make)
            os.chdir(build_dir)
            make_return = utils.system('make %s > /dev/null' % make)
            self.config_record(make)
            if make_return:
                raise error.TestError('make %s failed' % make)
        else:
            logging.debug("using %s to configure kernel", orig_file)
            utils.get_file(orig_file, self.orig_config)
            self.update_config(self.orig_config, self.orig_config + '.new')
            diff_configs(self.orig_config, self.orig_config + '.new')

        # 2. Apply overrides
        if overrides:
            logging.debug("using %s to re-configure kernel", overrides)
            self.over_config = os.path.join(config_dir, 'config.over')
            overrides_local = self.over_config + '.changes'
            utils.get_file(overrides, overrides_local)
            apply_overrides(self.build_config, overrides_local,
                            self.over_config)
            self.update_config(self.over_config, self.over_config + '.new')
            diff_configs(self.over_config, self.over_config + '.new')
        else:
            self.over_config = self.orig_config
Example #11
    def __init__(self, job, build_dir, config_dir, orig_file, overrides,
                 defconfig=False, name=None, make=None):
        self.build_dir = build_dir
        self.config_dir = config_dir
        self.orig_config = os.path.join(config_dir, 'config.orig')
        running_config = utils.running_config()
        if running_config is None:
            running_config = ''
        if running_config.endswith('.gz'):
            tmp_running_config = '/tmp/running_config'
            utils.system('cat %s | gunzip > %s' %
                         (running_config, tmp_running_config))
            running_config = tmp_running_config

        self.running_config = running_config

        # 1. Get original config file
        self.build_config = os.path.join(build_dir, '.config')
        if (orig_file == '' and not defconfig and not make): # use user default
            s = job.config_get("kernel.default_config_set")
            defconf = None
            if s and name:
                defconf = config_by_name(name, s)
            if not defconf:
                defconf = job.config_get("kernel.default_config")
            if defconf:
                orig_file = defconf
            else:
                if self.running_config:
                    orig_file = self.running_config
        if (orig_file == '' and not make and defconfig): # use defconfig
            make = 'defconfig'
        if (orig_file == '' and make): # use the config command
            logging.debug("using %s to configure kernel" % make)
            os.chdir(build_dir)
            make_return = utils.system('make %s > /dev/null' % make)
            self.config_record(make)
            if make_return:
                raise error.TestError('make %s failed' % make)
        else:
            logging.debug("using %s to configure kernel", orig_file)
            utils.get_file(orig_file, self.orig_config)
            self.update_config(self.orig_config, self.orig_config + '.new')
            diff_configs(self.orig_config, self.orig_config + '.new')

        # 2. Apply overrides
        if overrides:
            logging.debug("using %s to re-configure kernel", overrides)
            self.over_config = os.path.join(config_dir, 'config.over')
            overrides_local = self.over_config + '.changes'
            utils.get_file(overrides, overrides_local)
            apply_overrides(self.build_config, overrides_local, self.over_config)
            self.update_config(self.over_config, self.over_config + '.new')
            diff_configs(self.over_config, self.over_config + '.new')
        else:
            self.over_config = self.orig_config
Example #12
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('ftp://ftp-archives.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure('--without-readline --without-zlib --enable-debug --prefix=%s/pgsql' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #13
    def __init__(self,
                 job,
                 build_dir,
                 config_dir,
                 orig_file,
                 overrides,
                 defconfig=False,
                 name=None,
                 make=None):
        self.build_dir = build_dir
        self.config_dir = config_dir

        #       1. Get original config file
        self.build_config = build_dir + '/.config'
        if (orig_file == '' and not defconfig
                and not make):  # use user default
            set = job.config_get("kernel.default_config_set")
            defconf = None
            if set and name:
                defconf = config_by_name(name, set)
            if not defconf:
                defconf = job.config_get("kernel.default_config")
            if defconf:
                orig_file = defconf
        if (orig_file == '' and not make and defconfig):  # use defconfig
            make = 'defconfig'
        if (orig_file == '' and make):  # use the config command
            print "kernel_config: using " + make + " to configure kernel"
            os.chdir(build_dir)
            make_return = utils.system('make %s > /dev/null' % make)
            self.config_record(make)
            if (make_return):
                raise error.TestError('make %s failed' % make)
        else:
            print "kernel_config: using " + orig_file + \
                                            " to configure kernel"
            self.orig_config = config_dir + '/config.orig'
            utils.get_file(orig_file, self.orig_config)
            self.update_config(self.orig_config, self.orig_config + '.new')
            diff_configs(self.orig_config, self.orig_config + '.new')

        #       2. Apply overrides
        if overrides:
            print "kernel_config: using " + overrides + \
                                            " to re-configure kernel"
            self.over_config = config_dir + '/config.over'
            overrides_local = self.over_config + '.changes'
            utils.get_file(overrides, overrides_local)
            apply_overrides(self.build_config, overrides_local,
                            self.over_config)
            self.update_config(self.over_config, self.over_config + '.new')
            diff_configs(self.over_config, self.over_config + '.new')
        else:
            self.over_config = self.orig_config
Example #14
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('http://www.packetfactory.net/libnet/dist/libnet.tar.gz',
                       tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure('--prefix=%s/libnet' % topdir)
    utils.make()
    utils.make('install')

    os.chdir(topdir)
Example #15
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file(
            'http://www.packetfactory.net/libnet/dist/libnet.tar.gz', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure('--prefix=%s/libnet' % topdir)
    utils.make()
    utils.make('install')

    os.chdir(topdir)
Example #16
 def get_patches(self, patches):
     """fetch the patches to the local src_dir"""
     local_patches = []
     for patch in patches:
         dest = os.path.join(self.src_dir, os.path.basename(patch))
         # FIXME: this isn't unique. Append something to it
         # like wget does if it's not there?
         print "get_file %s %s %s %s" % (patch, dest, self.src_dir, os.path.basename(patch))
         utils.get_file(patch, dest)
         # probably safer to use the command, not python library
         md5sum = utils.system_output("md5sum " + dest).split()[0]
         local_patches.append((patch, dest, md5sum))
     return local_patches
Example #17
 def get_patches(self, patches):
     """fetch the patches to the local src_dir"""
     local_patches = []
     for patch in patches:
         dest = os.path.join(self.src_dir, os.path.basename(patch))
         # FIXME: this isn't unique. Append something to it
         # like wget does if it's not there?
         print "get_file %s %s %s %s" % (patch, dest, self.src_dir,
                                         os.path.basename(patch))
         utils.get_file(patch, dest)
         # probably safer to use the command, not python library
         md5sum = utils.system_output('md5sum ' + dest).split()[0]
         local_patches.append((patch, dest, md5sum))
     return local_patches
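The get_patches snippets above shell out to md5sum and note in a comment that the command is "probably safer" than the Python library. For comparison, here is a hedged sketch that does the same bookkeeping with only the standard library; the fetch_patches name, the src_dir argument and the use of urlretrieve in place of utils.get_file are illustrative assumptions.

import hashlib
import os

try:
    from urllib.request import urlretrieve      # Python 3
except ImportError:
    from urllib import urlretrieve               # Python 2


def _md5sum(path, chunk_size=1024 * 1024):
    # Hash the file in chunks so large patches need not fit in memory.
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()


def fetch_patches(patches, src_dir):
    # Download each patch next to the sources and record (url, path, md5sum).
    local_patches = []
    for patch in patches:
        dest = os.path.join(src_dir, os.path.basename(patch))
        urlretrieve(patch, dest)                 # stand-in for utils.get_file
        local_patches.append((patch, dest, _md5sum(dest)))
    return local_patches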
Example #18
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file(
            'ftp://ftp-archives.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2',
            tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure(
        '--without-readline --without-zlib --enable-debug --prefix=%s/pgsql' %
        topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #19
    def _kernel_install_src(self, base_tree, config, config_list=None,
                           patch_list=None, need_reboot=True):
        if not utils.is_url(base_tree):
            base_tree = os.path.join(self.bindir, base_tree)
        if not utils.is_url(config):
            config = os.path.join(self.bindir, config)
        kernel = self.job.kernel(base_tree, self.outputdir)
        if patch_list:
            patches = []
            for p in patch_list.split():
                # Make sure all the patches are in local.
                if not utils.is_url(p):
                    continue
                dst = os.path.join(self.bindir, os.path.basename(p))
                local_patch = utils.get_file(p, dst)
                patches.append(local_patch)
            kernel.patch(*patches)
        kernel.config(config, config_list)
        kernel.build()
        kernel.install()

        if need_reboot:
            kernel.boot()
        else:
            kernel.add_to_bootloader()
Example #20
    def execute(self):
        '''
        Executes all actions this helper class is supposed to perform

        This is the main entry point method for this class, and all other
        helper classes.

        This implementation fetches the remote tar file and then extracts
        it using the functionality present in the parent class.
        '''
        name = os.path.basename(self.source)
        base_dest = os.path.dirname(self.destination_dir)
        dest = os.path.join(base_dest, name)
        utils.get_file(self.source, dest)
        self.source = dest
        self.extract()
Example #21
    def _kernel_install_rpm(self,
                            rpm_file,
                            kernel_deps_rpms=None,
                            need_reboot=True):
        """
        Install kernel rpm package.
        The rpm packages should be given as URLs or placed in this test's
        directory (client/test/kernelinstall)
        """
        if kernel_deps_rpms:
            logging.info("Installing kernel dependencies.")
            if isinstance(kernel_deps_rpms, list):
                kernel_deps_rpms = " ".join(kernel_deps_rpms)
            utils.run('rpm -U --force %s' % kernel_deps_rpms)

        dst = os.path.join("/tmp", os.path.basename(rpm_file))
        knl = utils.get_file(rpm_file, dst)
        kernel = self.job.kernel(knl)
        logging.info("Installing kernel %s", rpm_file)
        kernel.install(install_vmlinux=False)

        if need_reboot:
            kernel.boot()
        else:
            kernel.add_to_bootloader()
Example #22
    def make_guest_kernel(self):
        '''
        Runs "make", using a single job
        '''
        os.chdir(self.source)
        logging.info("Building guest kernel")
        logging.debug("Kernel config is %s" % self.config)
        utils.get_file(self.config, '.config')

        # FIXME currently no support for builddir
        # run old config
        utils.system('yes "" | make oldconfig > /dev/null')
        parallel_make_jobs = utils.count_cpus()
        make_command = "make -j %s %s" % (parallel_make_jobs, self.build_target)
        logging.info("Running parallel make on src dir")
        utils.system(make_command)
Example #23
    def get_scratch_pkgs(self, pkg, dst_dir, arch=None):
        '''
        Download the packages from a scratch build

        :type pkg: KojiScratchPkgSpec
        :param pkg: a scratch package specification
        :type dst_dir: string
        :param dst_dir: the destination directory, where the downloaded
                packages will be saved
        :type arch: string
        :param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        rpm_urls = self.get_scratch_pkg_urls(pkg, arch)
        for url in rpm_urls:
            utils.get_file(url, os.path.join(dst_dir, os.path.basename(url)))
Example #24
    def _kernel_install_src(self, base_tree, config, config_list=None,
                            patch_list=None, need_reboot=True):
        if not utils.is_url(base_tree):
            base_tree = os.path.join(self.bindir, base_tree)
        if not utils.is_url(config):
            config = os.path.join(self.bindir, config)
        kernel = self.job.kernel(base_tree, self.outputdir)
        if patch_list:
            patches = []
            for p in patch_list:
                # Make sure all the patches are in local.
                if not utils.is_url(p):
                    continue
                dst = os.path.join(self.bindir, os.path.basename(p))
                local_patch = utils.get_file(p, dst)
                patches.append(local_patch)
            kernel.patch(*patches)
        kernel.config(config, config_list)
        kernel.build()
        kernel.install()

        if need_reboot:
            kernel.boot()
        else:
            kernel.add_to_bootloader()
Example #25
    def execute(self):
        '''
        Executes all actions this helper class is supposed to perform

        This is the main entry point method for this class, and all other
        helper classes.

        This implementation fetches the remote tar file and then extracts
        it using the functionality present in the parent class.
        '''
        name = os.path.basename(self.source)
        base_dest = os.path.dirname(self.destination_dir)
        dest = os.path.join(base_dest, name)
        utils.get_file(self.source, dest)
        self.source = dest
        self.extract()
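The execute helpers above fetch a remote tarball next to the destination directory and then hand off to an extract method defined in the parent class. A self-contained sketch of that fetch-then-extract flow might look like the following; the fetch_and_extract name and the use of urlretrieve/tarfile in place of utils.get_file and the parent class extractor are assumptions.

import os
import tarfile

try:
    from urllib.request import urlretrieve      # Python 3
except ImportError:
    from urllib import urlretrieve               # Python 2


def fetch_and_extract(source, destination_dir):
    # Download the tarball next to the destination directory, then unpack it.
    name = os.path.basename(source)
    base_dest = os.path.dirname(destination_dir)
    dest = os.path.join(base_dest, name)
    urlretrieve(source, dest)                    # stand-in for utils.get_file
    tar = tarfile.open(dest)                     # handles .gz/.bz2 transparently
    try:
        tar.extractall(destination_dir)
    finally:
        tar.close()
    return destination_dir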
Example #26
def setup(tarball, topdir):
    # FIXME - Waiting to be able to specify dependency.
    # self.job.setup_dep(['pgsql'])
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('http://pgfoundry.org/frs/download.php/1083/pgpool-II-1.0.1.tar.gz', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    # FIXME - Waiting to be able to use self.autodir instead of
    # os.environ['AUTODIR']
    utils.configure('--prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql'
                    % (topdir, os.environ['AUTODIR']))
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #27
    def make_guest_kernel(self):
        '''
        Runs "make", using a single job
        '''
        os.chdir(self.source)
        logging.info("Building guest kernel")
        logging.debug("Kernel config is %s" % self.config)
        utils.get_file(self.config, '.config')

        # FIXME currently no support for builddir
        # run old config
        utils.system('yes "" | make oldconfig > /dev/null')
        parallel_make_jobs = utils.count_cpus()
        make_command = "make -j %s %s" % (parallel_make_jobs,
                                          self.build_target)
        logging.info("Running parallel make on src dir")
        utils.system(make_command)
Example #28
    def get_scratch_pkgs(self, pkg, dst_dir, arch=None):
        '''
        Download the packages from a scratch build

        :type pkg: KojiScratchPkgSpec
        :param pkg: a scratch package specification
        :type dst_dir: string
        :param dst_dir: the destination directory, where the downloaded
                packages will be saved
        :type arch: string
        :param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        rpm_urls = self.get_scratch_pkg_urls(pkg, arch)
        for url in rpm_urls:
            utils.get_file(url,
                           os.path.join(dst_dir, os.path.basename(url)))
Example #29
    def __init__(self, job, build_dir, config_dir, orig_file,
                            overrides, defconfig = False, name = None, make = None):
        self.build_dir = build_dir
        self.config_dir = config_dir

        #       1. Get original config file
        self.build_config = build_dir + '/.config'
        if (orig_file == '' and not defconfig and not make):    # use user default
            set = job.config_get("kernel.default_config_set")
            defconf = None
            if set and name:
                defconf = config_by_name(name, set)
            if not defconf:
                defconf = job.config_get("kernel.default_config")
            if defconf:
                orig_file = defconf
        if (orig_file == '' and not make and defconfig):        # use defconfig
            make = 'defconfig'
        if (orig_file == '' and make): # use the config command
            print "kernel_config: using " + make + " to configure kernel"
            os.chdir(build_dir)
            make_return = utils.system('make %s > /dev/null' % make)
            self.config_record(make)
            if (make_return):
                raise error.TestError('make %s failed' % make)
        else:
            print "kernel_config: using " + orig_file + \
                                            " to configure kernel"
            self.orig_config = config_dir + '/config.orig'
            utils.get_file(orig_file, self.orig_config)
            self.update_config(self.orig_config, self.orig_config+'.new')
            diff_configs(self.orig_config, self.orig_config+'.new')


        #       2. Apply overrides
        if overrides:
            print "kernel_config: using " + overrides + \
                                            " to re-configure kernel"
            self.over_config = config_dir + '/config.over'
            overrides_local = self.over_config + '.changes'
            utils.get_file(overrides, overrides_local)
            apply_overrides(self.build_config, overrides_local, self.over_config)
            self.update_config(self.over_config, self.over_config+'.new')
            diff_configs(self.over_config, self.over_config+'.new')
        else:
            self.over_config = self.orig_config
Example #30
    def get_kernel_tree(self, base_tree):
        """Extract/link base_tree to self.build_dir"""

        # if base_tree is a dir, assume uncompressed kernel
        if os.path.isdir(base_tree):
            print 'Symlinking existing kernel source'
            if os.path.islink(self.build_dir):
                os.remove(self.build_dir)
            os.symlink(base_tree, self.build_dir)

        # otherwise, extract tarball
        else:
            os.chdir(os.path.dirname(self.src_dir))
            # Figure out local destination for tarball
            tarball = os.path.join(self.src_dir, os.path.basename(base_tree.split(';')[0]))
            utils.get_file(base_tree, tarball)
            print 'Extracting kernel tarball:', tarball, '...'
            utils.extract_tarball_to_dir(tarball, self.build_dir)
Example #31
    def get_kernel_tree(self, base_tree):
        """Extract/link base_tree to self.build_dir"""

        # if base_tree is a dir, assume uncompressed kernel
        if os.path.isdir(base_tree):
            print 'Symlinking existing kernel source'
            if os.path.islink(self.build_dir):
                os.remove(self.build_dir)
            os.symlink(base_tree, self.build_dir)

        # otherwise, extract tarball
        else:
            os.chdir(os.path.dirname(self.src_dir))
            # Figure out local destination for tarball
            tarball = os.path.join(self.src_dir, os.path.basename(base_tree.split(';')[0]))
            utils.get_file(base_tree, tarball)
            print 'Extracting kernel tarball:', tarball, '...'
            utils.extract_tarball_to_dir(tarball, self.build_dir)
Example #32
def setup(tarball, topdir):
    # FIXME - Waiting to be able to specify dependency.
    # self.job.setup_dep(['pgsql'])
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file(
            'http://pgfoundry.org/frs/download.php/1083/pgpool-II-1.0.1.tar.gz',
            tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    # FIXME - Waiting to be able to use self.autodir instead of
    # os.environ['AUTODIR']
    utils.configure('--prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql' %
                    (topdir, os.environ['AUTODIR']))
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #33
    def _kernel_install_rpm(self, rpm_file, kernel_deps_rpms=None, need_reboot=True):
        """
        Install kernel rpm package.
        The rpm packages should be given as URLs or placed in this test's
        directory (client/test/kernelinstall)
        """
        if kernel_deps_rpms:
            logging.info("Installing kernel dependencies.")
            if isinstance(kernel_deps_rpms, list):
                kernel_deps_rpms = " ".join(kernel_deps_rpms)
            utils.run("rpm -U --force %s" % kernel_deps_rpms)

        dst = os.path.join("/tmp", os.path.basename(rpm_file))
        knl = utils.get_file(rpm_file, dst)
        kernel = self.job.kernel(knl)
        logging.info("Installing kernel %s", rpm_file)
        kernel.install(install_vmlinux=False)

        if need_reboot:
            kernel.boot()
        else:
            kernel.add_to_bootloader()
Example #34
 def _copy_file_to_test_dir(file_name, dest_dir):
     if not utils.is_url(file_name):
         file_name = os.path.join(test.bindir, file_name)
     dest = os.path.join(dest_dir, os.path.basename(file_name))
     # return the base name of the fetched file.
     return os.path.basename(utils.get_file(file_name, dest))
Example #35
        asset_info = virttest.asset.get_asset_info(asset_name)

    # Do not force extraction if integrity information is available
    if asset_info['sha1_url']:
        force = params.get("force_copy", "no") == "yes"
    else:
        force = params.get("force_copy", "yes") == "yes"

    try:
        error.context("Copy image '%s'" % image, logging.info)
        if utils.is_url(asset_info['url']):
            virttest.asset.download_file(asset_info,
                                         interactive=False,
                                         force=force)
        else:
            utils.get_file(asset_info['url'], asset_info['destination'])

    finally:
        sub_type = params.get("sub_type")
        if sub_type:
            error.context("Run sub test '%s'" % sub_type, logging.info)
            params['image_name'] += "-error"
            params['boot_once'] = "c"
            vm.create(params=params)
            virttest.utils_test.run_virt_sub_test(test, params, env,
                                                  params.get("sub_type"))


@error.context_aware
def run_file_transfer(test, params, env):
    """
Example #36
    def run_once(self, options='', testlist=''):
        """
        Passes the appropriate parameters to the testsuite.

        # Usage: $0 [options] [testlist]
        # check options
        #     -raw                test raw (default)
        #     -cow                test cow
        #     -qcow               test qcow
        #     -qcow2              test qcow2
        #     -vpc                test vpc
        #     -vmdk               test vmdk
        #     -qed                test qed
        #     -xdiff              graphical mode diff
        #     -nocache            use O_DIRECT on backing file
        #     -misalign           misalign memory allocations
        #     -n                  show me, do not run tests
        #     -T                  output timestamps
        #     -r                  randomize test order
        #
        # testlist options
        #     -g group[,group...] include tests from these groups
        #     -x group[,group...] exclude tests from these groups
        #     NNN                 include test NNN
        #     NNN-NNN             include test range (eg. 012-021)

        @param qemu_path: Optional qemu install path.
        @param options: Options accepted by the testsuite.
        @param testlist: List of tests that will be executed (by default, all
                testcases will be executed).
        """
        os.chdir(self.srcdir)
        test_dir = os.path.join(self.srcdir, "scratch")
        if not os.path.exists(test_dir):
            os.mkdir(test_dir)
        cmd = "./check"
        if options:
            cmd += " " + options
        if testlist:
            cmd += " " + testlist

        try:
            try:
                result = utils.system(cmd)
            except error.CmdError, e:
                failed_cases = re.findall("Failures: (\d+)", str(e))
                for num in failed_cases:
                    failed_name = num + ".out.bad"
                    src = os.path.join(self.srcdir, failed_name)
                    dest = os.path.join(self.resultsdir, failed_name)
                    utils.get_file(src, dest)
                if failed_cases:
                    e_msg = ("Qemu-iotests failed. Failed cases: %s" %
                             failed_cases)
                else:
                    e_msg = "Qemu-iotests failed"
                raise error.TestFail(e_msg)
        finally:
            src = os.path.join(self.srcdir, "check.log")
            dest = os.path.join(self.resultsdir, "check.log")
            utils.get_file(src, dest)
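Both run_once snippets parse the failed case numbers out of the ./check error message, copy each NNN.out.bad file, and always preserve check.log in the results directory. A rough standalone equivalent is sketched below; the run_check name, its srcdir/resultsdir arguments and the use of subprocess/shutil instead of utils.system and utils.get_file are assumptions.

import os
import re
import shutil
import subprocess


def run_check(srcdir, resultsdir, cmd="./check"):
    # Run the testsuite; on failure copy each NNN.out.bad, and always
    # preserve check.log in the results directory.
    try:
        proc = subprocess.Popen(cmd, shell=True, cwd=srcdir,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        output = proc.communicate()[0].decode('utf-8', 'replace')
        failed_cases = re.findall(r"Failures: (\d+)", output)
        for num in failed_cases:
            failed_name = num + ".out.bad"
            src = os.path.join(srcdir, failed_name)
            if os.path.isfile(src):
                shutil.copy(src, os.path.join(resultsdir, failed_name))
        if proc.returncode:
            raise RuntimeError("qemu-iotests failed, cases: %s" % failed_cases)
    finally:
        log = os.path.join(srcdir, "check.log")
        if os.path.isfile(log):
            shutil.copy(log, os.path.join(resultsdir, "check.log"))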
Example #37
        error.context("Test Env setup")
        iperf_downloaded = 0
        iperf_url = linux_iperf_url

        app_check_cmd = params.get("linux_app_check_cmd", "false")
        app_check_exit_status = int(params.get("linux_app_check_exit_status",
                                               "0"))
        exit_status = utils.system(app_check_cmd, ignore_status=True)

        # Install iperf in host if not available
        default_install_cmd = "tar zxvf %s; cd iperf-%s;"
        default_install_cmd += " ./configure; make; make install"
        install_cmd = params.get("linux_install_cmd", default_install_cmd)
        if not exit_status == app_check_exit_status:
            error.context("install iperf in host", logging.info)
            utils.get_file(iperf_url, host_path)
            iperf_downloaded = 1
            utils.system(install_cmd % (host_path, iperf_version))

        # The guest may not be running Linux, see if we should update the
        # app_check variables
        if not os_type == "linux":
            app_check_cmd = params.get("win_app_check_cmd", "false")
            app_check_exit_status = int(params.get("win_app_check_exit_status",
                                                   "0"))

        # Install iperf in guest if not available
        if not session.cmd_status(app_check_cmd) == app_check_exit_status:
            error.context("install iperf in guest", logging.info)
            if not iperf_downloaded:
                utils.get_file(iperf_url, host_path)
Example #38
        for param in params.get("copy_to_local").split():
            l_value = params.get(param)
            if l_value:
                need_copy = True
                nfs_link = utils_misc.get_path(test.bindir, l_value)
                i_name = os.path.basename(l_value)
                local_link = os.path.join(local_dir, i_name)
                if os.path.isfile(local_link):
                    file_hash = utils.hash_file(local_link, "md5")
                    expected_hash = utils.hash_file(nfs_link, "md5")
                    if file_hash == expected_hash:
                        need_copy = False
                if need_copy:
                    msg = "Copy %s to %s in local host." % (i_name, local_link)
                    error.context(msg, logging.info)
                    utils.get_file(nfs_link, local_link)
                    params[param] = local_link

    unattended_install_config = UnattendedInstallConfig(test, params, vm)
    unattended_install_config.setup()

    # params passed explicitly, because they may have been updated by
    # unattended install config code, such as when params['url'] == auto
    vm.create(params=params)

    post_finish_str = params.get("post_finish_str",
                                 "Post set up finished")
    install_timeout = int(params.get("install_timeout", 3000))

    migrate_background = params.get("migrate_background") == "yes"
    if migrate_background:
Example #39
def install_host_kernel(job, params):
    """
    Install a host kernel, given the appropriate params.

    @param job: Job object.
    @param params: Dict with host kernel install params.
    """
    install_type = params.get('host_kernel_install_type')

    if install_type == 'rpm':
        logging.info('Installing host kernel through rpm')

        rpm_url = params.get('host_kernel_rpm_url')
        k_basename = os.path.basename(rpm_url)
        dst = os.path.join("/var/tmp", k_basename)
        k = utils.get_file(rpm_url, dst)
        host_kernel = job.kernel(k)
        host_kernel.install(install_vmlinux=False)
        utils.write_keyval(job.resultdir,
                           {'software_version_kernel': k_basename})
        host_kernel.boot()

    elif install_type in ['koji', 'brew']:
        logging.info('Installing host kernel through koji/brew')

        koji_cmd = params.get('host_kernel_koji_cmd')
        koji_build = params.get('host_kernel_koji_build')
        koji_tag = params.get('host_kernel_koji_tag')

        k_deps = utils_koji.KojiPkgSpec(
            tag=koji_tag,
            build=koji_build,
            package='kernel',
            subpackages=['kernel-devel', 'kernel-firmware'])
        k = utils_koji.KojiPkgSpec(tag=koji_tag,
                                   build=koji_build,
                                   package='kernel',
                                   subpackages=['kernel'])

        c = utils_koji.KojiClient(koji_cmd)
        logging.info('Fetching kernel dependencies (-devel, -firmware)')
        c.get_pkgs(k_deps, job.tmpdir)
        logging.info(
            'Installing kernel dependencies (-devel, -firmware) '
            'through %s', install_type)
        k_deps_rpm_file_names = [
            os.path.join(job.tmpdir, rpm_file_name)
            for rpm_file_name in c.get_pkg_rpm_file_names(k_deps)
        ]
        utils.run('rpm -U --force %s' % " ".join(k_deps_rpm_file_names))

        c.get_pkgs(k, job.tmpdir)
        k_rpm = os.path.join(job.tmpdir, c.get_pkg_rpm_file_names(k)[0])
        host_kernel = job.kernel(k_rpm)
        host_kernel.install(install_vmlinux=False)
        utils.write_keyval(job.resultdir, {
            'software_version_kernel':
            " ".join(c.get_pkg_rpm_file_names(k_deps))
        })
        host_kernel.boot()

    elif install_type == 'git':
        logging.info('Chose to install host kernel through git, proceeding')

        repo = params.get('host_kernel_git_repo')
        repo_base = params.get('host_kernel_git_repo_base', None)
        branch = params.get('host_kernel_git_branch')
        commit = params.get('host_kernel_git_commit')
        patch_list = params.get('host_kernel_patch_list')
        if patch_list:
            patch_list = patch_list.split()
        kernel_config = params.get('host_kernel_config', None)

        repodir = os.path.join("/tmp", 'kernel_src')
        r = git.GitRepoHelper(uri=repo,
                              branch=branch,
                              destination_dir=repodir,
                              commit=commit,
                              base_uri=repo_base)
        r.execute()
        host_kernel = job.kernel(r.destination_dir)
        if patch_list:
            host_kernel.patch(patch_list)
        if kernel_config:
            host_kernel.config(kernel_config)
        host_kernel.build()
        host_kernel.install()
        git_repo_version = '%s:%s:%s' % (r.uri, r.branch, r.get_top_commit())
        utils.write_keyval(job.resultdir,
                           {'software_version_kernel': git_repo_version})
        host_kernel.boot()

    else:
        logging.info('Chose %s, using the current kernel for the host',
                     install_type)
        k_version = utils.system_output('uname -r', ignore_status=True)
        utils.write_keyval(job.resultdir,
                           {'software_version_kernel': k_version})
Example #40
 def _copy_file_to_test_dir(file_name, dest_dir):
     if not utils.is_url(file_name):
         file_name = os.path.join(test.bindir, file_name)
     dest = os.path.join(dest_dir, os.path.basename(file_name))
     # return the base name of the fetched file.
     return os.path.basename(utils.get_file(file_name, dest))
Example #41
def install_host_kernel(job, params):
    """
    Install a host kernel, given the appropriate params.

    @param job: Job object.
    @param params: Dict with host kernel install params.
    """
    install_type = params.get("host_kernel_install_type")

    if install_type == "rpm":
        logging.info("Installing host kernel through rpm")

        rpm_url = params.get("host_kernel_rpm_url")
        k_basename = os.path.basename(rpm_url)
        dst = os.path.join("/var/tmp", k_basename)
        k = utils.get_file(rpm_url, dst)
        host_kernel = job.kernel(k)
        host_kernel.install(install_vmlinux=False)
        utils.write_keyval(job.resultdir, {"software_version_kernel": k_basename})
        host_kernel.boot()

    elif install_type in ["koji", "brew"]:
        logging.info("Installing host kernel through koji/brew")

        koji_cmd = params.get("host_kernel_koji_cmd")
        koji_build = params.get("host_kernel_koji_build")
        koji_tag = params.get("host_kernel_koji_tag")

        k_deps = utils_koji.KojiPkgSpec(
            tag=koji_tag, build=koji_build, package="kernel", subpackages=["kernel-devel", "kernel-firmware"]
        )
        k = utils_koji.KojiPkgSpec(tag=koji_tag, build=koji_build, package="kernel", subpackages=["kernel"])

        c = utils_koji.KojiClient(koji_cmd)
        logging.info("Fetching kernel dependencies (-devel, -firmware)")
        c.get_pkgs(k_deps, job.tmpdir)
        logging.info("Installing kernel dependencies (-devel, -firmware) " "through %s", install_type)
        k_deps_rpm_file_names = [
            os.path.join(job.tmpdir, rpm_file_name) for rpm_file_name in c.get_pkg_rpm_file_names(k_deps)
        ]
        utils.run("rpm -U --force %s" % " ".join(k_deps_rpm_file_names))

        c.get_pkgs(k, job.tmpdir)
        k_rpm = os.path.join(job.tmpdir, c.get_pkg_rpm_file_names(k)[0])
        host_kernel = job.kernel(k_rpm)
        host_kernel.install(install_vmlinux=False)
        utils.write_keyval(job.resultdir, {"software_version_kernel": " ".join(c.get_pkg_rpm_file_names(k_deps))})
        host_kernel.boot()

    elif install_type == "git":
        logging.info("Chose to install host kernel through git, proceeding")

        repo = params.get("host_kernel_git_repo")
        repo_base = params.get("host_kernel_git_repo_base", None)
        branch = params.get("host_kernel_git_branch")
        commit = params.get("host_kernel_git_commit")
        patch_list = params.get("host_kernel_patch_list")
        if patch_list:
            patch_list = patch_list.split()
        kernel_config = params.get("host_kernel_config", None)

        repodir = os.path.join("/tmp", "kernel_src")
        r = git.GitRepoHelper(uri=repo, branch=branch, destination_dir=repodir, commit=commit, base_uri=repo_base)
        r.execute()
        host_kernel = job.kernel(r.destination_dir)
        if patch_list:
            host_kernel.patch(patch_list)
        if kernel_config:
            host_kernel.config(kernel_config)
        host_kernel.build()
        host_kernel.install()
        git_repo_version = "%s:%s:%s" % (r.uri, r.branch, r.get_top_commit())
        utils.write_keyval(job.resultdir, {"software_version_kernel": git_repo_version})
        host_kernel.boot()

    else:
        logging.info("Chose %s, using the current kernel for the host", install_type)
        k_version = utils.system_output("uname -r", ignore_status=True)
        utils.write_keyval(job.resultdir, {"software_version_kernel": k_version})
Example #42
 def _copy_file_to_test_dir(file_path):
     if utils.is_url(file_path):
         return file_path
     file_abs_path = os.path.join(test.bindir, file_path)
     dest = os.path.join(sub_test_path, os.path.basename(file_abs_path))
     return os.path.basename(utils.get_file(file_path, dest))
Example #43
    transfer_timeout = int(params.get("transfer_timeout", 360))
    login_timeout = int(params.get("login_timeout", 360))

    dir_name = test.tmpdir
    tmp_dir = params.get("tmp_dir", "/tmp/")
    host_path = os.path.join(dir_name, "iperf")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)
    client_ip = vm.get_address(0)

    try:
        error.context("Test Env setup")
        iperf_url = linux_iperf_url
        utils.get_file(iperf_url, host_path)

        error.context("install iperf in host", logging.info)
        default_install_cmd = "tar zxvf %s; cd iperf-%s;"
        default_install_cmd += " ./configure; make; make install"
        install_cmd = params.get("linux_install_cmd", default_install_cmd)
        utils.system(install_cmd % (host_path, iperf_version))

        error.context("install iperf in guest", logging.info)
        if os_type == "linux":
            guest_path = (tmp_dir + "iperf.tgz")
            clean_cmd = "rm -rf %s iperf-%s" % (guest_path, iperf_version)
        else:
            guest_path = (tmp_dir + "iperf.exe")
            iperf_url = win_iperf_url
            utils.get_file(iperf_url, host_path)
Example #44
        for param in params.get("copy_to_local").split():
            l_value = params.get(param)
            if l_value:
                need_copy = True
                nfs_link = utils_misc.get_path(test.bindir, l_value)
                i_name = os.path.basename(l_value)
                local_link = os.path.join(local_dir, i_name)
                if os.path.isfile(local_link):
                    file_hash = utils.hash_file(local_link, "md5")
                    expected_hash = utils.hash_file(nfs_link, "md5")
                    if file_hash == expected_hash:
                        need_copy = False
                if need_copy:
                    msg = "Copy %s to %s in local host." % (i_name, local_link)
                    error.context(msg, logging.info)
                    utils.get_file(nfs_link, local_link)
                    params[param] = local_link

    unattended_install_config = UnattendedInstallConfig(test, params, vm)
    unattended_install_config.setup()

    # params passed explicitly, because they may have been updated by
    # unattended install config code, such as when params['url'] == auto
    vm.create(params=params)

    post_finish_str = params.get("post_finish_str", "Post set up finished")
    install_timeout = int(params.get("install_timeout", 4800))

    migrate_background = params.get("migrate_background") == "yes"
    if migrate_background:
        mig_timeout = float(params.get("mig_timeout", "3600"))
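The two copy_to_local loops above compare MD5 hashes of the NFS copy and the local copy and re-fetch the file only when they differ. A small self-contained version of that check is sketched here; the copy_if_changed name and the use of hashlib and shutil.copy in place of utils.hash_file and utils.get_file are assumptions for illustration.

import hashlib
import os
import shutil


def _md5(path, chunk_size=1024 * 1024):
    # Hash the file in chunks so ISO-sized images need not fit in memory.
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()


def copy_if_changed(remote_path, local_path):
    # Skip the copy when an identical local copy already exists.
    if os.path.isfile(local_path) and _md5(local_path) == _md5(remote_path):
        return False
    shutil.copy(remote_path, local_path)
    return True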
Example #45
    else:
        asset_info = virttest.asset.get_asset_info(asset_name)

    # Do not force extraction if integrity information is available
    if asset_info['sha1_url']:
        force = params.get("force_copy", "no") == "yes"
    else:
        force = params.get("force_copy", "yes") == "yes"

    try:
        error.context("Copy image '%s'" % image, logging.info)
        if utils.is_url(asset_info['url']):
            virttest.asset.download_file(asset_info, interactive=False,
                                         force=force)
        else:
            utils.get_file(asset_info['url'], asset_info['destination'])

    finally:
        sub_type = params.get("sub_type")
        if sub_type:
            error.context("Run sub test '%s'" % sub_type, logging.info)
            params['image_name'] += "-error"
            params['boot_once'] = "c"
            vm.create(params=params)
            virttest.utils_test.run_virt_sub_test(test, params, env,
                                                  params.get("sub_type"))


@error.context_aware
def run_file_transfer(test, params, env):
    """
Example #46
def install_host_kernel(job, params):
    """
    Install a host kernel, given the appropriate params.

    @param job: Job object.
    @param params: Dict with host kernel install params.
    """
    install_type = params.get('host_kernel_install_type')

    if install_type == 'rpm':
        logging.info('Installing host kernel through rpm')

        rpm_url = params.get('host_kernel_rpm_url')
        k_basename = os.path.basename(rpm_url)
        dst = os.path.join("/var/tmp", k_basename)
        k = utils.get_file(rpm_url, dst)
        host_kernel = job.kernel(k)
        host_kernel.install(install_vmlinux=False)
        utils.write_keyval(job.resultdir,
                           {'software_version_kernel': k_basename})
        host_kernel.boot()

    elif install_type in ['koji', 'brew']:
        logging.info('Installing host kernel through koji/brew')

        koji_cmd = params.get('host_kernel_koji_cmd')
        koji_build = params.get('host_kernel_koji_build')
        koji_tag = params.get('host_kernel_koji_tag')

        k_deps = utils_koji.KojiPkgSpec(tag=koji_tag, build=koji_build,
                                        package='kernel',
                                subpackages=['kernel-devel', 'kernel-firmware'])
        k = utils_koji.KojiPkgSpec(tag=koji_tag, build=koji_build,
                                   package='kernel', subpackages=['kernel'])

        c = utils_koji.KojiClient(koji_cmd)
        logging.info('Fetching kernel dependencies (-devel, -firmware)')
        c.get_pkgs(k_deps, job.tmpdir)
        logging.info('Installing kernel dependencies (-devel, -firmware) '
                     'through %s', install_type)
        k_deps_rpm_file_names = [os.path.join(job.tmpdir, rpm_file_name) for
                                 rpm_file_name in c.get_pkg_rpm_file_names(k_deps)]
        utils.run('rpm -U --force %s' % " ".join(k_deps_rpm_file_names))

        c.get_pkgs(k, job.tmpdir)
        k_rpm = os.path.join(job.tmpdir,
                             c.get_pkg_rpm_file_names(k)[0])
        host_kernel = job.kernel(k_rpm)
        host_kernel.install(install_vmlinux=False)
        utils.write_keyval(job.resultdir,
                           {'software_version_kernel':
                            " ".join(c.get_pkg_rpm_file_names(k_deps))})
        host_kernel.boot()

    elif install_type == 'git':
        logging.info('Chose to install host kernel through git, proceeding')

        repo = params.get('host_kernel_git_repo')
        repo_base = params.get('host_kernel_git_repo_base', None)
        branch = params.get('host_kernel_git_branch')
        commit = params.get('host_kernel_git_commit')
        patch_list = params.get('host_kernel_patch_list')
        if patch_list:
            patch_list = patch_list.split()
        kernel_config = params.get('host_kernel_config', None)

        repodir = os.path.join("/tmp", 'kernel_src')
        r = git.GitRepoHelper(uri=repo, branch=branch, destination_dir=repodir,
                              commit=commit, base_uri=repo_base)
        r.execute()
        host_kernel = job.kernel(r.destination_dir)
        if patch_list:
            host_kernel.patch(patch_list)
        if kernel_config:
            host_kernel.config(kernel_config)
        host_kernel.build()
        host_kernel.install()
        git_repo_version = '%s:%s:%s' % (r.uri, r.branch, r.get_top_commit())
        utils.write_keyval(job.resultdir,
                           {'software_version_kernel': git_repo_version})
        host_kernel.boot()

    else:
        logging.info('Chose %s, using the current kernel for the host',
                     install_type)
        k_version = utils.system_output('uname -r', ignore_status=True)
        utils.write_keyval(job.resultdir,
                           {'software_version_kernel': k_version})