def package_version_for_hash(hash, kernel_flavor='basic', distro='rhel', distro_version='8.0', machine_type='smithi'):
    """
    Look up the package version built for a given ceph sha1.

    Queries the builder project (gitbuilder/shaman) for the build matching
    the distro/arch/flavor combination.

    :returns: a version string, or None when a container build for this
              hash has not completed yet.
    """
    arch, _release, default_os = get_distro_defaults(distro, machine_type)
    if distro in (None, 'None'):
        distro = default_os.name
    job_config = {
        'flavor': kernel_flavor,
        'os_type': distro,
        'os_version': distro_version,
        'arch': arch,
        'sha1': hash,
    }
    bp = get_builder_project()('ceph', job_config)
    is_container_build = (
        bp.distro == CONTAINER_DISTRO and bp.flavor == CONTAINER_FLAVOR
    )
    if is_container_build:
        log.info('container build %s, checking for build_complete' % bp.distro)
        if not bp.build_complete:
            log.info('build not complete')
            return None
    return bp.version
def get_gitbuilder_hash(project=None, branch=None, flavor=None, machine_type=None, distro=None, distro_version=None):
    """
    Find the hash representing the head of the project's repository via
    querying a gitbuilder repo.

    Will return None in the case of a 404 or any other HTTP error.
    """
    # Alternate method for github-hosted projects - left here for
    # informational purposes
    # resp = requests.get(
    #     'https://api.github.com/repos/ceph/ceph/git/refs/heads/master')
    # hash = .json()['object']['sha']
    arch, _release, default_os = get_distro_defaults(distro, machine_type)
    if distro is None:
        distro = default_os.name
    build_config = {
        'branch': branch,
        'flavor': flavor,
        'os_type': distro,
        'os_version': distro_version,
        'arch': arch,
    }
    builder = get_builder_project()(project, build_config)
    return builder.sha1
def _get_builder_project(ctx, remote, config):
    """
    Instantiate the active builder project for this job.

    The project name comes from config['project'], defaulting to 'ceph'.
    """
    project_name = config.get('project', 'ceph')
    builder_cls = packaging.get_builder_project()
    return builder_cls(project_name, config, remote=remote, ctx=ctx)
def get_distro_defaults(distro, machine_type):
    """
    Given a distro (e.g. 'ubuntu') and machine type, return:
        (arch, release, pkg_type)

    This is used to default to:
        ('x86_64', 'trusty', 'deb') when passed 'ubuntu' and 'plana'
        ('armv7l', 'saucy', 'deb') when passed 'ubuntu' and 'saya'
        ('x86_64', 'wheezy', 'deb') when passed 'debian'
        ('x86_64', 'fedora20', 'rpm') when passed 'fedora'
    And ('x86_64', 'centos7', 'rpm') when passed anything else

    :raises ValueError: if the distro is not one of the recognized values.
    """
    arch = 'x86_64'
    if distro in (None, 'None'):
        os_type = 'centos'
        os_version = '7'
    elif distro in ('rhel', 'centos'):
        os_type = 'centos'
        os_version = '7'
    elif distro == 'ubuntu':
        os_type = distro
        if machine_type == 'saya':
            # saya machines are ARM boards
            os_version = '13.10'
            arch = 'armv7l'
        else:
            os_version = '14.04'
    elif distro == 'debian':
        os_type = distro
        os_version = '7'
    elif distro == 'fedora':
        os_type = distro
        os_version = '20'
    elif distro == 'opensuse':
        os_type = distro
        os_version = '15.1'
    else:
        # Interpolate the value into the message; passing it as a second
        # positional arg to ValueError would yield a tuple-style message
        # like "('Invalid distro value passed: %s', 'foo')".
        raise ValueError("Invalid distro value passed: %s" % distro)
    _os = OS(name=os_type, version=os_version)
    release = get_builder_project()._get_distro(
        _os.name,
        _os.version,
        _os.codename,
    )
    template = "Defaults for machine_type {mtype} distro {distro}: " \
        "arch={arch}, release={release}, pkg_type={pkg}"
    log.debug(template.format(
        mtype=machine_type,
        distro=_os.name,
        arch=arch,
        release=release,
        pkg=_os.package_type)
    )
    return (
        arch,
        release,
        _os,
    )
def check_packages(ctx, config):
    """
    Checks gitbuilder to determine if there are missing packages for this
    job.

    If there are missing packages, fail the job by setting the failure
    reason, marking the job dead in paddles, and raising
    VersionNotFoundError.
    """
    for task in ctx.config['tasks']:
        # Each task entry is a single-key dict; dict.keys() is not
        # subscriptable on Python 3, so fetch the key via iteration.
        if next(iter(task)) == 'buildpackages':
            log.info("Checking packages skipped because "
                     "the task buildpackages was found.")
            return

    log.info("Checking packages...")
    os_type = ctx.config.get("os_type")
    sha1 = ctx.config.get("sha1")
    # We can only do this check if there are a defined sha1 and os_type
    # in the job config.
    if os_type and sha1:
        package = get_builder_project()("ceph", ctx.config)
        template = "Checking packages for os_type '{os}', " \
            "flavor '{flav}' and ceph hash '{ver}'"
        log.info(
            template.format(
                os=package.os_type,
                flav=package.flavor,
                ver=package.sha1,
            )
        )
        if package.version:
            log.info("Found packages for ceph version {ver}".format(
                ver=package.version
            ))
        else:
            msg = "Packages for distro '{d}' and ceph hash '{ver}' not found"
            msg = msg.format(
                d=package.distro,
                ver=package.sha1,
            )
            log.error(msg)
            # set the failure message and update paddles with the status
            ctx.summary["failure_reason"] = msg
            set_status(ctx.summary, "dead")
            report.try_push_job_info(ctx.config, dict(status='dead'))
            raise VersionNotFoundError(package.base_url)
    else:
        log.info(
            "Checking packages skipped, missing os_type '{os}' or ceph hash '{ver}'".format(
                os=os_type,
                ver=sha1,
            )
        )
def _yum_fix_repo_host(remote, project):
    """
    Update the hostname to reflect the gitbuilder_host setting.
    """
    # Only gitbuilder-based repo files embed the gitbuilder hostname;
    # skip this bit for any other builder project.
    if not isinstance(packaging.get_builder_project(), packaging.GitbuilderProject):
        return
    default_host = teuth_config._defaults['gitbuilder_host']
    configured_host = teuth_config.gitbuilder_host
    if configured_host == default_host:
        # Nothing to rewrite when the config matches the default.
        return
    repo_path = '/etc/yum.repos.d/%s.repo' % project
    sed_expr = "'s/{0}/{1}/'".format(default_host, configured_host)
    # Guard with `test -f` so missing repo files are silently skipped.
    cmd = [
        'if', 'test', '-f', repo_path, run.Raw(';'),
        'then',
        'sudo', 'sed', '-i', '-e', run.Raw(sed_expr), repo_path,
        run.Raw(';'),
        'fi',
    ]
    remote.run(args=cmd)
def _yum_fix_repo_host(remote, project):
    """
    Update the hostname to reflect the gitbuilder_host setting.
    """
    builder = packaging.get_builder_project()
    # Skip this bit if we're not using gitbuilder
    if not isinstance(builder, packaging.GitbuilderProject):
        return
    old_host = teuth_config._defaults['gitbuilder_host']
    new_host = teuth_config.gitbuilder_host
    if new_host == old_host:
        return
    repo_path = '/etc/yum.repos.d/%s.repo' % project
    host_sed_expr = "'s/{0}/{1}/'".format(old_host, new_host)
    # `test -f` makes the edit a no-op when the repo file is absent.
    remote.run(args=[
        'if', 'test', '-f', repo_path, run.Raw(';'),
        'then', 'sudo', 'sed', '-i', '-e', run.Raw(host_sed_expr),
        repo_path, run.Raw(';'), 'fi',
    ])
def package_version_for_hash(hash, kernel_flavor='basic', distro='rhel', distro_version='7.0', machine_type='smithi'):
    """
    Does what it says on the tin. Uses gitbuilder repos.

    :returns: a string.
    """
    arch, _release, default_os = get_distro_defaults(distro, machine_type)
    if distro in (None, 'None'):
        distro = default_os.name
    builder = get_builder_project()(
        'ceph',
        {
            'flavor': kernel_flavor,
            'os_type': distro,
            'os_version': distro_version,
            'arch': arch,
            'sha1': hash,
        },
    )
    return builder.version
def download_kernel(ctx, config):
    """
    Supply each remote with a kernel package:
      - local kernels are copied over
      - gitbuilder kernels are downloaded
      - nothing is done for distro kernels

    :param ctx: Context
    :param config: Configuration
    """
    # remote name -> in-flight download process; all are awaited at the end
    # so downloads to different remotes run in parallel.
    procs = {}
    for role, src in config.items():
        needs_download = False

        if src == 'distro':
            # don't need to download distro kernels
            log.debug("src is distro, skipping download")
            continue

        (role_remote, ) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, dict):
            # we're downloading a kernel from koji, the src dict here
            # is the build_info retrieved from koji using get_koji_build_info
            if src.get("id"):
                build_id = src["id"]
                log.info(
                    "Downloading kernel with build_id {build_id} on {role}...".
                    format(build_id=build_id, role=role))
                needs_download = True
                baseurl = get_kojiroot_base_url(src)
                pkg_name = get_koji_package_name("kernel", src)
            elif src.get("task_id"):
                needs_download = True
                log.info(
                    "Downloading kernel with task_id {task_id} on {role}...".
                    format(task_id=src["task_id"], role=role))
                baseurl = src["base_url"]
                # this var is also poorly named as it's not the package name,
                # but the full name of the rpm file to download.
                pkg_name = src["rpm_name"]
        elif src.find('/') >= 0:
            # local package - src is path
            log.info('Copying kernel package {path} to {role}...'.format(
                path=src, role=role))
            role_remote.put_file(src, remote_pkg_path(role_remote))
        else:
            # gitbuilder package - src is sha1
            log.info('Downloading kernel {sha1} on {role}...'.format(
                sha1=src,
                role=role,
            ))
            needs_download = True
            builder = get_builder_project()(
                'kernel',
                {'sha1': src},
                ctx=ctx,
                remote=role_remote,
            )
            if teuth_config.use_shaman:
                # shaman layouts differ by package type: rpm repos are keyed
                # by arch, deb repos use the Debian pool directory layout.
                if role_remote.os.package_type == 'rpm':
                    arch = builder.arch
                    baseurl = urlparse.urljoin(
                        builder.base_url,
                        '/'.join([arch, ''])
                    )
                    pkg_name = "kernel-%s.%s.rpm" % (
                        builder.version,
                        arch,
                    )
                elif role_remote.os.package_type == 'deb':
                    arch = 'amd64'  # FIXME
                    baseurl = urlparse.urljoin(
                        builder.base_url,
                        '/'.join([
                            'pool', 'main', 'l',
                            'linux-%s' % builder.scm_version,
                            ''
                        ])
                    )
                    pkg_name = 'linux-image-%s_%s_%s.deb' % (
                        builder.scm_version,
                        builder.version,
                        arch,
                    )
            else:
                baseurl = builder.base_url + "/"
                pkg_name = gitbuilder_pkg_name(role_remote)

            log.info("fetching, builder baseurl is %s", baseurl)

        if needs_download:
            # Pipe the package name into wget so --base can resolve it
            # relative to baseurl; run asynchronously (wait=False) and
            # collect the process for the wait loop below.
            proc = role_remote.run(
                args=[
                    'rm', '-f', remote_pkg_path(role_remote),
                    run.Raw('&&'),
                    'echo',
                    pkg_name,
                    run.Raw('|'),
                    'wget',
                    '-nv',
                    '-O',
                    remote_pkg_path(role_remote),
                    '--base={url}'.format(url=baseurl),
                    '--input-file=-',
                ],
                wait=False)
            procs[role_remote.name] = proc

    for name, proc in procs.items():
        log.debug('Waiting for download/copy to %s to complete...', name)
        proc.wait()
def task(ctx, config):
    """
    Make sure the specified kernel is installed.
    This can be a branch, tag, or sha1 of ceph-client.git or a local
    kernel package.

    To install ceph-client.git branch (default: master)::

        kernel:
          branch: testing

    To install ceph-client.git tag::

        kernel:
          tag: v3.18

    To install ceph-client.git sha1::

        kernel:
          sha1: 275dd19ea4e84c34f985ba097f9cddb539f54a50

    To install from a koji build_id::

        kernel:
          koji: 416058

    To install from a koji task_id::

        kernel:
          koji_task: 9678206

    When installing from koji you also need to set the urls for koji hub
    and the koji root in your teuthology.yaml config file. These are shown
    below with their default values::

        kojihub_url: http://koji.fedoraproject.org/kojihub
        kojiroot_url: http://kojipkgs.fedoraproject.org/packages

    When installing from a koji task_id you also need to set koji_task_url,
    which is the base url used to download rpms from koji task results::

        koji_task_url: https://kojipkgs.fedoraproject.org/work/

    To install local rpm (target should be an rpm system)::

        kernel:
          rpm: /path/to/appropriately-named.rpm

    To install local deb (target should be a deb system)::

        kernel:
          deb: /path/to/appropriately-named.deb

    For rpm: or deb: to work it should be able to figure out sha1 from
    local kernel package basename, see get_sha1_from_pkg_name(). This
    means that you can't for example install a local tag - package built
    with upstream {rpm,deb}-pkg targets won't have a sha1 in its name.

    If you want to schedule a run and use a local kernel package, you
    have to copy the package over to a box teuthology workers are
    running on and specify a path to the package on that box.

    All of the above will install a specified kernel on all targets.
    You can specify different kernels for each role or for all roles of
    a certain type (more specific roles override less specific, see
    normalize_config() for details)::

        kernel:
          client:
            tag: v3.0
          osd:
            branch: btrfs_fixes
          client.1:
            branch: more_specific
          osd.3:
            branch: master

    To wait 3 minutes for hosts to reboot (default: 300)::

        kernel:
          timeout: 180

    To enable kdb::

        kernel:
          kdb: true

    :param ctx: Context
    :param config: Configuration
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task kernel only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {}).get('kernel', {})
    config, timeout = normalize_and_apply_overrides(ctx, config, overrides)
    validate_config(ctx, config)
    log.info('config %s, timeout %d' % (config, timeout))

    need_install = {}  # sha1 to dl, or path to rpm or deb
    need_version = {}  # utsrelease or sha1
    kdb = {}
    remove_old_kernels(ctx)
    for role, role_config in config.items():
        # gather information about this remote
        (role_remote, ) = ctx.cluster.only(role).remotes.keys()
        system_type = role_remote.os.name
        if role_config.get('rpm') or role_config.get('deb'):
            # We only care about path - deb: vs rpm: is meaningless,
            # rpm: just happens to be parsed first. Nothing is stopping
            # 'deb: /path/to/foo.rpm' and it will work provided remote's
            # os.package_type is 'rpm' and vice versa.
            path = role_config.get('rpm')
            if not path:
                path = role_config.get('deb')
            sha1 = get_sha1_from_pkg_name(path)
            assert sha1, "failed to extract commit hash from path %s" % path
            if need_to_install(ctx, role, sha1):
                need_install[role] = path
                need_version[role] = sha1
        elif role_config.get('sha1') == 'distro':
            version = need_to_install_distro(role_remote)
            if version:
                need_install[role] = 'distro'
                need_version[role] = version
        elif role_config.get("koji") or role_config.get('koji_task'):
            # installing a kernel from koji
            build_id = role_config.get("koji")
            task_id = role_config.get("koji_task")
            if role_remote.os.package_type != "rpm":
                msg = ("Installing a kernel from koji is only supported "
                       "on rpm based systems. System type is {system_type}.")
                msg = msg.format(system_type=system_type)
                log.error(msg)
                ctx.summary["failure_reason"] = msg
                ctx.summary["status"] = "dead"
                raise ConfigError(msg)

            # FIXME: this install should probably happen somewhere else
            # but I'm not sure where, so we'll leave it here for now.
            install_package('koji', role_remote)

            if build_id:
                # get information about this build from koji
                build_info = get_koji_build_info(build_id, role_remote, ctx)
                version = "{ver}-{rel}.x86_64".format(
                    ver=build_info["version"],
                    rel=build_info["release"])
            elif task_id:
                # get information about results of this task from koji
                task_result = get_koji_task_result(task_id, role_remote, ctx)
                # this is not really 'build_info', it's a dict of information
                # about the kernel rpm from the task results, but for the sake
                # of reusing the code below I'll still call it that.
                build_info = get_koji_task_rpm_info('kernel',
                                                    task_result['rpms'])
                # add task_id so we can know later that we're installing
                # from a task and not a build.
                build_info["task_id"] = task_id
                version = build_info["version"]

            if need_to_install(ctx, role, version):
                # for koji installs, need_install carries the whole
                # build_info dict rather than a sha1/path
                need_install[role] = build_info
                need_version[role] = version
        else:
            builder = get_builder_project()(
                "kernel",
                role_config,
                ctx=ctx,
                remote=role_remote,
            )
            sha1 = builder.sha1
            log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
            ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
            if need_to_install(ctx, role, sha1):
                if teuth_config.use_shaman:
                    version = builder.scm_version
                else:
                    version = builder.version
                if not version:
                    raise VersionNotFoundError(builder.base_url)
                need_install[role] = sha1
                need_version[role] = version

        # enable or disable kdb if specified, otherwise do not touch
        if role_config.get('kdb') is not None:
            kdb[role] = role_config.get('kdb')

    if need_install:
        install_firmware(ctx, need_install)
        download_kernel(ctx, need_install)
        install_and_reboot(ctx, need_install)
        wait_for_reboot(ctx, need_version, timeout)

    enable_disable_kdb(ctx, kdb)
def process_role(ctx, config, timeout, role, role_config):
    """
    Resolve, download, and install the requested kernel for a single role,
    then reboot the remote and (optionally) toggle kdb.

    :param ctx: Context
    :param config: full (normalized) kernel task configuration
    :param timeout: seconds to wait for the remote to come back after reboot
    :param role: the role whose remote is being processed
    :param role_config: the kernel spec for this role (rpm/deb path, distro,
                        koji build/task, or gitbuilder branch/tag/sha1)
    """
    need_install = None  # sha1 to dl, or path to rpm or deb
    need_version = None  # utsrelease or sha1

    # gather information about this remote
    (role_remote,) = ctx.cluster.only(role).remotes.keys()
    system_type = role_remote.os.name
    if role_config.get('rpm') or role_config.get('deb'):
        # We only care about path - deb: vs rpm: is meaningless,
        # rpm: just happens to be parsed first. Nothing is stopping
        # 'deb: /path/to/foo.rpm' and it will work provided remote's
        # os.package_type is 'rpm' and vice versa.
        path = role_config.get('rpm')
        if not path:
            path = role_config.get('deb')
        sha1 = get_sha1_from_pkg_name(path)
        assert sha1, "failed to extract commit hash from path %s" % path
        if need_to_install(ctx, role, sha1):
            need_install = path
            need_version = sha1
    elif role_config.get('sha1') == 'distro':
        version = need_to_install_distro(role_remote, role_config)
        if version:
            need_install = 'distro'
            need_version = version
    elif role_config.get("koji") or role_config.get('koji_task'):
        # installing a kernel from koji
        build_id = role_config.get("koji")
        task_id = role_config.get("koji_task")
        if role_remote.os.package_type != "rpm":
            msg = (
                "Installing a kernel from koji is only supported "
                "on rpm based systems. System type is {system_type}."
            )
            msg = msg.format(system_type=system_type)
            log.error(msg)
            ctx.summary["failure_reason"] = msg
            ctx.summary["status"] = "dead"
            raise ConfigError(msg)

        # FIXME: this install should probably happen somewhere else
        # but I'm not sure where, so we'll leave it here for now.
        install_package('koji', role_remote)

        if build_id:
            # get information about this build from koji
            build_info = get_koji_build_info(build_id, role_remote, ctx)
            version = "{ver}-{rel}.x86_64".format(
                ver=build_info["version"], rel=build_info["release"]
            )
        elif task_id:
            # get information about results of this task from koji
            task_result = get_koji_task_result(task_id, role_remote, ctx)
            # this is not really 'build_info', it's a dict of information
            # about the kernel rpm from the task results, but for the sake
            # of reusing the code below I'll still call it that.
            build_info = get_koji_task_rpm_info(
                'kernel', task_result['rpms']
            )
            # add task_id so we can know later that we're installing
            # from a task and not a build.
            build_info["task_id"] = task_id
            version = build_info["version"]

        if need_to_install(ctx, role, version):
            # for koji installs, need_install carries the whole
            # build_info dict rather than a sha1/path
            need_install = build_info
            need_version = version
    else:
        builder = get_builder_project()(
            "kernel",
            role_config,
            ctx=ctx,
            remote=role_remote,
        )
        sha1 = builder.sha1
        log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
        ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
        if need_to_install(ctx, role, sha1):
            if teuth_config.use_shaman:
                version = builder.scm_version
            else:
                version = builder.version
            if not version:
                raise VersionNotFoundError(builder.base_url)
            need_install = sha1
            need_version = version

    if need_install:
        install_firmware(ctx, {role: need_install})
        download_kernel(ctx, {role: need_install})
        install_and_reboot(ctx, {role: need_install}, config)
        wait_for_reboot(ctx, {role: need_version}, timeout, config)

    # enable or disable kdb if specified, otherwise do not touch
    if role_config.get('kdb') is not None:
        kdb = role_config.get('kdb')
        enable_disable_kdb(ctx, {role: kdb})