def install_packages(ctx, config):
    """
    Download the packaged dependencies of Keystone.
    Remove install packages upon exit.

    The context passed in should be identical to the context
    passed in to the main task.

    Generator: yields once setup is done; removal runs in the
    ``finally`` on teardown.
    """
    assert isinstance(config, dict)
    log.info('Installing packages for Keystone...')

    # Build dependencies keyed by the remote's package manager type.
    deps = {
        'deb': ['libffi-dev', 'libssl-dev', 'libldap2-dev', 'libsasl2-dev'],
        'rpm': ['libffi-devel', 'openssl-devel'],
    }
    for (client, _) in config.items():
        # dict.iterkeys() was removed in Python 3 -- use keys(), matching
        # the other install helpers in this file.
        (remote,) = ctx.cluster.only(client).remotes.keys()
        for dep in deps[remote.os.package_type]:
            install_package(dep, remote)
    try:
        yield
    finally:
        log.info('Removing packaged dependencies of Keystone...')
        for (client, _) in config.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            for dep in deps[remote.os.package_type]:
                remove_package(dep, remote)
def install_packages(ctx, config):
    """
    Download the packaged dependencies of Keystone.
    Remove install packages upon exit.

    The context passed in should be identical to the context
    passed in to the main task.
    """
    assert isinstance(config, dict)
    log.info('Installing packages for Keystone...')

    # Record what bindep reported per client so teardown removes exactly
    # what was installed.
    packages = {}
    for client in config:
        (remote,) = ctx.cluster.only(client).remotes.keys()
        # use bindep to read which dependencies we need from keystone/bindep.txt
        toxvenv_sh(ctx, remote, ['pip', 'install', 'bindep'])
        bindep_args = [
            'bindep', '--brief',
            '--file', '{}/bindep.txt'.format(get_keystone_dir(ctx)),
        ]
        output = toxvenv_sh(ctx, remote, bindep_args,
                            check_status=False)  # returns 1 on success?
        packages[client] = output.splitlines()
        for dep in packages[client]:
            install_package(dep, remote)
    try:
        yield
    finally:
        log.info('Removing packaged dependencies of Keystone...')
        for client in config:
            (remote,) = ctx.cluster.only(client).remotes.keys()
            for dep in packages[client]:
                remove_package(dep, remote)
def install_packages(ctx, config):
    """
    Download the packaged dependencies of Keystone.
    Remove install packages upon exit.

    The context passed in should be identical to the context
    passed in to the main task.

    Generator: yields once setup is done; removal runs in the
    ``finally`` on teardown.
    """
    assert isinstance(config, dict)
    log.info('Installing packages for Keystone...')

    # Build dependencies keyed by the remote's package manager type.
    deps = {
        'deb': [
            'libffi-dev',
            'libssl-dev',
            'libldap2-dev',
            'libsasl2-dev'
        ],
        'rpm': [
            'libffi-devel',
            'openssl-devel'
        ],
    }
    for (client, _) in config.items():
        # dict.iterkeys() was removed in Python 3 -- use keys().
        (remote,) = ctx.cluster.only(client).remotes.keys()
        for dep in deps[remote.os.package_type]:
            install_package(dep, remote)
    try:
        yield
    finally:
        log.info('Removing packaged dependencies of Keystone...')
        for (client, _) in config.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            for dep in deps[remote.os.package_type]:
                remove_package(dep, remote)
def test_install_package_deb(self):
    """install_package on a deb remote runs apt-get with --force-yes."""
    remote = Mock()
    remote.os.package_type = "deb"
    packaging.install_package('apache2', remote)
    remote.run.assert_called_with(args=[
        'DEBIAN_FRONTEND=noninteractive', 'sudo', '-E', 'apt-get', '-y',
        '--force-yes', 'install', 'apache2',
    ])
def install_extra_pkgs(client):
    """Install every package in EXTRA_PKGS; remove them all on exit."""
    try:
        for package in EXTRA_PKGS:
            packaging.install_package(package, client)
        yield
    finally:
        for package in EXTRA_PKGS:
            packaging.remove_package(package, client)
def prepare_nosetest_env(client):
    """
    Prepare *client* to run the calamari nose test suite.

    Generator (teuthology contextmanager style): installs extra distro
    packages and venv tools, links calamari/rest-api into the production
    venv, fixes permissions, then yields; the ``finally`` block undoes
    the setup.  NOTE(review): the exact remote-shell sequence is
    order-sensitive, so the code is left byte-identical here.
    """
    try:
        # extra dependencies that would be in the devmode venv
        if client.os.package_type == 'rpm':
            enable_epel(client, enable=True)
        for package in EXTRA_NOSETEST_PKGS:
            packaging.install_package(package, client)
        if client.os.package_type == 'rpm':
            enable_epel(client, enable=False)

        # install nose itself into the calamari venv, force it in case it's
        # already installed in the system, so we can invoke it by path without
        # fear that it's not present
        pip(client, 'nose', venv='/opt/calamari/venv', force=True)

        # install a later version of requests into the venv as well
        # (for precise)
        pip(client, 'requests', venv='/opt/calamari/venv', force=True)

        # link (setup.py develop) calamari/rest-api into the production venv
        # because production does not include calamari_rest.management, needed
        # for test_rest_api.py's ApiIntrospection
        args = 'cd calamari/rest-api'.split() + [run.Raw(';')] + \
            'sudo /opt/calamari/venv/bin/python setup.py develop'.split()
        client.run(args=args)

        # because, at least in Python 2.6/Centos, site.py uses
        # 'os.path.exists()' to process .pth file entries, and exists() uses
        # access(2) to check for existence, all the paths leading up to
        # $HOME/calamari/rest-api need to be searchable by all users of
        # the package, which will include the WSGI/Django app, running
        # as the Apache user.  So make them all world-read-and-execute.
        args = 'sudo chmod a+x'.split() + \
            ['.', './calamari', './calamari/rest-api']
        client.run(args=args)

        # make one dummy request just to get the WSGI app to do
        # all its log creation here, before the chmod below (I'm
        # looking at you, graphite -- /var/log/calamari/info.log and
        # /var/log/calamari/exception.log)
        client.run(args='wget -q -O /dev/null http://localhost')

        # /var/log/calamari/* is root-or-apache write-only
        client.run(args='sudo chmod a+w /var/log/calamari/*')

        yield

    finally:
        # Undo setup in reverse: unlink the develop install, uninstall the
        # venv packages, then remove the extra distro packages.
        args = 'cd calamari/rest-api'.split() + [run.Raw(';')] + \
            'sudo /opt/calamari/venv/bin/python setup.py develop -u'.split()
        client.run(args=args)
        for pkg in ('nose', 'requests'):
            pip(client, pkg, venv='/opt/calamari/venv', uninstall=True)
        for package in EXTRA_NOSETEST_PKGS:
            packaging.remove_package(package, client)
def server(ctx, config):
    """
    task server:

    Calamari server setup.  Add role 'calamari.server' to the remote
    that will run the webapp.  'calamari.restapi' role must be present
    to serve as the cluster-api target for calamari-server.  Only one
    of calamari.server and calamari.restapi roles is supported currently.

    For example::

        roles:
        - [calamari.server]
        - [mon.0, calamari.restapi]
        - [osd.0, osd.1]
        tasks:
        - calamari.restapi:
        - calamari.server:
    """
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.server', {}))

    remote = _remotes(ctx, lambda r: r.startswith('calamari.server'))
    if not remote:
        raise RuntimeError('No role configured')

    restapi_remote = _remotes(ctx, lambda r: r.startswith('calamari.restapi'))
    if not restapi_remote:
        raise RuntimeError('Must supply calamari.restapi role')

    remote = remote[0]
    restapi_remote = restapi_remote[0]

    # Initialize before the try: the finally block below reads
    # sqlite_package, which would otherwise be an unbound name (NameError)
    # if get_package_name() raised.
    sqlite_package = None
    try:
        # sqlite3 command is required; on some platforms it's already
        # there and not removable (required for, say yum)
        sqlite_package = pkg.get_package_name('sqlite', remote)
        if sqlite_package and not pkg.install_package(sqlite_package, remote):
            raise RuntimeError('{} install failed'.format(sqlite_package))

        if not pkg.install_package('calamari-server', remote) or \
           not pkg.install_package('calamari-clients', remote) or \
           not _disable_default_nginx(remote) or \
           not _setup_calamari_cluster(remote, restapi_remote):
            raise RuntimeError('Server installation failure')

        log.info('client/server setup complete')
        yield
    finally:
        pkg.remove_package('calamari-server', remote)
        pkg.remove_package('calamari-clients', remote)
        if sqlite_package:
            pkg.remove_package(sqlite_package, remote)
def test_install_package_rpm(self):
    """install_package on an rpm remote runs yum install."""
    remote = Mock()
    remote.os.package_type = "rpm"
    packaging.install_package('httpd', remote)
    remote.run.assert_called_with(
        args=['sudo', 'yum', '-y', 'install', 'httpd'])
def test_install_package_rpm(self, m_misc):
    """install_package runs yum when get_system_type reports rpm."""
    m_misc.get_system_type.return_value = "rpm"
    remote = Mock()
    packaging.install_package('httpd', remote)
    remote.run.assert_called_with(
        args=['sudo', 'yum', '-y', 'install', 'httpd'])
def test_install_package_deb(self, m_misc):
    """install_package runs apt-get when get_system_type reports deb."""
    m_misc.get_system_type.return_value = "deb"
    remote = Mock()
    packaging.install_package('apache2', remote)
    remote.run.assert_called_with(args=[
        'DEBIAN_FRONTEND=noninteractive', 'sudo', '-E', 'apt-get', '-y',
        'install', 'apache2',
    ])
def test_install_package_deb(self):
    """install_package on a deb remote runs apt-get install."""
    remote = Mock()
    remote.os.package_type = "deb"
    packaging.install_package('apache2', remote)
    remote.run.assert_called_with(args=[
        'DEBIAN_FRONTEND=noninteractive', 'sudo', '-E', 'apt-get', '-y',
        'install', 'apache2',
    ])
def agent(ctx, config):
    """
    task agent
    calamari.agent: install stats collection
       (for each role of type 'mon.' or 'osd.')

    For example::

        roles:
        - [osd.0, mon.a]
        - [osd.1]
        tasks:
        - calamari.agent:

    Generator: installs calamari-agent and configures Diamond on each
    mon/osd remote, yields, then removes calamari-agent on teardown.
    """
    log.info('calamari.agent starting')
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.agent', {}))

    # agent gets installed on any remote with role mon or osd
    def needs_agent(role):
        for type in 'mon.', 'osd.':
            if role.startswith(type):
                return True
        return False

    remotes = _remotes(ctx, needs_agent)
    if remotes is None:
        raise RuntimeError('No role configured')
    try:
        for rem in remotes:
            log.info('Installing calamari-agent on %s', rem)
            pkg.install_package('calamari-agent', rem)
            server_remote = _remotes(ctx,
                                     lambda r: r.startswith('calamari.server'))
            if not server_remote:
                raise RuntimeError('No calamari.server role available')
            server_remote = server_remote[0]
            # why isn't shortname available by default?
            serverhost = server_remote.name.split('@')[1]
            log.info('configuring Diamond for {}'.format(serverhost))
            # NOTE(review): the server lookup is loop-invariant and could be
            # hoisted; left in place to preserve behavior exactly.
            if not _edit_diamond_config(rem, serverhost):
                raise RuntimeError(
                    'Diamond config edit failed on {0}'.format(rem)
                )
        yield
    finally:
        for rem in remotes:
            pkg.remove_package('calamari-agent', rem)
def agent(ctx, config):
    """
    task agent
    calamari.agent: install stats collection
       (for each role of type 'mon.' or 'osd.')

    For example::

        roles:
        - [osd.0, mon.a]
        - [osd.1]
        tasks:
        - calamari.agent:

    Generator: installs calamari-agent and configures Diamond on each
    mon/osd remote, yields, then removes calamari-agent on teardown.
    """
    log.info('calamari.agent starting')
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.agent', {}))

    # agent gets installed on any remote with role mon or osd
    def needs_agent(role):
        for type in 'mon.', 'osd.':
            if role.startswith(type):
                return True
        return False

    remotes = _remotes(ctx, needs_agent)
    if remotes is None:
        raise RuntimeError('No role configured')
    try:
        for rem in remotes:
            log.info('Installing calamari-agent on %s', rem)
            pkg.install_package('calamari-agent', rem)
            server_remote = _remotes(ctx,
                                     lambda r: r.startswith('calamari.server'))
            if not server_remote:
                raise RuntimeError('No calamari.server role available')
            server_remote = server_remote[0]
            # why isn't shortname available by default?
            serverhost = server_remote.name.split('@')[1]
            log.info('configuring Diamond for {}'.format(serverhost))
            # NOTE(review): the server lookup is loop-invariant and could be
            # hoisted; left in place to preserve behavior exactly.
            if not _edit_diamond_config(rem, serverhost):
                raise RuntimeError(
                    'Diamond config edit failed on {0}'.format(rem))
        yield
    finally:
        for rem in remotes:
            pkg.remove_package('calamari-agent', rem)
def install_dnsmasq(remote):
    """
    If dnsmasq is not installed, install it for the duration of the task.

    Generator: yields after the (possible) install; removes dnsmasq on
    teardown only if this task installed it.
    """
    try:
        existing = packaging.get_package_version(remote, 'dnsmasq')
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # propagate; any probe failure is treated as "not installed".
        existing = None
    if existing is None:
        packaging.install_package('dnsmasq', remote)
    try:
        yield
    finally:
        if existing is None:
            packaging.remove_package('dnsmasq', remote)
def install_block_rbd_driver(ctx, config):
    """
    Make sure qemu rbd block driver (block-rbd.so) is installed

    Generator: installs the driver package on every configured client,
    yields, then removes it on teardown.
    """
    def _block_rbd_pkg(remote):
        # The qemu rbd driver package name differs between rpm- and
        # deb-based distros.
        if remote.os.package_type == 'rpm':
            return 'qemu-kvm-block-rbd'
        return 'qemu-block-extra'

    for client, client_config in config.items():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        install_package(_block_rbd_pkg(remote), remote)
    try:
        yield
    finally:
        for client, client_config in config.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            # Recompute the package name per remote: the original reused
            # whatever value the last install iteration left behind, which
            # removes the wrong package on mixed rpm/deb clusters.
            remove_package(_block_rbd_pkg(remote), remote)
def install_python3(ctx, config):
    """
    Install Python3 on each configured client where it is missing.

    Generator: yields after install; removes python3 on teardown only
    from the clients where this task installed it.
    """
    assert isinstance(config, dict)
    log.info('Installing Python3 for Tempest')
    installed = []
    for (client, _) in config.items():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        try:
            packaging.get_package_version(remote, 'python3')
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # propagate; any probe failure is treated as "python3 missing".
            packaging.install_package('python3', remote)
            installed.append(client)
    try:
        yield
    finally:
        log.info('Removing Python3 required by Tempest...')
        for client in installed:
            (remote,) = ctx.cluster.only(client).remotes.keys()
            packaging.remove_package('python3', remote)
def install_packages(ctx, config):
    """
    Download the packaged dependencies of PyKMIP.
    Remove install packages upon exit.

    The context passed in should be identical to the context
    passed in to the main task.

    Generator: yields after install; removal happens in the ``finally``.
    """
    assert isinstance(config, dict)
    log.info('Installing system dependenies for PyKMIP...')

    packages = {}
    for (client, _) in config.items():
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        # use bindep to read which dependencies we need from temp/bindep.txt
        # Write the embedded _bindep_txt to a local temp file, then copy it
        # to the remote under a second temp name.
        fd, local_temp_path = tempfile.mkstemp(suffix='.txt',
                                               prefix='bindep-')
        os.write(fd, _bindep_txt.encode())
        os.close(fd)
        # NOTE(review): remote_temp_path is generated with a *local*
        # mkstemp and only used as a path string on the remote -- presumably
        # fine because /tmp exists there too; confirm.
        fd, remote_temp_path = tempfile.mkstemp(suffix='.txt',
                                                prefix='bindep-')
        os.close(fd)
        remote.put_file(local_temp_path, remote_temp_path)
        os.remove(local_temp_path)
        run_in_pykmip_venv(ctx, remote, ['pip', 'install', 'bindep'])
        r = run_in_pykmip_venv(ctx, remote,
                               ['bindep', '--brief', '--file',
                                remote_temp_path],
                               stdout=BytesIO(),
                               check_status=False)  # returns 1 on success?
        packages[client] = r.stdout.getvalue().decode().splitlines()
        for dep in packages[client]:
            install_package(dep, remote)
    try:
        yield
    finally:
        log.info('Removing system dependencies of PyKMIP...')
        for (client, _) in config.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            for dep in packages[client]:
                remove_package(dep, remote)
def restapi(ctx, config):
    """
    task restapi

    Calamari Rest API

    For example::

        roles:
        - [mon.a, osd.0, osd.1, calamari.restapi]
        - [osd.2, osd.3]
        tasks:
        - calamari.restapi:

    Generator: installs calamari-restapi on every matching remote,
    yields, then removes it on teardown.
    """
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.restapi', {}))

    remotes_and_roles = \
        ctx.cluster.only(lambda r: r.startswith('calamari.restapi')).remotes
    if remotes_and_roles is None:
        raise RuntimeError('No role configured')

    # check that the role selected also has at least one mon role
    # (dict.iteritems()/iterkeys() were removed in Python 3; use
    # items()/plain iteration instead)
    for rem, roles in remotes_and_roles.items():
        if not any([r for r in roles if r.startswith('mon.')]):
            raise RuntimeError('no mon on remote with roles %s', roles)

    try:
        for rem in remotes_and_roles.keys():
            log.info(rem)
            pkg.install_package('calamari-restapi', rem)
        yield
    finally:
        for rem in remotes_and_roles.keys():
            pkg.remove_package('calamari-restapi', rem)
def run_fio(remote, config, rbd_test_dir):
    """
    create fio config file with options based on above config
    get the fio from github, generate binary, and use it to run on
    the generated fio config file

    :param remote: remote to run the test on
    :param config: task config dict (io-engine, bs, io-depth, runtime,
                   fio-io-size, image_size, formats, features, rw,
                   fio-version, test-clone-io keys are read here)
    :param rbd_test_dir: directory on the remote for the fio build
    """
    fio_config = NamedTemporaryFile(prefix='fio_rbd_', dir='/tmp/',
                                    delete=False)
    fio_config.write('[global]\n')
    if config.get('io-engine'):
        ioengine = config['io-engine']
        fio_config.write('ioengine={ioe}\n'.format(ioe=ioengine))
    else:
        fio_config.write('ioengine=sync\n')
    if config.get('bs'):
        bs = config['bs']
        fio_config.write('bs={bs}\n'.format(bs=bs))
    else:
        fio_config.write('bs=4k\n')
    iodepth = config.get('io-depth', 2)
    fio_config.write('iodepth={iod}\n'.format(iod=iodepth))
    if config.get('fio-io-size'):
        size = config['fio-io-size']
        fio_config.write('size={size}\n'.format(size=size))
    else:
        fio_config.write('size=100m\n')
    fio_config.write('time_based\n')
    if config.get('runtime'):
        runtime = config['runtime']
        fio_config.write('runtime={runtime}\n'.format(runtime=runtime))
    else:
        fio_config.write('runtime=1800\n')
    fio_config.write('allow_file_create=0\n')
    image_size = 10240
    if config.get('image_size'):
        image_size = config['image_size']

    formats = [1, 2]
    features = [['layering'], ['striping'], ['exclusive-lock', 'object-map']]
    fio_version = '2.21'
    if config.get('formats'):
        formats = config['formats']
    if config.get('features'):
        features = config['features']
    if config.get('fio-version'):
        fio_version = config['fio-version']

    # handle package required for ioengine, if any
    sn = remote.shortname
    ioengine_pkg = get_ioengine_package_name(ioengine, remote)
    if ioengine_pkg:
        install_package(ioengine_pkg, remote)

    fio_config.write('norandommap\n')
    if ioengine == 'rbd':
        fio_config.write('clientname=admin\n')
        fio_config.write('pool=rbd\n')
        fio_config.write('invalidate=0\n')
    elif ioengine == 'libaio':
        fio_config.write('direct=1\n')
    for frmt in formats:
        for feature in features:
            log.info("Creating rbd images on {sn}".format(sn=sn))
            feature_name = '-'.join(feature)
            rbd_name = 'i{i}f{f}{sn}'.format(i=frmt, f=feature_name, sn=sn)
            rbd_snap_name = 'i{i}f{f}{sn}@i{i}f{f}{sn}Snap'.format(
                i=frmt, f=feature_name, sn=sn)
            rbd_clone_name = 'i{i}f{f}{sn}Clone'.format(i=frmt,
                                                        f=feature_name,
                                                        sn=sn)
            create_args = [
                'rbd', 'create', '--size', '{size}'.format(size=image_size),
                '--image', rbd_name, '--image-format',
                '{f}'.format(f=frmt)
            ]
            # NOTE(review): map() used for side effects -- on Python 3
            # map() is lazy, so the --image-feature args would never be
            # appended; this is a Python-2-only idiom.  Confirm which
            # interpreter runs this before porting.
            map(lambda x: create_args.extend(['--image-feature', x]),
                feature)
            remote.run(args=create_args)
            remote.run(args=['rbd', 'info', rbd_name])
            if ioengine != 'rbd':
                rbd_dev = run_rbd_map(remote, rbd_name, iodepth)
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['rbd', 'snap', 'create', rbd_snap_name])
                    remote.run(args=['rbd', 'snap', 'protect',
                                     rbd_snap_name])
                    remote.run(
                        args=['rbd', 'clone', rbd_snap_name, rbd_clone_name])
                    rbd_clone_dev = run_rbd_map(remote, rbd_clone_name,
                                                iodepth)
                fio_config.write('[{rbd_dev}]\n'.format(rbd_dev=rbd_dev))
                if config.get('rw'):
                    rw = config['rw']
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                else:
                    fio_config.write('rw=randrw\n')
                fio_config.write(
                    'filename={rbd_dev}\n'.format(rbd_dev=rbd_dev))
                if config.get('test-clone-io'):
                    fio_config.write('[{rbd_clone_dev}]\n'.format(
                        rbd_clone_dev=rbd_clone_dev))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('filename={rbd_clone_dev}\n'.format(
                        rbd_clone_dev=rbd_clone_dev))
            else:
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['rbd', 'snap', 'create', rbd_snap_name])
                    remote.run(args=['rbd', 'snap', 'protect',
                                     rbd_snap_name])
                    remote.run(
                        args=['rbd', 'clone', rbd_snap_name, rbd_clone_name])
                fio_config.write('[{img_name}]\n'.format(img_name=rbd_name))
                if config.get('rw'):
                    rw = config['rw']
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                else:
                    fio_config.write('rw=randrw\n')
                fio_config.write(
                    'rbdname={img_name}\n'.format(img_name=rbd_name))
                if config.get('test-clone-io'):
                    fio_config.write('[{clone_img_name}]\n'.format(
                        clone_img_name=rbd_clone_name))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('rbdname={clone_img_name}\n'.format(
                        clone_img_name=rbd_clone_name))
    fio_config.close()
    remote.put_file(fio_config.name, fio_config.name)
    try:
        log.info("Running rbd feature - fio test on {sn}".format(sn=sn))
        fio = "https://github.com/axboe/fio/archive/fio-" + fio_version + ".tar.gz"
        remote.run(args=[
            'mkdir', run.Raw(rbd_test_dir),
        ])
        remote.run(args=[
            'cd', run.Raw(rbd_test_dir),
            run.Raw(';'), 'wget', fio, run.Raw(';'),
            run.Raw('tar -xvf fio*tar.gz'), run.Raw(';'),
            run.Raw('cd fio-fio*'), 'configure', run.Raw(';'), 'make'
        ])
        remote.run(args=['ceph', '-s'])
        remote.run(args=[
            run.Raw('{tdir}/fio-fio-{v}/fio --showcmd {f}'.format(
                tdir=rbd_test_dir, v=fio_version, f=fio_config.name))
        ])
        remote.run(args=[
            'sudo',
            run.Raw('{tdir}/fio-fio-{v}/fio {f}'.format(
                tdir=rbd_test_dir, v=fio_version, f=fio_config.name))
        ])
        remote.run(args=['ceph', '-s'])
    finally:
        # NOTE(review): StringIO.StringIO and dict.itervalues() are
        # Python-2-only; this cleanup would fail under Python 3.
        out = StringIO.StringIO()
        remote.run(args=['rbd', 'showmapped', '--format=json'], stdout=out)
        mapped_images = json.loads(out.getvalue())
        if mapped_images:
            log.info("Unmapping rbd images on {sn}".format(sn=sn))
            for image in mapped_images.itervalues():
                remote.run(args=['sudo', 'rbd', 'unmap',
                                 str(image['device'])])
        log.info("Cleaning up fio install")
        remote.run(args=['rm', '-rf', run.Raw(rbd_test_dir)])
        if ioengine_pkg:
            remove_package(ioengine_pkg, remote)
def task(ctx, config):
    """
    Make sure the specified kernel is installed.
    This can be a branch, tag, or sha1 of ceph-client.git or a local
    kernel package.

    To install ceph-client.git branch (default: master)::

        kernel:
          branch: testing

    To install ceph-client.git tag::

        kernel:
          tag: v3.18

    To install ceph-client.git sha1::

        kernel:
          sha1: 275dd19ea4e84c34f985ba097f9cddb539f54a50

    To install from a koji build_id::

        kernel:
          koji: 416058

    To install from a koji task_id::

        kernel:
          koji_task: 9678206

    When installing from koji you also need to set the urls for koji hub
    and the koji root in your teuthology.yaml config file. These are shown
    below with their default values::

        kojihub_url: http://koji.fedoraproject.org/kojihub
        kojiroot_url: http://kojipkgs.fedoraproject.org/packages

    When installing from a koji task_id you also need to set koji_task_url,
    which is the base url used to download rpms from koji task results::

        koji_task_url: https://kojipkgs.fedoraproject.org/work/

    To install local rpm (target should be an rpm system)::

        kernel:
          rpm: /path/to/appropriately-named.rpm

    To install local deb (target should be a deb system)::

        kernel:
          deb: /path/to/appropriately-named.deb

    For rpm: or deb: to work it should be able to figure out sha1 from
    local kernel package basename, see get_sha1_from_pkg_name().  This
    means that you can't for example install a local tag - package built
    with upstream {rpm,deb}-pkg targets won't have a sha1 in its name.

    If you want to schedule a run and use a local kernel package, you
    have to copy the package over to a box teuthology workers are
    running on and specify a path to the package on that box.

    All of the above will install a specified kernel on all targets.
    You can specify different kernels for each role or for all roles of
    a certain type (more specific roles override less specific, see
    normalize_config() for details)::

        kernel:
          client:
            tag: v3.0
          osd:
            branch: btrfs_fixes
          client.1:
            branch: more_specific
          osd.3:
            branch: master

    To wait 3 minutes for hosts to reboot (default: 300)::

        kernel:
          timeout: 180

    To enable kdb::

        kernel:
          kdb: true

    :param ctx: Context
    :param config: Configuration
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task kernel only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {}).get('kernel', {})
    config, timeout = normalize_and_apply_overrides(ctx, config, overrides)
    validate_config(ctx, config)
    log.info('config %s, timeout %d' % (config, timeout))

    need_install = {}  # sha1 to dl, or path to rpm or deb
    need_version = {}  # utsrelease or sha1
    kdb = {}
    remove_old_kernels(ctx)
    for role, role_config in config.items():
        # gather information about this remote
        (role_remote, ) = ctx.cluster.only(role).remotes.keys()
        system_type = role_remote.os.name
        if role_config.get('rpm') or role_config.get('deb'):
            # We only care about path - deb: vs rpm: is meaningless,
            # rpm: just happens to be parsed first.  Nothing is stopping
            # 'deb: /path/to/foo.rpm' and it will work provided remote's
            # os.package_type is 'rpm' and vice versa.
            path = role_config.get('rpm')
            if not path:
                path = role_config.get('deb')
            sha1 = get_sha1_from_pkg_name(path)
            assert sha1, "failed to extract commit hash from path %s" % path
            if need_to_install(ctx, role, sha1):
                need_install[role] = path
                need_version[role] = sha1
        elif role_config.get('sha1') == 'distro':
            # 'distro' pseudo-sha1: install the distribution's own kernel
            version = need_to_install_distro(role_remote)
            if version:
                need_install[role] = 'distro'
                need_version[role] = version
        elif role_config.get("koji") or role_config.get('koji_task'):
            # installing a kernel from koji
            build_id = role_config.get("koji")
            task_id = role_config.get("koji_task")
            if role_remote.os.package_type != "rpm":
                msg = ("Installing a kernel from koji is only supported "
                       "on rpm based systems. System type is {system_type}.")
                msg = msg.format(system_type=system_type)
                log.error(msg)
                ctx.summary["failure_reason"] = msg
                ctx.summary["status"] = "dead"
                raise ConfigError(msg)

            # FIXME: this install should probably happen somewhere else
            # but I'm not sure where, so we'll leave it here for now.
            install_package('koji', role_remote)

            if build_id:
                # get information about this build from koji
                build_info = get_koji_build_info(build_id, role_remote, ctx)
                version = "{ver}-{rel}.x86_64".format(
                    ver=build_info["version"],
                    rel=build_info["release"])
            elif task_id:
                # get information about results of this task from koji
                task_result = get_koji_task_result(task_id, role_remote, ctx)
                # this is not really 'build_info', it's a dict of information
                # about the kernel rpm from the task results, but for the sake
                # of reusing the code below I'll still call it that.
                build_info = get_koji_task_rpm_info('kernel',
                                                    task_result['rpms'])
                # add task_id so we can know later that we're installing
                # from a task and not a build.
                build_info["task_id"] = task_id
                version = build_info["version"]

            if need_to_install(ctx, role, version):
                need_install[role] = build_info
                need_version[role] = version
        else:
            # default: resolve branch/tag/sha1 through the builder project
            builder = get_builder_project()(
                "kernel",
                role_config,
                ctx=ctx,
                remote=role_remote,
            )
            sha1 = builder.sha1
            log.debug('sha1 for {role} is {sha1}'.format(role=role,
                                                         sha1=sha1))
            ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
            if need_to_install(ctx, role, sha1):
                if teuth_config.use_shaman:
                    version = builder.scm_version
                else:
                    version = builder.version
                if not version:
                    raise VersionNotFoundError(builder.base_url)
                need_install[role] = sha1
                need_version[role] = version

        # enable or disable kdb if specified, otherwise do not touch
        if role_config.get('kdb') is not None:
            kdb[role] = role_config.get('kdb')

    if need_install:
        install_firmware(ctx, need_install)
        download_kernel(ctx, need_install)
        install_and_reboot(ctx, need_install)
        wait_for_reboot(ctx, need_version, timeout)

    enable_disable_kdb(ctx, kdb)
def run_fio(remote, config, rbd_test_dir):
    """
    create fio config file with options based on above config
    get the fio from github, generate binary, and use it to run on
    the generated fio config file

    :param remote: remote to run the test on
    :param config: task config dict (io-engine, bs, io-depth, runtime,
                   fio-io-size, image_size, formats, features, rw,
                   fio-version, test-clone-io keys are read here)
    :param rbd_test_dir: directory on the remote for the fio build
    """
    fio_config = NamedTemporaryFile(prefix='fio_rbd_', dir='/tmp/',
                                    delete=False)
    fio_config.write('[global]\n')
    if config.get('io-engine'):
        ioengine = config['io-engine']
        fio_config.write('ioengine={ioe}\n'.format(ioe=ioengine))
    else:
        fio_config.write('ioengine=sync\n')
    if config.get('bs'):
        bs = config['bs']
        fio_config.write('bs={bs}\n'.format(bs=bs))
    else:
        fio_config.write('bs=4k\n')
    iodepth = config.get('io-depth', 2)
    fio_config.write('iodepth={iod}\n'.format(iod=iodepth))
    if config.get('fio-io-size'):
        size = config['fio-io-size']
        fio_config.write('size={size}\n'.format(size=size))
    else:
        fio_config.write('size=100m\n')
    fio_config.write('time_based\n')
    if config.get('runtime'):
        runtime = config['runtime']
        fio_config.write('runtime={runtime}\n'.format(runtime=runtime))
    else:
        fio_config.write('runtime=1800\n')
    fio_config.write('allow_file_create=0\n')
    image_size = 10240
    if config.get('image_size'):
        image_size = config['image_size']

    formats = [1, 2]
    features = [['layering'], ['striping'], ['exclusive-lock', 'object-map']]
    fio_version = '2.21'
    if config.get('formats'):
        formats = config['formats']
    if config.get('features'):
        features = config['features']
    if config.get('fio-version'):
        fio_version = config['fio-version']

    # handle package required for ioengine, if any
    sn = remote.shortname
    ioengine_pkg = get_ioengine_package_name(ioengine, remote)
    if ioengine_pkg:
        install_package(ioengine_pkg, remote)

    fio_config.write('norandommap\n')
    if ioengine == 'rbd':
        fio_config.write('clientname=admin\n')
        fio_config.write('pool=rbd\n')
        fio_config.write('invalidate=0\n')
    elif ioengine == 'libaio':
        fio_config.write('direct=1\n')
    for frmt in formats:
        for feature in features:
            log.info("Creating rbd images on {sn}".format(sn=sn))
            feature_name = '-'.join(feature)
            rbd_name = 'i{i}f{f}{sn}'.format(i=frmt, f=feature_name, sn=sn)
            rbd_snap_name = 'i{i}f{f}{sn}@i{i}f{f}{sn}Snap'.format(
                i=frmt, f=feature_name, sn=sn)
            rbd_clone_name = 'i{i}f{f}{sn}Clone'.format(
                i=frmt, f=feature_name, sn=sn)
            create_args = ['rbd', 'create',
                           '--size', '{size}'.format(size=image_size),
                           '--image', rbd_name,
                           '--image-format', '{f}'.format(f=frmt)]
            # NOTE(review): map() used for side effects -- on Python 3
            # map() is lazy, so the --image-feature args would never be
            # appended; this is a Python-2-only idiom.
            map(lambda x: create_args.extend(['--image-feature', x]),
                feature)
            remote.run(args=create_args)
            remote.run(args=['rbd', 'info', rbd_name])
            if ioengine != 'rbd':
                rbd_dev = run_rbd_map(remote, rbd_name, iodepth)
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['rbd', 'snap', 'create', rbd_snap_name])
                    remote.run(args=['rbd', 'snap', 'protect',
                                     rbd_snap_name])
                    remote.run(args=['rbd', 'clone', rbd_snap_name,
                                     rbd_clone_name])
                    rbd_clone_dev = run_rbd_map(remote, rbd_clone_name,
                                                iodepth)
                fio_config.write('[{rbd_dev}]\n'.format(rbd_dev=rbd_dev))
                if config.get('rw'):
                    rw = config['rw']
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                else:
                    fio_config.write('rw=randrw\n')
                fio_config.write('filename={rbd_dev}\n'.format(
                    rbd_dev=rbd_dev))
                if config.get('test-clone-io'):
                    fio_config.write('[{rbd_clone_dev}]\n'.format(
                        rbd_clone_dev=rbd_clone_dev))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('filename={rbd_clone_dev}\n'.format(
                        rbd_clone_dev=rbd_clone_dev))
            else:
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['rbd', 'snap', 'create', rbd_snap_name])
                    remote.run(args=['rbd', 'snap', 'protect',
                                     rbd_snap_name])
                    remote.run(args=['rbd', 'clone', rbd_snap_name,
                                     rbd_clone_name])
                fio_config.write('[{img_name}]\n'.format(img_name=rbd_name))
                if config.get('rw'):
                    rw = config['rw']
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                else:
                    fio_config.write('rw=randrw\n')
                fio_config.write('rbdname={img_name}\n'.format(
                    img_name=rbd_name))
                if config.get('test-clone-io'):
                    fio_config.write('[{clone_img_name}]\n'.format(
                        clone_img_name=rbd_clone_name))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('rbdname={clone_img_name}\n'.format(
                        clone_img_name=rbd_clone_name))
    fio_config.close()
    remote.put_file(fio_config.name, fio_config.name)
    try:
        log.info("Running rbd feature - fio test on {sn}".format(sn=sn))
        fio = "https://github.com/axboe/fio/archive/fio-" + fio_version + ".tar.gz"
        remote.run(args=['mkdir', run.Raw(rbd_test_dir), ])
        remote.run(args=['cd', run.Raw(rbd_test_dir), run.Raw(';'),
                         'wget', fio, run.Raw(';'),
                         run.Raw('tar -xvf fio*tar.gz'), run.Raw(';'),
                         run.Raw('cd fio-fio*'), 'configure', run.Raw(';'),
                         'make'])
        remote.run(args=['ceph', '-s'])
        remote.run(args=[run.Raw(
            '{tdir}/fio-fio-{v}/fio --showcmd {f}'.format(
                tdir=rbd_test_dir, v=fio_version, f=fio_config.name))])
        remote.run(args=['sudo', run.Raw(
            '{tdir}/fio-fio-{v}/fio {f}'.format(
                tdir=rbd_test_dir, v=fio_version, f=fio_config.name))])
        remote.run(args=['ceph', '-s'])
    finally:
        # NOTE(review): StringIO.StringIO and dict.itervalues() are
        # Python-2-only; this cleanup would fail under Python 3.
        out = StringIO.StringIO()
        remote.run(args=['rbd', 'showmapped', '--format=json'], stdout=out)
        mapped_images = json.loads(out.getvalue())
        if mapped_images:
            log.info("Unmapping rbd images on {sn}".format(sn=sn))
            for image in mapped_images.itervalues():
                remote.run(args=['sudo', 'rbd', 'unmap',
                                 str(image['device'])])
        log.info("Cleaning up fio install")
        remote.run(args=['rm', '-rf', run.Raw(rbd_test_dir)])
        if ioengine_pkg:
            remove_package(ioengine_pkg, remote)
def test_install_package_rpm(self):
    """Expected yum invocation for an rpm-type remote."""
    fake_remote = Mock()
    fake_remote.os.package_type = "rpm"
    packaging.install_package("httpd", fake_remote)
    fake_remote.run.assert_called_with(
        args=["sudo", "yum", "-y", "install", "httpd"])
def test_install_package_deb(self):
    """Expected apt-get invocation for a deb-type remote."""
    fake_remote = Mock()
    fake_remote.os.package_type = "deb"
    packaging.install_package("apache2", fake_remote)
    fake_remote.run.assert_called_with(
        args=["DEBIAN_FRONTEND=noninteractive", "sudo", "-E", "apt-get",
              "-y", "install", "apache2"])
def process_role(ctx, config, timeout, role, role_config):
    """
    Resolve, download and install the kernel requested by *role_config*
    for a single *role*, rebooting the remote if an install is needed,
    then apply the role's kdb setting if given.

    :param ctx: Context
    :param config: full (normalized) kernel task config
    :param timeout: reboot wait timeout
    :param role: role string naming the target remote
    :param role_config: per-role kernel config (rpm/deb/sha1/koji/... keys)
    """
    need_install = None  # sha1 to dl, or path to rpm or deb
    need_version = None  # utsrelease or sha1

    # gather information about this remote
    (role_remote,) = ctx.cluster.only(role).remotes.keys()
    system_type = role_remote.os.name
    if role_config.get('rpm') or role_config.get('deb'):
        # We only care about path - deb: vs rpm: is meaningless,
        # rpm: just happens to be parsed first.  Nothing is stopping
        # 'deb: /path/to/foo.rpm' and it will work provided remote's
        # os.package_type is 'rpm' and vice versa.
        path = role_config.get('rpm')
        if not path:
            path = role_config.get('deb')
        sha1 = get_sha1_from_pkg_name(path)
        assert sha1, "failed to extract commit hash from path %s" % path
        if need_to_install(ctx, role, sha1):
            need_install = path
            need_version = sha1
    elif role_config.get('sha1') == 'distro':
        # 'distro' pseudo-sha1: install the distribution's own kernel
        version = need_to_install_distro(role_remote, role_config)
        if version:
            need_install = 'distro'
            need_version = version
    elif role_config.get("koji") or role_config.get('koji_task'):
        # installing a kernel from koji
        build_id = role_config.get("koji")
        task_id = role_config.get("koji_task")
        if role_remote.os.package_type != "rpm":
            msg = (
                "Installing a kernel from koji is only supported "
                "on rpm based systems. System type is {system_type}."
            )
            msg = msg.format(system_type=system_type)
            log.error(msg)
            ctx.summary["failure_reason"] = msg
            ctx.summary["status"] = "dead"
            raise ConfigError(msg)

        # FIXME: this install should probably happen somewhere else
        # but I'm not sure where, so we'll leave it here for now.
        install_package('koji', role_remote)

        if build_id:
            # get information about this build from koji
            build_info = get_koji_build_info(build_id, role_remote, ctx)
            version = "{ver}-{rel}.x86_64".format(
                ver=build_info["version"],
                rel=build_info["release"]
            )
        elif task_id:
            # get information about results of this task from koji
            task_result = get_koji_task_result(task_id, role_remote, ctx)
            # this is not really 'build_info', it's a dict of information
            # about the kernel rpm from the task results, but for the sake
            # of reusing the code below I'll still call it that.
            build_info = get_koji_task_rpm_info(
                'kernel', task_result['rpms']
            )
            # add task_id so we can know later that we're installing
            # from a task and not a build.
            build_info["task_id"] = task_id
            version = build_info["version"]

        if need_to_install(ctx, role, version):
            need_install = build_info
            need_version = version
    else:
        # default: resolve branch/tag/sha1 through the builder project
        builder = get_builder_project()(
            "kernel",
            role_config,
            ctx=ctx,
            remote=role_remote,
        )
        sha1 = builder.sha1
        log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
        ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
        if need_to_install(ctx, role, sha1):
            if teuth_config.use_shaman:
                version = builder.scm_version
            else:
                version = builder.version
            if not version:
                raise VersionNotFoundError(builder.base_url)
            need_install = sha1
            need_version = version

    if need_install:
        install_firmware(ctx, {role: need_install})
        download_kernel(ctx, {role: need_install})
        install_and_reboot(ctx, {role: need_install}, config)
        wait_for_reboot(ctx, {role: need_version}, timeout, config)

    # enable or disable kdb if specified, otherwise do not touch
    if role_config.get('kdb') is not None:
        kdb = role_config.get('kdb')
        enable_disable_kdb(ctx, {role: kdb})