def install_packages(ctx, config):
    """
    Download the packaged dependencies of Keystone.
    Remove installed packages upon exit.

    The context passed in should be identical to the context
    passed in to the main task.
    """
    assert isinstance(config, dict)
    log.info('Installing packages for Keystone...')

    # Build dependencies of Keystone, keyed by package manager type.
    deps = {
        'deb': ['libffi-dev', 'libssl-dev', 'libldap2-dev', 'libsasl2-dev'],
        'rpm': ['libffi-devel', 'openssl-devel'],
    }
    for (client, _) in config.items():
        # dict.iterkeys() was Python 2 only; keys() works on 2 and 3.
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        for dep in deps[remote.os.package_type]:
            install_package(dep, remote)
    try:
        yield
    finally:
        log.info('Removing packaged dependencies of Keystone...')
        for (client, _) in config.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            for dep in deps[remote.os.package_type]:
                remove_package(dep, remote)
def install_packages(ctx, config):
    """
    Download the packaged dependencies of Keystone.
    Remove installed packages upon exit.

    The context passed in should be identical to the context
    passed in to the main task.
    """
    assert isinstance(config, dict)
    log.info('Installing packages for Keystone...')

    # client -> list of packages bindep reported, recorded so the
    # teardown below removes exactly what was installed here
    packages = {}
    for (client, _) in config.items():
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        # use bindep to read which dependencies we need from
        # keystone/bindep.txt
        toxvenv_sh(ctx, remote, ['pip', 'install', 'bindep'])
        packages[client] = toxvenv_sh(
            ctx, remote,
            [
                'bindep', '--brief', '--file',
                '{}/bindep.txt'.format(get_keystone_dir(ctx))
            ],
            check_status=False).splitlines()  # returns 1 on success?
        for dep in packages[client]:
            install_package(dep, remote)
    try:
        yield
    finally:
        log.info('Removing packaged dependencies of Keystone...')
        for (client, _) in config.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            for dep in packages[client]:
                remove_package(dep, remote)
def install_packages(ctx, config):
    """
    Download the packaged dependencies of Keystone.
    Remove installed packages upon exit.

    The context passed in should be identical to the context
    passed in to the main task.
    """
    assert isinstance(config, dict)
    log.info('Installing packages for Keystone...')

    # Build dependencies of Keystone per package-manager family.
    deps = {
        'deb': [
            'libffi-dev', 'libssl-dev', 'libldap2-dev', 'libsasl2-dev'
        ],
        'rpm': [
            'libffi-devel', 'openssl-devel'
        ],
    }
    for (client, _) in config.items():
        # dict.iterkeys() was Python 2 only; keys() works on 2 and 3.
        (remote,) = ctx.cluster.only(client).remotes.keys()
        for dep in deps[remote.os.package_type]:
            install_package(dep, remote)
    try:
        yield
    finally:
        log.info('Removing packaged dependencies of Keystone...')
        for (client, _) in config.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            for dep in deps[remote.os.package_type]:
                remove_package(dep, remote)
def test_remove_package_deb(self):
    """remove_package on a deb remote must purge non-interactively."""
    fake_remote = Mock()
    fake_remote.os.package_type = "deb"
    packaging.remove_package('apache2', fake_remote)
    fake_remote.run.assert_called_with(args=[
        'DEBIAN_FRONTEND=noninteractive',
        'sudo',
        '-E',
        'apt-get',
        '-y',
        'purge',
        'apache2',
    ])
def install_extra_pkgs(client):
    '''
    Install EXTRA_PKGS
    '''
    # install-on-entry / remove-on-exit around the yield point
    try:
        for package in EXTRA_PKGS:
            packaging.install_package(package, client)
        yield
    finally:
        for package in EXTRA_PKGS:
            packaging.remove_package(package, client)
def prepare_nosetest_env(client):
    """
    Set up the remote so calamari's nose tests can run, and undo the
    setup on exit.  Installs EXTRA_NOSETEST_PKGS, nose and requests
    into the production venv, develop-installs calamari/rest-api, and
    loosens permissions so the Apache/WSGI user can traverse the tree.
    """
    try:
        # extra dependencies that would be in the devmode venv
        if client.os.package_type == 'rpm':
            enable_epel(client, enable=True)
        for package in EXTRA_NOSETEST_PKGS:
            packaging.install_package(package, client)
        if client.os.package_type == 'rpm':
            enable_epel(client, enable=False)

        # install nose itself into the calamari venv, force it in case it's
        # already installed in the system, so we can invoke it by path without
        # fear that it's not present
        pip(client, 'nose', venv='/opt/calamari/venv', force=True)

        # install a later version of requests into the venv as well
        # (for precise)
        pip(client, 'requests', venv='/opt/calamari/venv', force=True)

        # link (setup.py develop) calamari/rest-api into the production venv
        # because production does not include calamari_rest.management, needed
        # for test_rest_api.py's ApiIntrospection
        args = 'cd calamari/rest-api'.split() + [run.Raw(';')] + \
            'sudo /opt/calamari/venv/bin/python setup.py develop'.split()
        client.run(args=args)

        # because, at least in Python 2.6/Centos, site.py uses
        # 'os.path.exists()' to process .pth file entries, and exists() uses
        # access(2) to check for existence, all the paths leading up to
        # $HOME/calamari/rest-api need to be searchable by all users of
        # the package, which will include the WSGI/Django app, running
        # as the Apache user.  So make them all world-read-and-execute.
        args = 'sudo chmod a+x'.split() + \
            ['.', './calamari', './calamari/rest-api']
        client.run(args=args)

        # make one dummy request just to get the WSGI app to do
        # all its log creation here, before the chmod below (I'm
        # looking at you, graphite -- /var/log/calamari/info.log and
        # /var/log/calamari/exception.log)
        client.run(args='wget -q -O /dev/null http://localhost')

        # /var/log/calamari/* is root-or-apache write-only
        client.run(args='sudo chmod a+w /var/log/calamari/*')
        yield
    finally:
        # undo the develop-install ('-u'), then the pip installs, then
        # the distro packages, mirroring the setup order in reverse
        args = 'cd calamari/rest-api'.split() + [run.Raw(';')] + \
            'sudo /opt/calamari/venv/bin/python setup.py develop -u'.split()
        client.run(args=args)
        for pkg in ('nose', 'requests'):
            pip(client, pkg, venv='/opt/calamari/venv', uninstall=True)
        for package in EXTRA_NOSETEST_PKGS:
            packaging.remove_package(package, client)
def test_remove_package_rpm(self, m_misc):
    """remove_package on an rpm system erases via yum."""
    m_misc.get_system_type.return_value = "rpm"
    fake_remote = Mock()
    packaging.remove_package('httpd', fake_remote)
    fake_remote.run.assert_called_with(
        args=['sudo', 'yum', '-y', 'erase', 'httpd'])
def test_remove_package_rpm(self):
    """remove_package on an rpm remote erases via yum."""
    fake_remote = Mock()
    fake_remote.os.package_type = "rpm"
    packaging.remove_package('httpd', fake_remote)
    fake_remote.run.assert_called_with(
        args=['sudo', 'yum', '-y', 'erase', 'httpd'])
def test_remove_package_deb(self, m_misc):
    """remove_package on a deb system purges non-interactively."""
    m_misc.get_system_type.return_value = "deb"
    fake_remote = Mock()
    packaging.remove_package('apache2', fake_remote)
    fake_remote.run.assert_called_with(args=[
        'DEBIAN_FRONTEND=noninteractive',
        'sudo',
        '-E',
        'apt-get',
        '-y',
        'purge',
        'apache2',
    ])
def agent(ctx, config):
    """
    task agent
    calamari.agent: install stats collection
       (for each role of type 'mon.' or 'osd.')

    For example::

        roles:
        - [osd.0, mon.a]
        - [osd.1]
        tasks:
        - calamari.agent:
    """
    log.info('calamari.agent starting')
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.agent', {}))

    # agent gets installed on any remote with role mon or osd
    def needs_agent(role):
        for type in 'mon.', 'osd.':
            if role.startswith(type):
                return True
        return False

    remotes = _remotes(ctx, needs_agent)
    if remotes is None:
        raise RuntimeError('No role configured')
    try:
        # install the agent and point its Diamond collector at the
        # calamari server, one remote at a time
        for rem in remotes:
            log.info('Installing calamari-agent on %s', rem)
            pkg.install_package('calamari-agent', rem)
            # NOTE(review): the server lookup runs once per agent remote;
            # it could be hoisted out of the loop -- confirm intent.
            server_remote = _remotes(ctx,
                                     lambda r: r.startswith(
                                         'calamari.server'))
            if not server_remote:
                raise RuntimeError('No calamari.server role available')
            server_remote = server_remote[0]
            # why isn't shortname available by default?
            serverhost = server_remote.name.split('@')[1]
            log.info('configuring Diamond for {}'.format(serverhost))
            if not _edit_diamond_config(rem, serverhost):
                raise RuntimeError(
                    'Diamond config edit failed on {0}'.format(rem)
                )
        yield
    finally:
        for rem in remotes:
            pkg.remove_package('calamari-agent', rem)
def agent(ctx, config):
    """
    task agent
    calamari.agent: install stats collection
       (for each role of type 'mon.' or 'osd.')

    For example::

        roles:
        - [osd.0, mon.a]
        - [osd.1]
        tasks:
        - calamari.agent:
    """
    log.info('calamari.agent starting')
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.agent', {}))

    def needs_agent(role):
        # the agent belongs on every remote carrying a mon or osd role
        return any(role.startswith(prefix) for prefix in ('mon.', 'osd.'))

    remotes = _remotes(ctx, needs_agent)
    if remotes is None:
        raise RuntimeError('No role configured')
    try:
        for agent_remote in remotes:
            log.info('Installing calamari-agent on %s', agent_remote)
            pkg.install_package('calamari-agent', agent_remote)
            servers = _remotes(
                ctx, lambda r: r.startswith('calamari.server'))
            if not servers:
                raise RuntimeError('No calamari.server role available')
            # why isn't shortname available by default?
            serverhost = servers[0].name.split('@')[1]
            log.info('configuring Diamond for {}'.format(serverhost))
            if not _edit_diamond_config(agent_remote, serverhost):
                raise RuntimeError(
                    'Diamond config edit failed on {0}'.format(agent_remote))
        yield
    finally:
        for agent_remote in remotes:
            pkg.remove_package('calamari-agent', agent_remote)
def install_dnsmasq(remote):
    """
    If dnsmasq is not installed, install it for the duration of the task.
    """
    try:
        existing = packaging.get_package_version(remote, 'dnsmasq')
    except Exception:
        # a bare `except:` would also trap SystemExit/KeyboardInterrupt;
        # any probe failure is treated as "package not installed"
        existing = None

    if existing is None:
        packaging.install_package('dnsmasq', remote)

    try:
        yield
    finally:
        # only remove what this task installed itself
        if existing is None:
            packaging.remove_package('dnsmasq', remote)
def install_block_rbd_driver(ctx, config):
    """
    Make sure qemu rbd block driver (block-rbd.so) is installed
    """
    # Record the package chosen per client: the original code reused the
    # loop variable from the install loop in the cleanup loop, removing
    # the *last* client's package from every remote -- wrong on clusters
    # mixing deb and rpm hosts.
    packages = {}
    for client, client_config in config.items():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        if remote.os.package_type == 'rpm':
            block_rbd_pkg = 'qemu-kvm-block-rbd'
        else:
            block_rbd_pkg = 'qemu-block-extra'
        packages[client] = block_rbd_pkg
        install_package(block_rbd_pkg, remote)
    try:
        yield
    finally:
        for client, client_config in config.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remove_package(packages[client], remote)
def install_python3(ctx, config):
    """
    Install python3 on clients that lack it, for Tempest, and remove it
    again on exit.  Clients that already had python3 are left untouched.
    """
    assert isinstance(config, dict)
    log.info('Installing Python3 for Tempest')
    installed = []
    for (client, _) in config.items():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        try:
            packaging.get_package_version(remote, 'python3')
        except Exception:
            # a bare `except:` would also trap SystemExit/KeyboardInterrupt;
            # a failed version probe means the package is absent
            packaging.install_package('python3', remote)
            installed.append(client)
    try:
        yield
    finally:
        log.info('Removing Python3 required by Tempest...')
        for client in installed:
            (remote,) = ctx.cluster.only(client).remotes.keys()
            packaging.remove_package('python3', remote)
def install_packages(ctx, config):
    """
    Download the packaged dependencies of PyKMIP.
    Remove installed packages upon exit.

    The context passed in should be identical to the context
    passed in to the main task.
    """
    assert isinstance(config, dict)
    log.info('Installing system dependencies for PyKMIP...')
    # client -> packages bindep reported, so teardown removes exactly
    # what was installed here
    packages = {}
    for (client, _) in config.items():
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        # use bindep to read which dependencies we need from temp/bindep.txt
        fd, local_temp_path = tempfile.mkstemp(suffix='.txt',
                                               prefix='bindep-')
        os.write(fd, _bindep_txt.encode())
        os.close(fd)
        fd, remote_temp_path = tempfile.mkstemp(suffix='.txt',
                                                prefix='bindep-')
        os.close(fd)
        remote.put_file(local_temp_path, remote_temp_path)
        os.remove(local_temp_path)
        run_in_pykmip_venv(ctx, remote, ['pip', 'install', 'bindep'])
        r = run_in_pykmip_venv(
            ctx, remote,
            ['bindep', '--brief', '--file', remote_temp_path],
            stdout=BytesIO(), check_status=False)  # returns 1 on success?
        packages[client] = r.stdout.getvalue().decode().splitlines()
        for dep in packages[client]:
            install_package(dep, remote)
    try:
        yield
    finally:
        log.info('Removing system dependencies of PyKMIP...')
        for (client, _) in config.items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            for dep in packages[client]:
                remove_package(dep, remote)
def restapi(ctx, config):
    """
    task restapi

    Calamari Rest API

    For example::

        roles:
        - [mon.a, osd.0, osd.1, calamari.restapi]
        - [osd.2, osd.3]
        tasks:
        - calamari.restapi:
    """
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.restapi', {}))

    remotes_and_roles = \
        ctx.cluster.only(lambda r: r.startswith('calamari.restapi')).remotes
    if remotes_and_roles is None:
        raise RuntimeError('No role configured')

    # check that the role selected also has at least one mon role
    # (iteritems()/iterkeys() were Python 2 only; items()/keys() work on
    # both 2 and 3)
    for rem, roles in remotes_and_roles.items():
        if not any([r for r in roles if r.startswith('mon.')]):
            raise RuntimeError('no mon on remote with roles %s', roles)

    try:
        for rem in remotes_and_roles.keys():
            log.info(rem)
            pkg.install_package('calamari-restapi', rem)
        yield
    finally:
        for rem in remotes_and_roles.keys():
            pkg.remove_package('calamari-restapi', rem)
def server(ctx, config):
    """
    task server:

    Calamari server setup.  Add role 'calamari.server' to the remote
    that will run the webapp.  'calamari.restapi' role must be present
    to serve as the cluster-api target for calamari-server.  Only one
    of calamari.server and calamari.restapi roles is supported currently.

    For example::

        roles:
        - [calamari.server]
        - [mon.0, calamari.restapi]
        - [osd.0, osd.1]
        tasks:
        - calamari.restapi:
        - calamari.server:
    """
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('calamari.server', {}))
    remote = _remotes(ctx, lambda r: r.startswith('calamari.server'))
    if not remote:
        raise RuntimeError('No role configured')

    restapi_remote = _remotes(ctx,
                              lambda r: r.startswith('calamari.restapi'))
    if not restapi_remote:
        raise RuntimeError('Must supply calamari.restapi role')

    remote = remote[0]
    restapi_remote = restapi_remote[0]

    # Initialize before the try: otherwise, if get_package_name() raises,
    # the finally block would hit a NameError on sqlite_package and mask
    # the original exception.
    sqlite_package = None
    try:
        # sqlite3 command is required; on some platforms it's already
        # there and not removable (required for, say yum)
        sqlite_package = pkg.get_package_name('sqlite', remote)
        if sqlite_package and \
                not pkg.install_package(sqlite_package, remote):
            raise RuntimeError('{} install failed'.format(sqlite_package))

        if not pkg.install_package('calamari-server', remote) or \
                not pkg.install_package('calamari-clients', remote) or \
                not _disable_default_nginx(remote) or \
                not _setup_calamari_cluster(remote, restapi_remote):
            raise RuntimeError('Server installation failure')

        log.info('client/server setup complete')
        yield
    finally:
        pkg.remove_package('calamari-server', remote)
        pkg.remove_package('calamari-clients', remote)
        if sqlite_package:
            pkg.remove_package(sqlite_package, remote)
def run_fio(remote, config, rbd_test_dir):
    """
    create fio config file with options based on above config
    get the fio from github, generate binary, and use it to run on
    the generated fio config file
    """
    # job file is created locally and pushed to the remote further down
    fio_config = NamedTemporaryFile(prefix='fio_rbd_', dir='/tmp/',
                                    delete=False)
    fio_config.write('[global]\n')
    # NOTE(review): `ioengine` is only bound when 'io-engine' is in
    # config, but it is read unconditionally below
    # (get_ioengine_package_name) -- confirm callers always supply it.
    if config.get('io-engine'):
        ioengine = config['io-engine']
        fio_config.write('ioengine={ioe}\n'.format(ioe=ioengine))
    else:
        fio_config.write('ioengine=sync\n')
    if config.get('bs'):
        bs = config['bs']
        fio_config.write('bs={bs}\n'.format(bs=bs))
    else:
        fio_config.write('bs=4k\n')
    iodepth = config.get('io-depth', 2)
    fio_config.write('iodepth={iod}\n'.format(iod=iodepth))
    if config.get('fio-io-size'):
        size = config['fio-io-size']
        fio_config.write('size={size}\n'.format(size=size))
    else:
        fio_config.write('size=100m\n')
    fio_config.write('time_based\n')
    if config.get('runtime'):
        runtime = config['runtime']
        fio_config.write('runtime={runtime}\n'.format(runtime=runtime))
    else:
        fio_config.write('runtime=1800\n')
    fio_config.write('allow_file_create=0\n')
    # defaults, overridable from config below
    image_size = 10240
    if config.get('image_size'):
        image_size = config['image_size']
    formats = [1, 2]
    features = [['layering'], ['striping'], ['exclusive-lock', 'object-map']]
    fio_version = '2.21'
    if config.get('formats'):
        formats = config['formats']
    if config.get('features'):
        features = config['features']
    if config.get('fio-version'):
        fio_version = config['fio-version']

    # handle package required for ioengine, if any
    sn = remote.shortname
    ioengine_pkg = get_ioengine_package_name(ioengine, remote)
    if ioengine_pkg:
        install_package(ioengine_pkg, remote)

    fio_config.write('norandommap\n')
    if ioengine == 'rbd':
        fio_config.write('clientname=admin\n')
        fio_config.write('pool=rbd\n')
        fio_config.write('invalidate=0\n')
    elif ioengine == 'libaio':
        fio_config.write('direct=1\n')
    # one image (plus optional clone) per format x feature combination
    for frmt in formats:
        for feature in features:
            log.info("Creating rbd images on {sn}".format(sn=sn))
            feature_name = '-'.join(feature)
            rbd_name = 'i{i}f{f}{sn}'.format(i=frmt, f=feature_name, sn=sn)
            rbd_snap_name = 'i{i}f{f}{sn}@i{i}f{f}{sn}Snap'.format(
                i=frmt, f=feature_name, sn=sn)
            rbd_clone_name = 'i{i}f{f}{sn}Clone'.format(i=frmt,
                                                        f=feature_name,
                                                        sn=sn)
            create_args = [
                'rbd', 'create', '--size', '{size}'.format(size=image_size),
                '--image', rbd_name, '--image-format',
                '{f}'.format(f=frmt)
            ]
            # NOTE(review): map() is lazy on Python 3, so this
            # extend-for-side-effect only works under Python 2 --
            # confirm the target interpreter.
            map(lambda x: create_args.extend(['--image-feature', x]),
                feature)
            remote.run(args=create_args)
            remote.run(args=['rbd', 'info', rbd_name])
            if ioengine != 'rbd':
                # non-rbd engines go through a kernel-mapped block device
                rbd_dev = run_rbd_map(remote, rbd_name, iodepth)
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['rbd', 'snap', 'create',
                                     rbd_snap_name])
                    remote.run(args=['rbd', 'snap', 'protect',
                                     rbd_snap_name])
                    remote.run(
                        args=['rbd', 'clone', rbd_snap_name,
                              rbd_clone_name])
                    rbd_clone_dev = run_rbd_map(remote, rbd_clone_name,
                                                iodepth)
                fio_config.write('[{rbd_dev}]\n'.format(rbd_dev=rbd_dev))
                if config.get('rw'):
                    rw = config['rw']
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                else:
                    fio_config.write('rw=randrw\n')
                fio_config.write(
                    'filename={rbd_dev}\n'.format(rbd_dev=rbd_dev))
                if config.get('test-clone-io'):
                    # NOTE(review): `rw` is unbound here when config has
                    # no 'rw' key -- NameError with test-clone-io set and
                    # rw absent; verify against callers.
                    fio_config.write('[{rbd_clone_dev}]\n'.format(
                        rbd_clone_dev=rbd_clone_dev))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('filename={rbd_clone_dev}\n'.format(
                        rbd_clone_dev=rbd_clone_dev))
            else:
                # rbd ioengine drives the image directly, no mapping
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['rbd', 'snap', 'create',
                                     rbd_snap_name])
                    remote.run(args=['rbd', 'snap', 'protect',
                                     rbd_snap_name])
                    remote.run(
                        args=['rbd', 'clone', rbd_snap_name,
                              rbd_clone_name])
                fio_config.write('[{img_name}]\n'.format(img_name=rbd_name))
                if config.get('rw'):
                    rw = config['rw']
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                else:
                    fio_config.write('rw=randrw\n')
                fio_config.write(
                    'rbdname={img_name}\n'.format(img_name=rbd_name))
                if config.get('test-clone-io'):
                    fio_config.write('[{clone_img_name}]\n'.format(
                        clone_img_name=rbd_clone_name))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('rbdname={clone_img_name}\n'.format(
                        clone_img_name=rbd_clone_name))
    fio_config.close()
    # same path locally and remotely
    remote.put_file(fio_config.name, fio_config.name)
    try:
        log.info("Running rbd feature - fio test on {sn}".format(sn=sn))
        fio = "https://github.com/axboe/fio/archive/fio-" + fio_version + ".tar.gz"
        remote.run(args=[
            'mkdir', run.Raw(rbd_test_dir),
        ])
        # fetch and build fio from source at the pinned version
        remote.run(args=[
            'cd', run.Raw(rbd_test_dir), run.Raw(';'), 'wget', fio,
            run.Raw(';'), run.Raw('tar -xvf fio*tar.gz'), run.Raw(';'),
            run.Raw('cd fio-fio*'), 'configure', run.Raw(';'), 'make'
        ])
        remote.run(args=['ceph', '-s'])
        remote.run(args=[
            run.Raw('{tdir}/fio-fio-{v}/fio --showcmd {f}'.format(
                tdir=rbd_test_dir, v=fio_version, f=fio_config.name))
        ])
        remote.run(args=[
            'sudo',
            run.Raw('{tdir}/fio-fio-{v}/fio {f}'.format(
                tdir=rbd_test_dir, v=fio_version, f=fio_config.name))
        ])
        remote.run(args=['ceph', '-s'])
    finally:
        # NOTE(review): StringIO.StringIO and dict.itervalues() are
        # Python 2 only constructs.
        out = StringIO.StringIO()
        remote.run(args=['rbd', 'showmapped', '--format=json'], stdout=out)
        mapped_images = json.loads(out.getvalue())
        if mapped_images:
            log.info("Unmapping rbd images on {sn}".format(sn=sn))
            for image in mapped_images.itervalues():
                remote.run(args=['sudo', 'rbd', 'unmap',
                                 str(image['device'])])
        log.info("Cleaning up fio install")
        remote.run(args=['rm', '-rf', run.Raw(rbd_test_dir)])
        if ioengine_pkg:
            remove_package(ioengine_pkg, remote)
def run_fio(remote, config, rbd_test_dir):
    """
    create fio config file with options based on above config
    get the fio from github, generate binary, and use it to run on
    the generated fio config file
    """
    # job file is created locally, then pushed to the remote below
    fio_config=NamedTemporaryFile(prefix='fio_rbd_', dir='/tmp/',
                                  delete=False)
    fio_config.write('[global]\n')
    # NOTE(review): `ioengine` is only bound when 'io-engine' is in
    # config, yet it is read unconditionally further down -- confirm
    # callers always supply it.
    if config.get('io-engine'):
        ioengine=config['io-engine']
        fio_config.write('ioengine={ioe}\n'.format(ioe=ioengine))
    else:
        fio_config.write('ioengine=sync\n')
    if config.get('bs'):
        bs=config['bs']
        fio_config.write('bs={bs}\n'.format(bs=bs))
    else:
        fio_config.write('bs=4k\n')
    iodepth = config.get('io-depth', 2)
    fio_config.write('iodepth={iod}\n'.format(iod=iodepth))
    if config.get('fio-io-size'):
        size=config['fio-io-size']
        fio_config.write('size={size}\n'.format(size=size))
    else:
        fio_config.write('size=100m\n')
    fio_config.write('time_based\n')
    if config.get('runtime'):
        runtime=config['runtime']
        fio_config.write('runtime={runtime}\n'.format(runtime=runtime))
    else:
        fio_config.write('runtime=1800\n')
    fio_config.write('allow_file_create=0\n')
    # defaults, overridable from config below
    image_size=10240
    if config.get('image_size'):
        image_size=config['image_size']
    formats=[1,2]
    features=[['layering'],['striping'],['exclusive-lock','object-map']]
    fio_version='2.21'
    if config.get('formats'):
        formats=config['formats']
    if config.get('features'):
        features=config['features']
    if config.get('fio-version'):
        fio_version=config['fio-version']

    # handle package required for ioengine, if any
    sn=remote.shortname
    ioengine_pkg = get_ioengine_package_name(ioengine, remote)
    if ioengine_pkg:
        install_package(ioengine_pkg, remote)

    fio_config.write('norandommap\n')
    if ioengine == 'rbd':
        fio_config.write('clientname=admin\n')
        fio_config.write('pool=rbd\n')
        fio_config.write('invalidate=0\n')
    elif ioengine == 'libaio':
        fio_config.write('direct=1\n')
    # one image (plus optional clone) per format x feature combination
    for frmt in formats:
        for feature in features:
            log.info("Creating rbd images on {sn}".format(sn=sn))
            feature_name = '-'.join(feature)
            rbd_name = 'i{i}f{f}{sn}'.format(i=frmt,f=feature_name,sn=sn)
            rbd_snap_name = 'i{i}f{f}{sn}@i{i}f{f}{sn}Snap'.format(
                i=frmt,f=feature_name,sn=sn)
            rbd_clone_name = 'i{i}f{f}{sn}Clone'.format(
                i=frmt,f=feature_name,sn=sn)
            create_args=['rbd', 'create', '--size',
                         '{size}'.format(size=image_size), '--image',
                         rbd_name, '--image-format',
                         '{f}'.format(f=frmt)]
            # NOTE(review): map() is lazy on Python 3, so this
            # extend-for-side-effect only works under Python 2.
            map(lambda x: create_args.extend(['--image-feature', x]),
                feature)
            remote.run(args=create_args)
            remote.run(args=['rbd', 'info', rbd_name])
            if ioengine != 'rbd':
                # non-rbd engines go through a kernel-mapped block device
                rbd_dev = run_rbd_map(remote, rbd_name, iodepth)
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['rbd', 'snap', 'create',
                                     rbd_snap_name])
                    remote.run(args=['rbd', 'snap', 'protect',
                                     rbd_snap_name])
                    remote.run(args=['rbd', 'clone', rbd_snap_name,
                                     rbd_clone_name])
                    rbd_clone_dev = run_rbd_map(remote, rbd_clone_name,
                                                iodepth)
                fio_config.write('[{rbd_dev}]\n'.format(rbd_dev=rbd_dev))
                if config.get('rw'):
                    rw=config['rw']
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                else:
                    fio_config .write('rw=randrw\n')
                fio_config.write(
                    'filename={rbd_dev}\n'.format(rbd_dev=rbd_dev))
                if config.get('test-clone-io'):
                    # NOTE(review): `rw` is unbound here when config has
                    # no 'rw' key -- NameError; verify against callers.
                    fio_config.write('[{rbd_clone_dev}]\n'.format(
                        rbd_clone_dev=rbd_clone_dev))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('filename={rbd_clone_dev}\n'.format(
                        rbd_clone_dev=rbd_clone_dev))
            else:
                # rbd ioengine drives the image directly, no mapping
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['rbd', 'snap', 'create',
                                     rbd_snap_name])
                    remote.run(args=['rbd', 'snap', 'protect',
                                     rbd_snap_name])
                    remote.run(args=['rbd', 'clone', rbd_snap_name,
                                     rbd_clone_name])
                fio_config.write('[{img_name}]\n'.format(
                    img_name=rbd_name))
                if config.get('rw'):
                    rw=config['rw']
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                else:
                    fio_config.write('rw=randrw\n')
                fio_config.write(
                    'rbdname={img_name}\n'.format(img_name=rbd_name))
                if config.get('test-clone-io'):
                    fio_config.write('[{clone_img_name}]\n'.format(
                        clone_img_name=rbd_clone_name))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('rbdname={clone_img_name}\n'.format(
                        clone_img_name=rbd_clone_name))
    fio_config.close()
    # same path locally and remotely
    remote.put_file(fio_config.name,fio_config.name)
    try:
        log.info("Running rbd feature - fio test on {sn}".format(sn=sn))
        fio = "https://github.com/axboe/fio/archive/fio-" + fio_version + ".tar.gz"
        remote.run(args=['mkdir', run.Raw(rbd_test_dir),])
        # fetch and build fio from source at the pinned version
        remote.run(args=['cd' , run.Raw(rbd_test_dir), run.Raw(';'),
                         'wget' , fio , run.Raw(';'),
                         run.Raw('tar -xvf fio*tar.gz'), run.Raw(';'),
                         run.Raw('cd fio-fio*'), 'configure',
                         run.Raw(';') ,'make'])
        remote.run(args=['ceph', '-s'])
        remote.run(args=[run.Raw(
            '{tdir}/fio-fio-{v}/fio --showcmd {f}'.format(
                tdir=rbd_test_dir,v=fio_version,f=fio_config.name))])
        remote.run(args=['sudo', run.Raw(
            '{tdir}/fio-fio-{v}/fio {f}'.format(
                tdir=rbd_test_dir,v=fio_version,f=fio_config.name))])
        remote.run(args=['ceph', '-s'])
    finally:
        # NOTE(review): StringIO.StringIO and dict.itervalues() are
        # Python 2 only constructs.
        out=StringIO.StringIO()
        remote.run(args=['rbd','showmapped', '--format=json'], stdout=out)
        mapped_images = json.loads(out.getvalue())
        if mapped_images:
            log.info("Unmapping rbd images on {sn}".format(sn=sn))
            for image in mapped_images.itervalues():
                remote.run(args=['sudo', 'rbd', 'unmap',
                                 str(image['device'])])
        log.info("Cleaning up fio install")
        remote.run(args=['rm','-rf', run.Raw(rbd_test_dir)])
        if ioengine_pkg:
            remove_package(ioengine_pkg, remote)
def test_remove_package_rpm(self):
    """remove_package on an rpm remote erases via yum."""
    fake_remote = Mock()
    fake_remote.os.package_type = "rpm"
    packaging.remove_package("httpd", fake_remote)
    fake_remote.run.assert_called_with(
        args=["sudo", "yum", "-y", "erase", "httpd"])
def test_remove_package_deb(self):
    """remove_package on a deb remote purges non-interactively."""
    fake_remote = Mock()
    fake_remote.os.package_type = "deb"
    packaging.remove_package("apache2", fake_remote)
    fake_remote.run.assert_called_with(args=[
        "DEBIAN_FRONTEND=noninteractive",
        "sudo",
        "-E",
        "apt-get",
        "-y",
        "purge",
        "apache2",
    ])