Exemplo n.º 1
0
def need_to_install_distro(ctx, role):
    """
    Installing kernels on rpm won't setup grub/boot into them.
    This installs the newest kernel package and checks its version
    and compares against current (uname -r) and returns true if newest != current.
    Similar check for deb.

    :param ctx: teuthology context (provides the cluster)
    :param role: role string identifying exactly one remote
    :returns: True if the remote is not running the newest distro kernel
    """
    (role_remote,) = ctx.cluster.only(role).remotes.keys()
    system_type = teuthology.get_system_type(role_remote)
    output, err_mess = StringIO(), StringIO()
    role_remote.run(args=['uname', '-r'], stdout=output, stderr=err_mess)
    current = output.getvalue().strip()
    # Default so the membership test below cannot raise NameError when
    # system_type is neither 'rpm' nor 'deb'.
    newest = ''
    if system_type == 'rpm':
        role_remote.run(args=['sudo', 'yum', 'install', '-y', 'kernel'],
                        stdout=output, stderr=err_mess)
        # reset stringIO output.
        output, err_mess = StringIO(), StringIO()
        role_remote.run(args=['rpm', '-q', 'kernel', '--last'],
                        stdout=output, stderr=err_mess)
        # 'rpm -q --last' lists newest first; first token is the package name.
        newest = output.getvalue().split()[0]

    if system_type == 'deb':
        distribution = teuthology.get_system_type(role_remote, distro=True)
        newest = get_version_from_pkg(role_remote, distribution)

    output.close()
    err_mess.close()
    if current in newest:
        return False
    log.info('Not newest distro kernel. Current: {cur} Expected: {new}'.format(
        cur=current, new=newest))
    return True
Exemplo n.º 2
0
def install_distro_kernel(remote):
    """
    RPM: Find newest kernel on the machine and update grub to use kernel + reboot.
    DEB: Find newest kernel. Parse grub.cfg to figure out the entryname/subentry.
    then modify 01_ceph_kernel to have correct entry + updategrub + reboot.

    :param remote: remote host to operate on; the host is rebooted
                   asynchronously (wait=False) before this returns.
    """
    system_type = teuthology.get_system_type(remote)
    distribution = ''
    if system_type == 'rpm':
        output, err_mess = StringIO(), StringIO()
        # 'rpm -q --last' lists newest first; strip the 'kernel-' prefix to
        # get the bare version string.
        remote.run(args=['rpm', '-q', 'kernel', '--last' ], stdout=output, stderr=err_mess )
        newest=output.getvalue().split()[0].split('kernel-')[1]
        log.info('Distro Kernel Version: {version}'.format(version=newest))
        update_grub_rpm(remote, newest)
        # Reboot without waiting; the caller is expected to reconnect later.
        remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
        output.close()
        err_mess.close()
        return

    if system_type == 'deb':
        distribution = teuthology.get_system_type(remote, distro=True)
        newversion = get_version_from_pkg(remote, distribution)
        if 'ubuntu' in distribution:
            grub2conf = teuthology.get_file(remote, '/boot/grub/grub.cfg', True)
            submenu = ''
            menuentry = ''
            # Scan grub.cfg for the submenu title and the menuentry matching
            # the new kernel version (skipping recovery entries).
            for line in grub2conf.split('\n'):
                if 'submenu' in line:
                    submenu = line.split('submenu ')[1]
                    # Ubuntu likes to be sneaky and change formatting of
                    # grub.cfg between quotes/doublequotes between versions
                    if submenu.startswith("'"):
                        submenu = submenu.split("'")[1]
                    if submenu.startswith('"'):
                        submenu = submenu.split('"')[1]
                if 'menuentry' in line:
                    if newversion in line and 'recovery' not in line:
                        menuentry = line.split('\'')[1]
                        break
            # GRUB addresses nested entries as "submenu>menuentry".
            if submenu:
                grubvalue = submenu + '>' + menuentry
            else:
                grubvalue = menuentry
            # grub.d snippet that pins the default boot entry.
            grubfile = 'cat <<EOF\nset default="' + grubvalue + '"\nEOF'
            teuthology.delete_file(remote, '/etc/grub.d/01_ceph_kernel', sudo=True, force=True)
            teuthology.sudo_write_file(remote, '/etc/grub.d/01_ceph_kernel', StringIO(grubfile), '755')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'update-grub'])
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return

        if 'debian' in distribution:
            grub2_kernel_select_generic(remote, newversion, 'deb')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
Exemplo n.º 3
0
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading

    :param ctx: teuthology context; 'install' overrides are read from
                ctx.config
    :param config: None or a dict mapping role (or 'all') to per-node config
    :param deploy_style: callable(ctx, node, remote, pkgs, system_type) that
                         performs the actual package deployment
    """

    assert config is None or isinstance(config, dict), \
        "install.upgrade only supports a dictionary for configuration"

    for i in config.keys():
            assert config.get(i) is None or isinstance(
                config.get(i), dict), 'host supports dictionary'

    project = config.get('project', 'ceph')

    # use 'install' overrides here, in case the upgrade target is left
    # unspecified/implicit.
    install_overrides = ctx.config.get(
        'overrides', {}).get('install', {}).get(project, {})
    log.info('project %s config %s overrides %s', project, config, install_overrides)

    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))

    # build a normalized remote -> config dict
    remotes = {}
    if 'all' in config:
        for remote in ctx.cluster.remotes.iterkeys():
            remotes[remote] = config.get('all')
    else:
        for role in config.keys():
            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
            if remote in remotes:
                log.warn('remote %s came up twice (role %s)', remote, role)
                continue
            remotes[remote] = config.get(role)

    for remote, node in remotes.iteritems():
        if not node:
            node = {}

        this_overrides = copy.deepcopy(install_overrides)
        # An explicit sha1/tag/branch on the node wins over the override.
        if 'sha1' in node or 'tag' in node or 'branch' in node:
            log.info('config contains sha1|tag|branch, removing those keys from override')
            this_overrides.pop('sha1', None)
            this_overrides.pop('tag', None)
            this_overrides.pop('branch', None)
        teuthology.deep_merge(node, this_overrides)
        log.info('remote %s config %s', remote, node)

        system_type = teuthology.get_system_type(remote)
        assert system_type in ('deb', 'rpm')
        pkgs = PACKAGES[project][system_type]
        log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
            proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
            # FIXME: again, make extra_pkgs distro-agnostic
        pkgs += extra_pkgs
        node['project'] = project

        deploy_style(ctx, node, remote, pkgs, system_type)
Exemplo n.º 4
0
    def install_dependencies(self):
        """Install cbt's package dependencies, then clone and build fio."""
        system_type = misc.get_system_type(self.first_mon)

        if system_type == 'rpm':
            installer = ['sudo', 'yum', '-y', 'install']
            depends = ['python-yaml', 'python-lxml', 'librbd-devel', 'pdsh']
        else:
            installer = ['sudo', 'apt-get', '-y', '--force-yes', 'install']
            depends = ['python-yaml', 'python-lxml', 'librbd-dev']
        self.first_mon.run(args=installer + depends)

        # install fio
        testdir = misc.get_testdir(self.ctx)
        self.first_mon.run(
            args=['git', 'clone', '-b', 'master',
                  'https://github.com/axboe/fio.git',
                  '{tdir}/fio'.format(tdir=testdir)])
        self.first_mon.run(
            args=['cd', os.path.join(testdir, 'fio'), run.Raw('&&'),
                  './configure', run.Raw('&&'),
                  'make'])
Exemplo n.º 5
0
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading: build the per-remote package list (minus any
    excluded packages, plus extras) and delegate to deploy_style.
    """
    remotes = upgrade_remote_to_config(ctx, config)
    project = config.get('project', 'ceph')

    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))

    for rem, node_conf in remotes.iteritems():
        pkg_type = teuthology.get_system_type(rem)
        assert pkg_type in ('deb', 'rpm')
        excluded = set(config.get('exclude_packages', list()))
        pkg_list = list(set(get_package_list(ctx, config)[pkg_type]) - excluded)
        log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
            proj=project, system_type=pkg_type, pkgs=', '.join(pkg_list)))
        # FIXME: again, make extra_pkgs distro-agnostic
        pkg_list += extra_pkgs

        deploy_style(ctx, node_conf, rem, pkg_list, pkg_type)
        verify_package_version(ctx, node_conf, rem)
    return len(remotes)
Exemplo n.º 6
0
def remove_sources(ctx, config):
    """Remove the project's package source lists from every remote, in parallel."""
    handlers = {"deb": _remove_sources_list_deb, "rpm": _remove_sources_list_rpm}
    project = config.get("project", "ceph")
    log.info("Removing {proj} sources lists".format(proj=project))
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(handlers[system_type], remote, project)
Exemplo n.º 7
0
def get_version_from_rpm(remote, sha1):
    """
    Get Actual version string from kernel file RPM URL.

    Builds the gitbuilder dist string from the remote's distro/version,
    resolves the kernel RPM URL for ``sha1`` and queries it with rpm.

    :returns: (version string, kernel rpm url)
    """
    system_type, system_ver = teuthology.get_system_type(remote, distro=True, version=True)
    # Only the major release number goes into the dist string; splitting on
    # '.' is a no-op when there is no dot.
    system_ver = system_ver.split('.')[0]
    ldist = '{system_type}{system_ver}'.format(system_type=system_type, system_ver=system_ver)
    _, rpm_url = teuthology.get_ceph_binary_url(
        package='kernel',
        sha1=sha1,
        format='rpm',
        flavor='basic',
        arch='x86_64',
        dist=ldist,
        )
    kernel_url = urlparse.urljoin(rpm_url, 'kernel.x86_64.rpm')
    kerninfo, kern_err = StringIO(), StringIO()
    remote.run(args=['rpm', '-qp', kernel_url], stdout=kerninfo, stderr=kern_err)
    # Keep only the first output line; splitting a string without a newline
    # returns the string itself, so this covers both cases.
    kernelstring = kerninfo.getvalue().split('\n')[0]
    return kernelstring, kernel_url
Exemplo n.º 8
0
def install_package(package, remote):
    """
    Install 'package' on 'remote'
    Assumes repo has already been set up (perhaps with install_repo)

    :returns: the remote.run result, or False on an unknown flavor
    """
    log.info('Installing package %s on %s', package, remote)
    flavor = misc.get_system_type(remote)
    # Per-flavor install command prefixes.
    commands = {
        'deb': ['DEBIAN_FRONTEND=noninteractive', 'sudo', '-E',
                'apt-get', '-y', 'install'],
        'rpm': ['sudo', 'yum', '-y', 'install'],
    }
    if flavor not in commands:
        log.error('install_package: bad flavor ' + flavor + '\n')
        return False
    pkgcmd = commands[flavor] + ['{package}'.format(package=package)]
    return remote.run(args=pkgcmd)
Exemplo n.º 9
0
def get_ioengine_package_name(ioengine, remote):
    """Map a fio ioengine name to its devel package on the remote's distro.

    Returns None for ioengines with no extra package requirement.
    """
    system_type = teuthology.get_system_type(remote)
    # (rpm package, deb package) per supported ioengine
    packages = {
        'rbd': ('librbd1-devel', 'librbd-dev'),
        'libaio': ('libaio-devel', 'libaio-dev'),
    }
    if ioengine not in packages:
        return None
    rpm_name, deb_name = packages[ioengine]
    return rpm_name if system_type == 'rpm' else deb_name
Exemplo n.º 10
0
def install_firmware(ctx, config):
    """
    Go to the github to get the latest firmware.

    :param ctx: Context
    :param config: Configuration
    """
    # Upstream repo, used unless the lab config overrides the URL.
    linux_firmware_git_upstream = 'git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git'
    uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
    fw_dir = '/lib/firmware/updates'

    for role in config.iterkeys():
        # Distro kernels bring their own firmware; nothing to install.
        if config[role].find('distro') >= 0:
            log.info('Skipping firmware on distro kernel');
            return
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        package_type = teuthology.get_system_type(role_remote)
        # NOTE(review): rpm systems are skipped entirely here — confirm this
        # is intentional and not just unimplemented.
        if package_type == 'rpm':
            return
        log.info('Installing linux-firmware on {role}...'.format(role=role))
        role_remote.run(
            args=[
                # kludge around mysterious 0-byte .git/HEAD files
                'cd', fw_dir,
                run.Raw('&&'),
                'test', '-d', '.git',
                run.Raw('&&'),
                'test', '!', '-s', '.git/HEAD',
                run.Raw('&&'),
                'sudo', 'rm', '-rf', '.git',
                run.Raw(';'),
                # init
                'sudo', 'install', '-d', '-m0755', fw_dir,
                run.Raw('&&'),
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'init',
                ],
            )
        # Point 'origin' at the firmware repo unless already configured.
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'config',
                '--get', 'remote.origin.url', run.Raw('>/dev/null'),
                run.Raw('||'),
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir,
                'remote', 'add', 'origin', uri,
                ],
            )
        # Hard-sync the working tree to the latest upstream master.
        role_remote.run(
            args=[
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'fetch', 'origin',
                run.Raw('&&'),
                'sudo', 'git', 'reset', '--hard', 'origin/master'
                ],
            )
Exemplo n.º 11
0
def install_packages(ctx, pkgs, config):
    """
    installs Debian packages.
    """
    # Dispatch to the distro-specific installer, one remote per worker.
    dispatch = {
        "deb": _update_deb_package_list_and_install,
        "rpm": _update_rpm_package_list_and_install,
    }
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(dispatch[system_type], ctx, remote, pkgs[system_type], config)
Exemplo n.º 12
0
def remove_packages(ctx, config, pkgs):
    """Remove distro-appropriate packages from every remote, in parallel."""
    dispatch = {"deb": _remove_deb, "rpm": _remove_rpm}
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(dispatch[system_type], ctx, config, remote, pkgs[system_type])
Exemplo n.º 13
0
def get_service_name(service, rem):
    """
    Find the remote-specific name of the generic 'service'

    :returns: the mapped name, or None when unknown
    """
    flavor = misc.get_system_type(rem)
    per_flavor = _SERVICE_MAP.get(service)
    if per_flavor is None:
        return None
    return per_flavor.get(flavor)
Exemplo n.º 14
0
def start_apache(ctx, config, on_client = None, except_client = None):
    """
    Start apache on remote sites.

    Generator used as a context-style task: apache processes are started
    before the yield and torn down after it.

    :param ctx: teuthology context
    :param config: per-client configuration dict
    :param on_client: when given, only start apache for this client
    :param except_client: when given, skip this client
    """
    log.info('Starting apache...')
    testdir = teuthology.get_testdir(ctx)
    apaches = {}
    clients_to_run = [on_client]
    if on_client is None:
        clients_to_run = config.keys()
    for client in clients_to_run:
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id
        if client == except_client:
            continue
        (remote,) = ctx.cluster.only(client).remotes.keys()
        system_type = teuthology.get_system_type(remote)
        if system_type == 'deb':
            apache_name = 'apache2'
        else:
            # Prefer the worker MPM binary when it exists on the remote.
            try:
                remote.run(
                    args=[
                        'stat',
                        '/usr/sbin/httpd.worker',
                    ],
                )
                apache_name = '/usr/sbin/httpd.worker'
            except CommandFailedError:
                apache_name = '/usr/sbin/httpd'

        # Run apache in the foreground (-X) under daemon-helper so closing
        # its stdin kills it during teardown.
        proc = remote.run(
            args=[
                'adjust-ulimits',
                'daemon-helper',
                'kill',
                apache_name,
                '-X',
                '-f',
                '{tdir}/apache/apache.{client_with_cluster}.conf'.format(tdir=testdir,
                                                            client_with_cluster=client_with_cluster),
                ],
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
            )
        apaches[client_with_cluster] = proc

    try:
        yield
    finally:
        log.info('Stopping apache...')
        # Closing stdin signals daemon-helper to kill its apache child.
        for client, proc in apaches.iteritems():
            proc.stdin.close()

        run.wait(apaches.itervalues())
Exemplo n.º 15
0
def need_to_install_distro(ctx, role):
    """
    Installing kernels on rpm won't setup grub/boot into them.
    This installs the newest kernel package and checks its version
    and compares against current (uname -r) and returns true if newest != current.
    Similar check for deb.

    :param ctx: teuthology context (provides the cluster)
    :param role: role string identifying exactly one remote
    :returns: True if the remote is not running the newest distro kernel
    """
    (role_remote,) = ctx.cluster.only(role).remotes.keys()
    system_type = teuthology.get_system_type(role_remote)
    output, err_mess = StringIO(), StringIO()
    role_remote.run(args=['uname', '-r'], stdout=output, stderr=err_mess)
    current = output.getvalue().strip()
    # Default so the membership test below cannot raise NameError when the
    # rpm scan finds no matching kernel or system_type is unexpected.
    newest = ''
    if system_type == 'rpm':
        role_remote.run(args=['sudo', 'yum', 'install', '-y', 'kernel'],
                        stdout=output, stderr=err_mess)
        if 'Nothing to do' in output.getvalue():
            # Newest kernel already installed; a dry-run reinstall reveals
            # whether the *running* kernel is that newest one.
            output.truncate(0), err_mess.truncate(0)
            role_remote.run(args=['echo', 'no', run.Raw('|'), 'sudo', 'yum',
                                  'reinstall', 'kernel', run.Raw('||'), 'true'],
                            stdout=output, stderr=err_mess)
            if 'Skipping the running kernel' in err_mess.getvalue():
                # Current running kernel is already newest and updated
                log.info('Newest distro kernel already installed/running')
                return False
            else:
                output.truncate(0), err_mess.truncate(0)
                role_remote.run(args=['sudo', 'yum', 'reinstall', '-y',
                                      'kernel', run.Raw('||'), 'true'],
                                stdout=output, stderr=err_mess)
        # reset stringIO output.
        output.truncate(0), err_mess.truncate(0)
        role_remote.run(args=['rpm', '-q', 'kernel', '--last'],
                        stdout=output, stderr=err_mess)
        # Pick the newest non-ceph kernel package from the rpm listing.
        for kernel in output.getvalue().split():
            if kernel.startswith('kernel') and 'ceph' not in kernel:
                newest = kernel.split('kernel-')[1]
                break

    if system_type == 'deb':
        distribution = teuthology.get_system_type(role_remote, distro=True)
        newest = get_version_from_pkg(role_remote, distribution)

    output.close()
    err_mess.close()
    if current in newest:
        return False
    log.info('Not newest distro kernel. Current: {cur} Expected: {new}'.format(
        cur=current, new=newest))
    return True
Exemplo n.º 16
0
def get_package_name(pkg, rem):
    """
    Find the remote-specific name of the generic 'pkg'

    :returns: the mapped name, or None when unknown
    """
    flavor = misc.get_system_type(rem)
    per_flavor = _PACKAGE_MAP.get(pkg)
    if per_flavor is None:
        return None
    return per_flavor.get(flavor)
Exemplo n.º 17
0
def install_packages(ctx, pkgs, config):
    """
    Installs packages on each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param pkgs: list of packages names to install
    :param config: the config dict
    """
    # One parallel worker per remote, dispatched on package flavor.
    dispatch = {
        "deb": _update_deb_package_list_and_install,
        "rpm": _update_rpm_package_list_and_install,
    }
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(dispatch[system_type], ctx, remote, pkgs[system_type], config)
Exemplo n.º 18
0
def remove_sources(ctx, config):
    """
    Removes repo source files from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    handlers = {"deb": _remove_sources_list_deb, "rpm": _remove_sources_list_rpm}
    project = config.get("project", "ceph")
    log.info("Removing {proj} sources lists".format(proj=project))
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(handlers[system_type], remote, project)
Exemplo n.º 19
0
def remove_packages(ctx, config, pkgs):
    """
    Removes packages from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    :param pkgs: list of packages names to remove
    """
    dispatch = {
        "deb": _remove_deb,
        "rpm": _remove_rpm,
    }
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(dispatch[system_type], ctx, config, remote, pkgs[system_type])
Exemplo n.º 20
0
def start_apache(ctx, config):
    """
    Start apache on remote sites.

    Generator used as a context-style task: apache processes are started
    before the yield and torn down after it.

    :param ctx: teuthology context
    :param config: per-client configuration dict
    """
    log.info('Starting apache...')
    testdir = teuthology.get_testdir(ctx)
    apaches = {}
    for client in config.iterkeys():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        system_type = teuthology.get_system_type(remote)
        if system_type == 'deb':
            apache_name = 'apache2'
        else:
            # Prefer the worker MPM binary when it exists on the remote.
            try:
                remote.run(
                    args=[
                        'stat',
                        '/usr/sbin/httpd.worker',
                    ],
                )
                apache_name = '/usr/sbin/httpd.worker'
            except CommandFailedError:
                apache_name = '/usr/sbin/httpd'

        # Run apache in the foreground (-X) under daemon-helper so closing
        # its stdin kills it during teardown.
        proc = remote.run(
            args=[
                'adjust-ulimits',
                'daemon-helper',
                'kill',
                apache_name,
                '-X',
                '-f',
                '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
                                                            client=client),
                ],
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
            )
        apaches[client] = proc

    try:
        yield
    finally:
        log.info('Stopping apache...')
        # Closing stdin signals daemon-helper to kill its apache child.
        for client, proc in apaches.iteritems():
            proc.stdin.close()

        run.wait(apaches.itervalues())
Exemplo n.º 21
0
Arquivo: cbt.py Projeto: C2python/ceph
    def end(self):
        """Tear down: remove cbt and any benchmark artifacts from testdir."""
        super(CBT, self).end()
        testdir = misc.get_testdir(self.ctx)

        def _remove(path):
            # Remove one tree on the primary monitor.
            self.first_mon.run(
                args=[
                    'rm', '--one-file-system', '-rf', '--',
                    path,
                ]
            )

        _remove('{tdir}/cbt'.format(tdir=testdir))
        benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
        if benchmark_type == 'librbdfio':
            _remove('{tdir}/fio'.format(tdir=testdir))

        if benchmark_type == 'cosbench':
            # Ubuntu 18.04 runs a patched cosbench build with a different name.
            os_version = misc.get_system_type(self.first_mon, False, True)
            if os_version == '18.04':
                cosbench_version = 'cosbench-0.4.2.c3.1'
            else:
                cosbench_version = '0.4.2.c3'
            for leftover in (
                    '{tdir}/cos'.format(tdir=testdir),
                    '{tdir}/{version}'.format(tdir=testdir, version=cosbench_version),
                    '{tdir}/{version}.zip'.format(tdir=testdir, version=cosbench_version),
                    '{tdir}/xml'.format(tdir=testdir)):
                _remove(leftover)
Exemplo n.º 22
0
def install_packages(ctx, pkgs, config):
    """
    Installs packages on each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param pkgs: list of packages names to install
    :param config: the config dict
    """
    dispatch = {
        "deb": deb._update_package_list_and_install,
        "rpm": rpm._update_package_list_and_install,
    }
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(dispatch[system_type],
                    ctx, remote, pkgs[system_type], config)

    # verifies that the install worked as expected
    for remote in ctx.cluster.remotes.iterkeys():
        verify_package_version(ctx, config, remote)
Exemplo n.º 23
0
def install_packages(ctx, pkgs, config):
    """
    Installs packages on each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param pkgs: list of packages names to install
    :param config: the config dict
    """
    installers = {
        "deb": deb._update_package_list_and_install,
        "rpm": rpm._update_package_list_and_install,
    }
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            flavor = teuthology.get_system_type(remote)
            p.spawn(installers[flavor], ctx, remote, pkgs[flavor], config)

    # verifies that the install worked as expected
    for remote in ctx.cluster.remotes.iterkeys():
        verify_package_version(ctx, config, remote)
Exemplo n.º 24
0
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading: compute the per-remote package list, refuse
    downgrades, then delegate to deploy_style and verify the result.
    """
    remotes = upgrade_remote_to_config(ctx, config)
    project = config.get('project', 'ceph')

    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))

    for rem, node_conf in remotes.items():
        pkg_type = teuthology.get_system_type(rem)
        assert pkg_type in ('deb', 'rpm')
        pkg_list = get_package_list(ctx, config)[pkg_type]
        log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
            proj=project, system_type=pkg_type, pkgs=', '.join(pkg_list)))
        # extra_packages may be a flat list or a per-distro mapping.
        if isinstance(extra_pkgs, dict):
            pkg_list += extra_pkgs.get(pkg_type, [])
        else:
            pkg_list += extra_pkgs

        installed_version = packaging.get_package_version(rem, 'ceph-common')
        upgrade_version = get_upgrade_version(ctx, node_conf, rem)
        log.info("Ceph {s} upgrade from {i} to {u}".format(
            s=pkg_type,
            i=installed_version,
            u=upgrade_version
        ))
        # Downgrades can never succeed; fail fast with a hint.
        if _upgrade_is_downgrade(installed_version, upgrade_version):
            raise RuntimeError(
                "An attempt to upgrade from a higher version to a lower one "
                "will always fail. Hint: check tags in the target git branch."
            )

        deploy_style(ctx, node_conf, rem, pkg_list, pkg_type)
        verify_package_version(ctx, node_conf, rem)
    return len(remotes)
Exemplo n.º 25
0
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading

    :param ctx: teuthology context
    :param config: the config dict
    :param deploy_style: callable(ctx, node, remote, pkgs, system_type) that
                         performs the actual package deployment
    :returns: number of remotes upgraded
    :raises RuntimeError: when the requested upgrade is actually a downgrade
    """
    remotes = upgrade_remote_to_config(ctx, config)
    project = config.get('project', 'ceph')

    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))

    for remote, node in remotes.iteritems():

        system_type = teuthology.get_system_type(remote)
        assert system_type in ('deb', 'rpm')
        pkgs = get_package_list(ctx, config)[system_type]
        log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
            proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
        # extra_packages may be a flat list or a per-distro mapping.
        if isinstance(extra_pkgs, dict):
            pkgs += extra_pkgs.get(system_type, [])
        else:
            pkgs += extra_pkgs

        installed_version = packaging.get_package_version(remote, 'ceph-common')
        upgrade_version = get_upgrade_version(ctx, node, remote)
        log.info("Ceph {s} upgrade from {i} to {u}".format(
            s=system_type,
            i=installed_version,
            u=upgrade_version
        ))
        # Fixed: this 'if' was indented with a tab in the original source,
        # which raises TabError/IndentationError under Python 3.
        if _upgrade_is_downgrade(installed_version, upgrade_version):
            raise RuntimeError(
                "An attempt to upgrade from a higher version to a lower one "
                "will always fail. Hint: check tags in the target git branch."
            )

        deploy_style(ctx, node, remote, pkgs, system_type)
        verify_package_version(ctx, node, remote)
    return len(remotes)
Exemplo n.º 26
0
    def install_deps(self):
        """
        Install the distro packages needed to build and run the test suite
        on the client remote of ``self.mount_a``.

        :raises RuntimeError: on a distro that is neither yum- nor apt-based
        """
        from teuthology.misc import get_system_type

        distro, version = get_system_type(self.mount_a.client_remote,
                                          distro=True,
                                          version=True)
        distro = distro.lower()
        major_ver_num = int(version.split('.')[0])  # only keep major release
        # number

        # we keep fedora here so that right deps are installed when this test
        # is run locally by a dev.
        if distro in ('redhatenterpriseserver', 'redhatenterprise', 'fedora',
                      'centos'):
            deps = """acl attr automake bc dbench dump e2fsprogs fio \
            gawk gcc indent libtool lvm2 make psmisc quota sed \
            xfsdump xfsprogs \
            libacl-devel libattr-devel libaio-devel libuuid-devel \
            xfsprogs-devel btrfs-progs-devel python2 sqlite""".split()
            deps_old_distros = ['xfsprogs-qa-devel']

            # btrfs-progs-devel is dropped on RHEL/CentOS releases after 7
            if distro != 'fedora' and major_ver_num > 7:
                deps.remove('btrfs-progs-devel')

            args = ['sudo', 'yum', 'install', '-y'] + deps + deps_old_distros
        elif distro == 'ubuntu':
            deps = """xfslibs-dev uuid-dev libtool-bin \
            e2fsprogs automake gcc libuuid1 quota attr libattr1-dev make \
            libacl1-dev libaio-dev xfsprogs libgdbm-dev gawk fio dbench \
            uuid-runtime python sqlite3""".split()

            # Ubuntu >= 19.x names the python2 package 'python2'
            if major_ver_num >= 19:
                deps[deps.index('python')] = 'python2'
            args = ['sudo', 'apt-get', 'install', '-y'] + deps
        else:
            raise RuntimeError('expected a yum based or a apt based system')

        self.mount_a.client_remote.run(args=args, omit_sudo=False)
Exemplo n.º 27
0
def remove_package(package, remote):
    """
    Remove package from remote

    :returns: the remote.run result, or False on an unknown flavor
    """
    flavor = misc.get_system_type(remote)
    # Per-flavor removal command prefixes.
    commands = {
        'deb': ['DEBIAN_FRONTEND=noninteractive', 'sudo', '-E',
                'apt-get', '-y', 'purge'],
        'rpm': ['sudo', 'yum', '-y', 'erase'],
    }
    if flavor not in commands:
        log.error('remove_package: bad flavor ' + flavor + '\n')
        return False
    pkgcmd = commands[flavor] + ['{package}'.format(package=package)]
    return remote.run(args=pkgcmd)
Exemplo n.º 28
0
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading: build each remote's package list and delegate
    the deployment to deploy_style, then verify the installed versions.
    """
    remotes = upgrade_remote_to_config(ctx, config)
    project = config.get('project', 'ceph')

    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))

    for rem, node_conf in remotes.iteritems():
        pkg_type = teuthology.get_system_type(rem)
        assert pkg_type in ('deb', 'rpm')
        pkg_list = get_package_list(ctx, config)[pkg_type]
        log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
            proj=project, system_type=pkg_type, pkgs=', '.join(pkg_list)))
        # FIXME: again, make extra_pkgs distro-agnostic
        pkg_list += extra_pkgs

        deploy_style(ctx, node_conf, rem, pkg_list, pkg_type)
        verify_package_version(ctx, node_conf, rem)
    return len(remotes)
Exemplo n.º 29
0
def start_apache(ctx, config):
    """
    Start a foreground apache (via daemon-helper) for every client in
    ``config``, yield while the test runs, then stop each instance by
    closing its stdin and wait for all of them to exit.
    """
    log.info('Starting apache...')
    testdir = teuthology.get_testdir(ctx)
    apaches = {}
    for client in config.iterkeys():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        # Debian-based hosts ship the binary as apache2; RPM-based as httpd.
        if teuthology.get_system_type(remote) == 'deb':
            apache_name = 'apache2'
        else:
            apache_name = '/usr/sbin/httpd'
        conf_path = '{tdir}/apache/apache.{client}.conf'.format(
            tdir=testdir, client=client)
        apaches[client] = remote.run(
            args=[
                'adjust-ulimits',
                'daemon-helper',
                'kill',
                apache_name,
                '-X',
                '-f',
                conf_path,
            ],
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )

    try:
        yield
    finally:
        log.info('Stopping apache...')
        # closing stdin makes daemon-helper kill the apache child
        for client, proc in apaches.iteritems():
            proc.stdin.close()

        run.wait(apaches.itervalues())
Exemplo n.º 30
0
def download_ceph_deploy(ctx, config):
    """
    Clone ceph-deploy from the ceph.com git mirror onto the admin node
    (mon.a) and run its ``bootstrap`` script, then yield; the checkout
    is removed on the way out.

    The branch defaults to master unless ``ceph-deploy-branch`` is set.
    If ``python_version`` is set it must be '2' or '3'; the matching
    python packages are installed cluster-wide first and the version is
    passed to ``bootstrap``.
    """
    # mon.a doubles as the ceph-deploy admin node
    (ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys()

    if 'python_version' in config:
        py_ver = str(config['python_version'])
        supported_versions = ['2', '3']
        if py_ver not in supported_versions:
            raise ValueError("python_version must be: {}, not {}".format(
                ' or '.join(supported_versions), py_ver
            ))

        log.info("Installing Python")
        system_type = teuthology.get_system_type(ceph_admin)

        if system_type == 'rpm':
            package = 'python36' if py_ver == '3' else 'python'
            ctx.cluster.run(args=[
                'sudo', 'yum', '-y', 'install',
                package, 'python-virtualenv'
            ])
        else:
            package = 'python3' if py_ver == '3' else 'python'
            ctx.cluster.run(args=[
                'sudo', 'apt-get', '-y', '--force-yes', 'install',
                package, 'python-virtualenv'
            ])

    log.info('Downloading ceph-deploy...')
    testdir = teuthology.get_testdir(ctx)
    ceph_deploy_branch = config.get('ceph-deploy-branch', 'master')
    clone_dir = '{tdir}/ceph-deploy'.format(tdir=testdir)

    ceph_admin.run(
        args=[
            'git', 'clone', '-b', ceph_deploy_branch,
            teuth_config.ceph_git_base_url + 'ceph-deploy.git',
            clone_dir,
        ],
    )

    bootstrap_args = [
        'cd',
        clone_dir,
        run.Raw('&&'),
        './bootstrap',
    ]
    if 'python_version' in config:
        bootstrap_args.append(str(config['python_version']))
    ceph_admin.run(args=bootstrap_args)

    try:
        yield
    finally:
        log.info('Removing ceph-deploy ...')
        ceph_admin.run(
            args=[
                'rm',
                '-rf',
                clone_dir,
            ],
        )
Exemplo n.º 31
0
def ship_apache_configs(ctx, config, role_endpoints):
    """
    Render the apache config template and push it, together with the
    rgw.fcgi wrapper script, to every client; yield while the test runs
    and delete the shipped files on termination.

    :param config: dict mapping client role -> per-client settings
        (``idle_timeout`` honored, default 30)
    :param role_endpoints: dict mapping client role -> (host, port)
    """
    assert isinstance(config, dict)
    assert isinstance(role_endpoints, dict)
    testdir = teuthology.get_testdir(ctx)
    log.info('Shipping apache config and rgw.fcgi...')
    src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
    for client, client_conf in config.iteritems():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        system_type = teuthology.get_system_type(remote)
        if not client_conf:
            client_conf = {}
        idle_timeout = client_conf.get('idle_timeout', 30)
        # module directory and 100-continue handling differ per distro
        if system_type == 'deb':
            mod_path = '/usr/lib/apache2/modules'
            print_continue = 'on'
        else:
            mod_path = '/usr/lib64/httpd/modules'
            print_continue = 'off'
        host, port = role_endpoints[client]
        conf_path = '{tdir}/apache/apache.{client}.conf'.format(
            tdir=testdir, client=client)
        fcgi_path = '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
            tdir=testdir, client=client)
        with file(src, 'rb') as f:
            rendered = f.read().format(
                testdir=testdir,
                mod_path=mod_path,
                print_continue=print_continue,
                host=host,
                port=port,
                client=client,
                idle_timeout=idle_timeout,
            )
            teuthology.write_file(
                remote=remote,
                path=conf_path,
                data=rendered,
            )
        teuthology.write_file(
            remote=remote,
            path=fcgi_path,
            data="""#!/bin/sh
ulimit -c unlimited
exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring --rgw-socket-path {tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock

""".format(tdir=testdir, client=client))
        remote.run(args=['chmod', 'a=rx', fcgi_path])
    try:
        yield
    finally:
        log.info('Removing apache config...')
        for client in config.iterkeys():
            ctx.cluster.only(client).run(args=[
                'rm',
                '-f',
                '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
                                                            client=client),
                run.Raw('&&'),
                'rm',
                '-f',
                '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
                                                                client=client),
            ])
Exemplo n.º 32
0
def ship_apache_configs(ctx, config, role_endpoints):
    """
    Ship apache config and rgw.fcgi to all clients.  Clean up on termination

    :param ctx: test context (provides the cluster)
    :param config: dict mapping client role -> per-client settings
        (``idle_timeout`` honored, default 30)
    :param role_endpoints: dict mapping client role -> (host, port)
    """
    assert isinstance(config, dict)
    assert isinstance(role_endpoints, dict)
    testdir = teuthology.get_testdir(ctx)
    log.info('Shipping apache config and rgw.fcgi...')
    src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
    for client, conf in config.iteritems():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        system_type = teuthology.get_system_type(remote)
        if not conf:
            conf = {}
        idle_timeout = conf.get('idle_timeout', 30)
        # module directory and 100-continue handling differ per distro
        if system_type == 'deb':
            mod_path = '/usr/lib/apache2/modules'
            print_continue = 'on'
        else:
            mod_path = '/usr/lib64/httpd/modules'
            print_continue = 'off'
        host, port = role_endpoints[client]
        # render the apache config template and ship it to the client;
        # note: 'conf' is rebound from the settings dict to the rendered text
        with file(src, 'rb') as f:
            conf = f.read().format(
                testdir=testdir,
                mod_path=mod_path,
                print_continue=print_continue,
                host=host,
                port=port,
                client=client,
                idle_timeout=idle_timeout,
                )
            teuthology.write_file(
                remote=remote,
                path='{tdir}/apache/apache.{client}.conf'.format(
                    tdir=testdir,
                    client=client),
                data=conf,
                )
        # ship the fastcgi wrapper that execs radosgw for this client
        teuthology.write_file(
            remote=remote,
            path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
                tdir=testdir,
                client=client),
            data="""#!/bin/sh
ulimit -c unlimited
exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring --rgw-socket-path {tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock

""".format(tdir=testdir, client=client)
            )
        # apache must be able to read and execute the wrapper
        remote.run(
            args=[
                'chmod',
                'a=rx',
                '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
                                                                client=client),
                ],
            )
    try:
        yield
    finally:
        # remove both shipped files from every client on teardown
        log.info('Removing apache config...')
        for client in config.iterkeys():
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
                                                                client=client),
                    run.Raw('&&'),
                    'rm',
                    '-f',
                    '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
                        tdir=testdir,
                        client=client),
                    ],
                )
Exemplo n.º 33
0
    def end(self):
        """
        Clean up after a CBT run on the first mon: remove the cbt checkout
        and, depending on which benchmark ran, the fio tree or the whole
        cosbench installation (stopping cosbench services first).
        """
        super(CBT, self).end()
        testdir = misc.get_testdir(self.ctx)
        # remove the cbt checkout itself
        self.first_mon.run(args=[
            'rm',
            '--one-file-system',
            '-rf',
            '--',
            '{tdir}/cbt'.format(tdir=testdir),
        ])
        # Py2: dict.keys() is a list, so [0] picks the (single) benchmark
        benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
        if benchmark_type in ['librbdfio', 'fio']:
            self.first_mon.run(args=[
                'rm',
                '--one-file-system',
                '-rf',
                '--',
                '{tdir}/fio'.format(tdir=testdir),
            ])

        if benchmark_type == 'cosbench':
            # cosbench version differs per OS release (see checkout logic)
            os_version = misc.get_system_type(self.first_mon, False, True)
            if os_version == '18.04':
                cosbench_version = 'cosbench-0.4.2.c3.1'
            else:
                cosbench_version = '0.4.2.c3'
            # note: stop-all requires 'nc'
            # best-effort stop; '|| true' keeps teardown going on failure
            self.first_mon.run(args=[
                'cd', testdir,
                run.Raw('&&'), 'cd', 'cos',
                run.Raw('&&'), 'sh', 'stop-all.sh',
                run.Raw('||'), 'true'
            ])
            # make sure no stray cosbench JVMs survive
            self.first_mon.run(
                args=['sudo', 'killall', '-9', 'java',
                      run.Raw('||'), 'true'])
            # remove the unpacked install, the versioned dir, the original
            # zip, and the generated workload xml
            self.first_mon.run(args=[
                'rm',
                '--one-file-system',
                '-rf',
                '--',
                '{tdir}/cos'.format(tdir=testdir),
            ])
            self.first_mon.run(args=[
                'rm',
                '--one-file-system',
                '-rf',
                '--',
                '{tdir}/{version}'.format(tdir=testdir,
                                          version=cosbench_version),
            ])
            self.first_mon.run(args=[
                'rm',
                '--one-file-system',
                '-rf',
                '--',
                '{tdir}/{version}.zip'.format(tdir=testdir,
                                              version=cosbench_version),
            ])
            self.first_mon.run(args=[
                'rm',
                '--one-file-system',
                '-rf',
                '--',
                '{tdir}/xml'.format(tdir=testdir),
            ])
Exemplo n.º 34
0
def ship_apache_configs(ctx, config, role_endpoints, on_client = None,
                        except_client = None):
    """
    Ship apache config and rgw.fcgi to all clients.  Clean up on termination

    :param ctx: test context (provides the cluster and ctx.rgw settings)
    :param config: dict mapping client role -> per-client settings
    :param role_endpoints: dict mapping client role -> (host, port)
    :param on_client: if given, only ship to this client
    :param except_client: if given, skip this client
    """
    assert isinstance(config, dict)
    assert isinstance(role_endpoints, dict)
    testdir = teuthology.get_testdir(ctx)
    log.info('Shipping apache config and rgw.fcgi...')
    src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
    clients_to_create_as = [on_client]
    if on_client is None:
        clients_to_create_as = config.keys()
    for client in clients_to_create_as:
        if client == except_client:
            continue
        (remote,) = ctx.cluster.only(client).remotes.keys()
        system_type = teuthology.get_system_type(remote)
        conf = config.get(client)
        if not conf:
            conf = {}
        idle_timeout = conf.get('idle_timeout', ctx.rgw.default_idle_timeout)
        if system_type == 'deb':
            mod_path = '/usr/lib/apache2/modules'
            print_continue = 'on'
            # Debian/Ubuntu apache runs as www-data; the template's
            # User/Group directives must name a real account
            user = 'www-data'
            group = 'www-data'
            apache24_modconfig = '''
  IncludeOptional /etc/apache2/mods-available/mpm_event.conf
  IncludeOptional /etc/apache2/mods-available/mpm_event.load
'''
        else:
            mod_path = '/usr/lib64/httpd/modules'
            print_continue = 'off'
            # RPM-based httpd runs as the apache user
            user = 'apache'
            group = 'apache'
            apache24_modconfig = \
                'IncludeOptional /etc/httpd/conf.modules.d/00-mpm.conf'
        host, port = role_endpoints[client]

        # decide if we want to use mod_fastcgi or mod_proxy_fcgi
        template_dir = os.path.dirname(__file__)
        fcgi_config = os.path.join(template_dir,
                                   'mod_proxy_fcgi.tcp.conf.template')
        if ctx.rgw.use_fastcgi:
            log.info("Apache is configured to use mod_fastcgi")
            fcgi_config = os.path.join(template_dir,
                                       'mod_fastcgi.conf.template')
        elif _use_uds_with_fcgi(remote):
            log.info("Apache is configured to use mod_proxy_fcgi with UDS")
            fcgi_config = os.path.join(template_dir,
                                       'mod_proxy_fcgi.uds.conf.template')
        else:
            log.info("Apache is configured to use mod_proxy_fcgi with TCP")

        # render the base template plus the chosen fcgi snippet and ship it
        with file(fcgi_config, 'rb') as f:
            fcgi_config = f.read()
        with file(src, 'rb') as f:
            conf = f.read() + fcgi_config
            conf = conf.format(
                testdir=testdir,
                mod_path=mod_path,
                print_continue=print_continue,
                host=host,
                port=port,
                client=client,
                idle_timeout=idle_timeout,
                user=user,
                group=group,
                apache24_modconfig=apache24_modconfig,
                )
            teuthology.write_file(
                remote=remote,
                path='{tdir}/apache/apache.{client}.conf'.format(
                    tdir=testdir,
                    client=client),
                data=conf,
                )
        # radosgw talks over a unix socket for fastcgi/UDS, otherwise TCP
        rgw_options = []
        if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote):
            rgw_options = [
                '--rgw-socket-path',
                '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
                    tdir=testdir,
                    client=client
                ),
                '--rgw-frontends',
                'fastcgi',
            ]
        else:
            rgw_options = [
                '--rgw-socket-path', '""',
                '--rgw-print-continue', 'false',
                '--rgw-frontends',
                'fastcgi socket_port=9000 socket_host=0.0.0.0',
            ]

        # ship the fastcgi wrapper that execs radosgw for this client
        teuthology.write_file(
            remote=remote,
            path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
                tdir=testdir,
                client=client),
            data="""#!/bin/sh
ulimit -c unlimited
exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring {rgw_options}

""".format(tdir=testdir, client=client, rgw_options=" ".join(rgw_options))
            )
        # apache must be able to read and execute the wrapper
        remote.run(
            args=[
                'chmod',
                'a=rx',
                '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
                                                                client=client),
                ],
            )
    try:
        yield
    finally:
        log.info('Removing apache config...')
        for client in clients_to_create_as:
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
                                                                client=client),
                    run.Raw('&&'),
                    'rm',
                    '-f',
                    '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
                        tdir=testdir,
                        client=client),
                    ],
                )
Exemplo n.º 35
0
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading

    Normalizes ``config`` into a remote -> per-node config mapping
    (honoring 'install' overrides from ctx.config), computes the package
    list for each remote's distro flavor, then hands off to
    ``deploy_style`` and verifies the installed version.

    :param ctx: test context
    :param config: dict keyed by role (or with an 'all' key applying to
        every remote); may carry 'project', 'extra_packages',
        'exclude_packages', and per-node sha1/tag/branch targets
    :param deploy_style: callable(ctx, node, remote, pkgs, system_type)
    """
    assert config is None or isinstance(config, dict), "install.upgrade only supports a dictionary for configuration"

    project = config.get("project", "ceph")

    # use 'install' overrides here, in case the upgrade target is left
    # unspecified/implicit.
    install_overrides = ctx.config.get("overrides", {}).get("install", {}).get(project, {})
    log.info("project %s config %s overrides %s", project, config, install_overrides)

    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get("extra_packages", [])
    log.info("extra packages: {packages}".format(packages=extra_pkgs))

    # build a normalized remote -> config dict
    remotes = {}
    if "all" in config:
        for remote in ctx.cluster.remotes.iterkeys():
            remotes[remote] = config.get("all")
    else:
        for role in config.keys():
            remotes_dict = ctx.cluster.only(role).remotes
            if not remotes_dict:
                # This is a regular config argument, not a role
                continue
            # Py2: .keys() is a list; take the single matching remote
            remote = remotes_dict.keys()[0]
            if remote in remotes:
                log.warn("remote %s came up twice (role %s)", remote, role)
                continue
            remotes[remote] = config.get(role)

    for remote, node in remotes.iteritems():
        if not node:
            node = {}

        # if the node pins an explicit target, the override must not
        # clobber it — strip sha1/tag/branch from the override first
        this_overrides = copy.deepcopy(install_overrides)
        if "sha1" in node or "tag" in node or "branch" in node:
            log.info("config contains sha1|tag|branch, removing those keys from override")
            this_overrides.pop("sha1", None)
            this_overrides.pop("tag", None)
            this_overrides.pop("branch", None)
        teuthology.deep_merge(node, this_overrides)
        log.info("remote %s config %s", remote, node)

        system_type = teuthology.get_system_type(remote)
        assert system_type in ("deb", "rpm")
        pkgs = PACKAGES[project][system_type]
        # drop any explicitly excluded packages before upgrading
        excluded_packages = config.get("exclude_packages", list())
        pkgs = list(set(pkgs).difference(set(excluded_packages)))
        log.info(
            "Upgrading {proj} {system_type} packages: {pkgs}".format(
                proj=project, system_type=system_type, pkgs=", ".join(pkgs)
            )
        )
        # FIXME: again, make extra_pkgs distro-agnostic
        pkgs += extra_pkgs
        node["project"] = project

        deploy_style(ctx, node, remote, pkgs, system_type)
        verify_package_version(ctx, node, remote)
Exemplo n.º 36
0
def download_kernel(ctx, config):
    """
    Download a Debian kernel and copy the associated linux image.

    For rpm remotes the kernel is installed straight from the gitbuilder
    URL (skipped when the requested sha1 is already installed); for deb
    remotes the package is fetched to /tmp/linux-image.deb.  Distro
    kernels need no download at all.

    :param ctx: Context
    :param config: Configuration (role -> sha1 / local path / 'distro')
    """
    procs = {}
    for role, src in config.iteritems():
        (role_remote, ) = ctx.cluster.only(role).remotes.keys()
        if src.find('distro') >= 0:
            # Distro kernels come from the distro's own repos; nothing to
            # download.  Use 'continue' (not 'return') so the remaining
            # roles are still processed and any downloads already started
            # get waited on below.
            log.info('Installing newest kernel distro')
            continue
        package_type = teuthology.get_system_type(role_remote)
        if src.find('/') >= 0:
            # local deb - src is a path on the local filesystem
            log.info('Copying kernel deb {path} to {role}...'.format(
                path=src, role=role))
            f = open(src, 'r')
            # stream the file over the remote's stdin into /tmp
            proc = role_remote.run(args=[
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                '/tmp/linux-image.deb',
            ],
                                   wait=False,
                                   stdin=f)
            procs[role_remote.name] = proc
        else:
            # gitbuilder package - src is a sha1
            log.info('Downloading kernel {sha1} on {role}...'.format(
                sha1=src, role=role))
            if package_type == 'rpm':
                dist, ver = teuthology.get_system_type(role_remote,
                                                       distro=True,
                                                       version=True)
                # gitbuilder dist strings use the major version only
                if '.' in ver:
                    ver = ver.split('.')[0]
                ldist = '{dist}{ver}'.format(dist=dist, ver=ver)
                _, rpm_url = teuthology.get_ceph_binary_url(
                    package='kernel',
                    sha1=src,
                    format='rpm',
                    flavor='basic',
                    arch='x86_64',
                    dist=ldist,
                )

                kernel_url = urlparse.urljoin(rpm_url, 'kernel.x86_64.rpm')
                output, err_mess = StringIO(), StringIO()
                role_remote.run(
                    args=['sudo', 'yum', 'list', 'installed', 'kernel'],
                    stdout=output,
                    stderr=err_mess)
                # skip the install if the short (first 7 chars) sha1 already
                # appears in the installed-kernel list
                short_sha1 = src[0:7]
                if short_sha1 in output.getvalue():
                    output.close()
                    err_mess.close()
                    continue
                output.close()
                err_mess.close()
                proc = role_remote.run(
                    args=['sudo', 'yum', 'install', '-y', kernel_url],
                    wait=False)
                procs[role_remote.name] = proc
                continue

            larch, ldist = _find_arch_and_dist(ctx)
            _, deb_url = teuthology.get_ceph_binary_url(
                package='kernel',
                sha1=src,
                format='deb',
                flavor='basic',
                arch=larch,
                dist=ldist,
            )

            log.info('fetching kernel from {url}'.format(url=deb_url))
            proc = role_remote.run(args=[
                'sudo',
                'rm',
                '-f',
                '/tmp/linux-image.deb',
                run.Raw('&&'),
                'echo',
                'linux-image.deb',
                run.Raw('|'),
                'wget',
                '-nv',
                '-O',
                '/tmp/linux-image.deb',
                '--base={url}'.format(url=deb_url),
                '--input-file=-',
            ],
                                   wait=False)
            procs[role_remote.name] = proc

    # all downloads/copies were launched async; wait for them here
    for name, proc in procs.iteritems():
        log.debug('Waiting for download/copy to %s to complete...', name)
        proc.wait()
Exemplo n.º 37
0
def install_and_reboot(ctx, config):
    """
    Install and reboot the kernel.  This mostly performs remote
    installation operations.   The code does check for Arm images
    and skips grub operations if the kernel is Arm.  Otherwise, it
    extracts kernel titles from submenu entries and makes the appropriate
    grub calls.   The assumptions here are somewhat simplified in that
    it expects kernel entries to be present under submenu entries.

    :param ctx: Context
    :param config: Configuration (role -> package source; 'distro' means
        the distro kernel is installed instead)
    """
    procs = {}
    kernel_title = ''
    for role, src in config.iteritems():
        (role_remote, ) = ctx.cluster.only(role).remotes.keys()
        # distro kernels are handled entirely by install_kernel()
        if isinstance(src, str) and src.find('distro') >= 0:
            log.info('Installing distro kernel on {role}...'.format(role=role))
            install_kernel(role_remote)
            continue

        log.info('Installing kernel {src} on {role}...'.format(src=src,
                                                               role=role))
        system_type = teuthology.get_system_type(role_remote)
        if system_type == 'rpm':
            # force-install the rpm (allow downgrades and re-installs),
            # then let install_kernel() handle initrd/grub and reboot
            proc = role_remote.run(args=[
                'sudo',
                'rpm',
                '-ivh',
                '--oldpackage',
                '--replacefiles',
                '--replacepkgs',
                remote_pkg_path(role_remote),
            ])
            install_kernel(role_remote, remote_pkg_path(role_remote))
            continue

        # TODO: Refactor this into install_kernel() so that it handles all
        # cases for both rpm and deb packages.
        proc = role_remote.run(
            args=[
                # install the kernel deb
                'sudo',
                'dpkg',
                '-i',
                remote_pkg_path(role_remote),
            ], )

        # collect kernel image name from the .deb
        kernel_title = get_image_version(role_remote,
                                         remote_pkg_path(role_remote))
        log.info('searching for kernel {}'.format(kernel_title))

        # Arm (highbank) images don't use grub: just symlink and reboot
        if kernel_title.endswith("-highbank"):
            _no_grub_link('vmlinuz', role_remote, kernel_title)
            _no_grub_link('initrd.img', role_remote, kernel_title)
            proc = role_remote.run(
                args=[
                    'sudo',
                    'shutdown',
                    '-r',
                    'now',
                ],
                wait=False,
            )
            procs[role_remote.name] = proc
            continue

        # look for menuentry for our kernel, and collect any
        # submenu entries for their titles.  Assume that if our
        # kernel entry appears later in the file than a submenu entry,
        # it's actually nested under that submenu.  If it gets more
        # complex this will totally break.

        cmdout = StringIO()
        proc = role_remote.run(
            args=[
                'egrep', '(submenu|menuentry.*' + kernel_title + ').*{',
                '/boot/grub/grub.cfg'
            ],
            stdout=cmdout,
        )
        submenu_title = ''
        default_title = ''
        # scan matched grub.cfg lines; remember the last submenu seen,
        # stop at the first menuentry matching our kernel title
        for l in cmdout.getvalue().split('\n'):
            fields = shlex.split(l)
            if len(fields) >= 2:
                command, title = fields[:2]
                if command == 'submenu':
                    submenu_title = title + '>'
                if command == 'menuentry':
                    if title.endswith(kernel_title):
                        default_title = title
                        break
        cmdout.close()
        log.info('submenu_title:{}'.format(submenu_title))
        log.info('default_title:{}'.format(default_title))

        proc = role_remote.run(
            args=[
                # use the title(s) to construct the content of
                # the grub menu entry, so we can default to it.
                '/bin/echo',
                '-e',
                r'cat <<EOF\nset default="' + submenu_title + \
                    default_title + r'"\nEOF\n',
                # make it look like an emacs backup file so
                # unfortunately timed update-grub runs don't pick it
                # up yet; use sudo tee so we are able to write to /etc
                run.Raw('|'),
                'sudo',
                'tee',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('>/dev/null'),
                run.Raw('&&'),
                'sudo',
                'chmod',
                'a+x',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('&&'),
                'sudo',
                'mv',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                '/etc/grub.d/01_ceph_kernel',
                # update grub again so it accepts our default
                run.Raw('&&'),
                'sudo',
                'update-grub',
                run.Raw('&&'),
                'rm',
                remote_pkg_path(role_remote),
                run.Raw('&&'),
                'sudo',
                'shutdown',
                '-r',
                'now',
                ],
            wait=False,
            )
        procs[role_remote.name] = proc

    # reboots were issued async; wait for each remote's pipeline
    for name, proc in procs.iteritems():
        log.debug('Waiting for install on %s to complete...', name)
        proc.wait()
Exemplo n.º 38
0
def install_kernel(remote, path=None):
    """
    A bit of misnomer perhaps - the actual kernel package is installed
    elsewhere, this function deals with initrd and grub.  Currently the
    following cases are handled:
      - local, gitbuilder, distro for rpm packages
      - distro for deb packages - see TODO in install_and_reboot()

    TODO: reboots should be issued from install_and_reboot()

    :param remote: remote host to operate on
    :param path: package path (for local and gitbuilder cases)
    """
    system_type = teuthology.get_system_type(remote)
    if system_type == 'rpm':
        if path:
            version = get_image_version(remote, path)
            # This is either a gitbuilder or a local package and both of these
            # could have been built with upstream rpm targets with specs that
            # don't have a %post section at all, which means no initrd.
            maybe_generate_initrd_rpm(remote, path, version)
        else:
            # distro case: boot whatever the newest installed kernel is
            version = get_latest_image_version_rpm(remote)
        update_grub_rpm(remote, version)
        remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
        return

    if system_type == 'deb':
        distribution = teuthology.get_system_type(remote, distro=True)
        newversion = get_latest_image_version_deb(remote, distribution)
        if 'ubuntu' in distribution:
            # parse grub.cfg to find the submenu/menuentry titles that
            # select the new kernel
            grub2conf = teuthology.get_file(remote, '/boot/grub/grub.cfg',
                                            True)
            submenu = ''
            menuentry = ''
            for line in grub2conf.split('\n'):
                if 'submenu' in line:
                    submenu = line.split('submenu ')[1]
                    # Ubuntu likes to be sneaky and change formatting of
                    # grub.cfg between quotes/doublequotes between versions
                    if submenu.startswith("'"):
                        submenu = submenu.split("'")[1]
                    if submenu.startswith('"'):
                        submenu = submenu.split('"')[1]
                if 'menuentry' in line:
                    # skip recovery entries; take the first normal entry
                    # for the new kernel version
                    if newversion in line and 'recovery' not in line:
                        menuentry = line.split('\'')[1]
                        break
            # nested entries are addressed as "submenu>menuentry" in grub
            if submenu:
                grubvalue = submenu + '>' + menuentry
            else:
                grubvalue = menuentry
            grubfile = 'cat <<EOF\nset default="' + grubvalue + '"\nEOF'
            # replace /etc/grub.d/01_ceph_kernel with a script that emits
            # our default, then regenerate grub.cfg and reboot
            teuthology.delete_file(remote,
                                   '/etc/grub.d/01_ceph_kernel',
                                   sudo=True,
                                   force=True)
            teuthology.sudo_write_file(remote, '/etc/grub.d/01_ceph_kernel',
                                       StringIO(grubfile), '755')
            log.info(
                'Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'update-grub'])
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
            return

        if 'debian' in distribution:
            # debian's grub handling is factored out into a generic helper
            grub2_kernel_select_generic(remote, newversion, 'deb')
            log.info(
                'Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
            return
Exemplo n.º 39
0
def download_kernel(ctx, config):
    """
    Supply each remote with a kernel package:
      - local kernels are copied over
      - gitbuilder kernels are downloaded
      - nothing is done for distro kernels

    :param ctx: Context
    :param config: Configuration mapping role -> kernel source; a source is
                   'distro', a koji build_info dict, a local path (contains
                   '/'), or a gitbuilder sha1.
    :raises UnsupportedPackageTypeError: for remotes that are neither rpm
                                         nor deb.
    """
    procs = {}
    # local package files streamed to remotes; must stay open until the
    # corresponding copy process has finished, so they are closed at the end
    local_files = []
    for role, src in config.iteritems():
        needs_download = False

        if src == 'distro':
            # don't need to download distro kernels
            log.debug("src is distro, skipping download")
            continue

        (role_remote, ) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, dict):
            # we're downloading a kernel from koji, the src dict here
            # is the build_info retrieved from koji using get_koji_build_info
            build_id = src["id"]
            log.info(
                "Downloading kernel with build_id {build_id} on {role}...".
                format(build_id=build_id, role=role))
            needs_download = True
            baseurl = get_kojiroot_base_url(src)
            pkg_name = get_koji_package_name("kernel", src)
        elif src.find('/') >= 0:
            # local package - src is path
            log.info('Copying kernel package {path} to {role}...'.format(
                path=src, role=role))
            # open in binary mode: kernel packages are binary artifacts and
            # text mode could corrupt the stream (and breaks on Python 3)
            f = open(src, 'rb')
            local_files.append(f)
            proc = role_remote.run(args=[
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                remote_pkg_path(role_remote),
            ],
                                   wait=False,
                                   stdin=f)
            procs[role_remote.name] = proc
        else:
            # gitbuilder package - src is sha1
            log.info('Downloading kernel {sha1} on {role}...'.format(
                sha1=src, role=role))
            needs_download = True
            package_type = role_remote.os.package_type
            if package_type == 'rpm':
                system_type, system_ver = teuthology.get_system_type(
                    role_remote, distro=True, version=True)
                # gitbuilder dists use the major version only (e.g. centos7)
                if '.' in system_ver:
                    system_ver = system_ver.split('.')[0]
                ldist = '{system_type}{system_ver}'.format(
                    system_type=system_type, system_ver=system_ver)
                larch = 'x86_64'
            elif package_type == 'deb':
                ldist, larch = role_remote.os.codename, role_remote.arch
            else:
                raise UnsupportedPackageTypeError(role_remote)

            _, baseurl = teuthology.get_ceph_binary_url(
                package='kernel',
                sha1=src,
                format=package_type,
                flavor='basic',
                arch=larch,
                dist=ldist,
            )

            pkg_name = gitbuilder_pkg_name(role_remote)

            log.info("fetching, gitbuilder baseurl is %s", baseurl)

        if needs_download:
            proc = role_remote.run(args=[
                'rm',
                '-f',
                remote_pkg_path(role_remote),
                run.Raw('&&'),
                'echo',
                pkg_name,
                run.Raw('|'),
                'wget',
                '-nv',
                '-O',
                remote_pkg_path(role_remote),
                '--base={url}'.format(url=baseurl),
                '--input-file=-',
            ],
                                   wait=False)
            procs[role_remote.name] = proc

    for name, proc in procs.iteritems():
        log.debug('Waiting for download/copy to %s to complete...', name)
        proc.wait()
    # all transfers finished; release the local file handles we held open
    for f in local_files:
        f.close()
Exemplo n.º 40
0
def install_firmware(ctx, config):
    """
    Go to the github to get the latest firmware.

    Clones (or re-initializes) the linux-firmware git repository into
    /lib/firmware/updates on each deb-based remote and hard-resets it to
    origin/master.  Distro-kernel roles and rpm remotes are not touched.

    :param ctx: Context
    :param config: Configuration (role -> kernel source mapping)
    """
    linux_firmware_git_upstream = 'git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git'
    # teuth_config may override the upstream firmware repo URL
    uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
    fw_dir = '/lib/firmware/updates'

    for role in config.iterkeys():
        if isinstance(config[role], str) and config[role].find('distro') >= 0:
            log.info('Skipping firmware on distro kernel')
            # NOTE(review): this is a 'return', not 'continue' -- a single
            # distro role skips firmware for all remaining roles as well.
            # Looks intentional in context but worth confirming.
            return
        (role_remote, ) = ctx.cluster.only(role).remotes.keys()
        package_type = teuthology.get_system_type(role_remote)
        if package_type == 'rpm':
            # NOTE(review): also an early return; roles listed after the
            # first rpm remote are never processed -- confirm whether this
            # should be 'continue'.
            return
        log.info('Installing linux-firmware on {role}...'.format(role=role))
        role_remote.run(
            args=[
                # kludge around mysterious 0-byte .git/HEAD files
                'cd',
                fw_dir,
                run.Raw('&&'),
                'test',
                '-d',
                '.git',
                run.Raw('&&'),
                'test',
                '!',
                '-s',
                '.git/HEAD',
                run.Raw('&&'),
                'sudo',
                'rm',
                '-rf',
                '.git',
                run.Raw(';'),
                # init
                'sudo',
                'install',
                '-d',
                '-m0755',
                fw_dir,
                run.Raw('&&'),
                'cd',
                fw_dir,
                run.Raw('&&'),
                'sudo',
                'git',
                'init',
            ], )
        # point 'origin' at the firmware repo unless a remote is already set
        role_remote.run(args=[
            'sudo',
            'git',
            '--git-dir=%s/.git' % fw_dir,
            'config',
            '--get',
            'remote.origin.url',
            run.Raw('>/dev/null'),
            run.Raw('||'),
            'sudo',
            'git',
            '--git-dir=%s/.git' % fw_dir,
            'remote',
            'add',
            'origin',
            uri,
        ], )
        # sync working tree to the latest upstream master
        role_remote.run(args=[
            'cd', fw_dir,
            run.Raw('&&'), 'sudo', 'git', 'fetch', 'origin',
            run.Raw('&&'), 'sudo', 'git', 'reset', '--hard', 'origin/master'
        ], )
Exemplo n.º 41
0
def upgrade(ctx, config):
    """
    Upgrade project packages (deb or rpm) on the targeted remotes.

    For example::

        tasks:
        - install.upgrade:
             all:
                branch: end
    or
        tasks:
        - install.upgrade:
             mon.a:
                branch: end
             osd.0:
                branch: other

    :param ctx: the run context (provides the cluster)
    :param config: dict of 'all' or role -> {branch|tag|sha1: value}
    """
    assert config is None or isinstance(config, dict), \
        "install.upgrade only supports a dictionary for configuration"

    for i in config.keys():
            assert isinstance(config.get(i), dict), 'host supports dictionary'

    branch = None

    project = config.get('project', 'ceph')

    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))

    if config.get('all') is not None:
        # 'all' path: resolve a single branch/tag/sha1 and upgrade every
        # remote in the cluster with it
        node = config.get('all')
        for var, branch_val in node.iteritems():
            if var == 'branch' or var == 'tag' or var == 'sha1':
                branch = branch_val
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            assert system_type in ('deb', 'rpm')
            pkgs = PACKAGES[project][system_type]
            log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
                proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
            # FIXME: again, make extra_pkgs distro-agnostic
            pkgs += extra_pkgs
            if system_type == 'deb':
                _upgrade_deb_packages(ctx, config, remote, pkgs, branch)
            elif system_type == 'rpm':
                _upgrade_rpm_packages(ctx, config, remote, pkgs, branch)
    # FIXME: I highly doubt if this needs to be a separate codepath.
    else:
        # per-role path: each remote is upgraded at most once (list_roles)
        list_roles = []
        for role in config.keys():
            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
            kkeys = config.get(role)
            if remote in list_roles:
                continue
            else:
                # NOTE(review): the upgrade only runs when a branch/tag/sha1
                # key is present for the role -- unlike the 'all' path, which
                # upgrades even with branch=None.  Confirm this asymmetry is
                # intended.
                for var, branch_val in kkeys.iteritems():
                    if var == 'branch' or var == 'tag' or var == 'sha1':
                        branch = branch_val
                        system_type = teuthology.get_system_type(remote)
                        assert system_type in ('deb', 'rpm')
                        pkgs = PACKAGES[project][system_type]
                        if system_type == 'deb':
                            _upgrade_deb_packages(ctx, config, remote, pkgs, branch)
                        elif system_type == 'rpm':
                            _upgrade_rpm_packages(ctx, config, remote, pkgs, branch)
                        list_roles.append(remote)
    yield
Exemplo n.º 42
0
def cli_test(ctx, config):
    """
     ceph-deploy cli to exercise most commonly use cli's and ensure
     all commands works and also startup the init system.

    Deploys mon/mgr/osd/cli/rgw via ceph-deploy on a single node, waits for
    the cluster to reach HEALTH_OK, then purges everything on exit.
    """
    log.info('Ceph-deploy Test')
    if config is None:
        config = {}
    test_branch = ''
    conf_dir = teuthology.get_testdir(ctx) + "/cdtest"

    def execute_cdeploy(admin, cmd, path):
        """Execute ceph-deploy commands """
        """Either use git path or repo path """
        args = ['cd', conf_dir, run.Raw(';')]
        if path:
            args.append('{path}/ceph-deploy/ceph-deploy'.format(path=path))
        else:
            args.append('ceph-deploy')
        args.append(run.Raw(cmd))
        ec = admin.run(args=args, check_status=False).exitstatus
        if ec != 0:
            raise RuntimeError(
                "failed during ceph-deploy cmd: {cmd} , ec={ec}".format(cmd=cmd, ec=ec))

    # rhbuild uses the system-installed ceph-deploy; otherwise run it from
    # the checkout in the test dir, optionally pinned to a dev branch
    if config.get('rhbuild'):
        path = None
    else:
        path = teuthology.get_testdir(ctx)
        # test on branch from config eg: wip-* , master or next etc
        # packages for all distro's should exist for wip*
        if ctx.config.get('branch'):
            branch = ctx.config.get('branch')
            test_branch = ' --dev={branch} '.format(branch=branch)
    mons = ctx.cluster.only(teuthology.is_type('mon'))
    # NOTE(review): 'admin'/'nodename' keep the values from the *last* mon
    # in this loop and are used for everything below -- assumes effectively
    # a single mon node.
    for node, role in mons.remotes.items():
        admin = node
        admin.run(args=['mkdir', conf_dir], check_status=False)
        nodename = admin.shortname
    system_type = teuthology.get_system_type(admin)
    if config.get('rhbuild'):
        admin.run(args=['sudo', 'yum', 'install', 'ceph-deploy', '-y'])
    log.info('system type is %s', system_type)
    osds = ctx.cluster.only(teuthology.is_type('osd'))

    # NOTE(review): 'remote' and 'devs' likewise leak from this loop and are
    # used later; requires at least one osd remote with >= 3 scratch devices.
    for remote, roles in osds.remotes.items():
        devs = teuthology.get_scratch_devices(remote)
        log.info("roles %s", roles)
        if (len(devs) < 3):
            log.error(
                'Test needs minimum of 3 devices, only found %s',
                str(devs))
            raise RuntimeError("Needs minimum of 3 devices ")

    conf_path = '{conf_dir}/ceph.conf'.format(conf_dir=conf_dir)
    new_cmd = 'new ' + nodename
    execute_cdeploy(admin, new_cmd, path)
    # append any extra ceph.conf sections/keys supplied by the task config
    if config.get('conf') is not None:
        confp = config.get('conf')
        for section, keys in confp.items():
            lines = '[{section}]\n'.format(section=section)
            teuthology.append_lines_to_file(admin, conf_path, lines,
                                            sudo=True)
            for key, value in keys.items():
                log.info("[%s] %s = %s" % (section, key, value))
                lines = '{key} = {value}\n'.format(key=key, value=value)
                teuthology.append_lines_to_file(admin, conf_path, lines,
                                                sudo=True)
    new_mon_install = 'install {branch} --mon '.format(
        branch=test_branch) + nodename
    new_mgr_install = 'install {branch} --mgr '.format(
        branch=test_branch) + nodename
    new_osd_install = 'install {branch} --osd '.format(
        branch=test_branch) + nodename
    new_admin = 'install {branch} --cli '.format(branch=test_branch) + nodename
    create_initial = 'mon create-initial '
    mgr_create = 'mgr create ' + nodename
    # either use create-keys or push command
    push_keys = 'admin ' + nodename
    execute_cdeploy(admin, new_mon_install, path)
    execute_cdeploy(admin, new_mgr_install, path)
    execute_cdeploy(admin, new_osd_install, path)
    execute_cdeploy(admin, new_admin, path)
    execute_cdeploy(admin, create_initial, path)
    execute_cdeploy(admin, mgr_create, path)
    execute_cdeploy(admin, push_keys, path)

    # prepare the first three scratch devices as osds ('node:dev' syntax)
    for i in range(3):
        zap_disk = 'disk zap ' + "{n}:{d}".format(n=nodename, d=devs[i])
        prepare = 'osd prepare ' + "{n}:{d}".format(n=nodename, d=devs[i])
        execute_cdeploy(admin, zap_disk, path)
        execute_cdeploy(admin, prepare, path)

    log.info("list files for debugging purpose to check file permissions")
    admin.run(args=['ls', run.Raw('-lt'), conf_dir])
    remote.run(args=['sudo', 'ceph', '-s'], check_status=False)
    r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
    out = r.stdout.getvalue()
    log.info('Ceph health: %s', out.rstrip('\n'))
    log.info("Waiting for cluster to become healthy")
    # poll 'ceph health' up to 6 times, 10s apart, for HEALTH_OK
    with contextutil.safe_while(sleep=10, tries=6,
                                action='check health') as proceed:
        while proceed():
            r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
            out = r.stdout.getvalue()
            if (out.split(None, 1)[0] == 'HEALTH_OK'):
                break
    rgw_install = 'install {branch} --rgw {node}'.format(
        branch=test_branch,
        node=nodename,
    )
    rgw_create = 'rgw create ' + nodename
    execute_cdeploy(admin, rgw_install, path)
    execute_cdeploy(admin, rgw_create, path)
    log.info('All ceph-deploy cli tests passed')
    try:
        yield
    finally:
        # teardown: stop daemons, unmount osd partitions, purge and clean up
        log.info("cleaning up")
        ctx.cluster.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'],
                        check_status=False)
        time.sleep(4)
        for i in range(3):
            umount_dev = "{d}1".format(d=devs[i])
            r = remote.run(args=['sudo', 'umount', run.Raw(umount_dev)])
        cmd = 'purge ' + nodename
        execute_cdeploy(admin, cmd, path)
        cmd = 'purgedata ' + nodename
        execute_cdeploy(admin, cmd, path)
        log.info("Removing temporary dir")
        admin.run(
            args=[
                'rm',
                run.Raw('-rf'),
                run.Raw(conf_dir)],
            check_status=False)
        if config.get('rhbuild'):
            admin.run(args=['sudo', 'yum', 'remove', 'ceph-deploy', '-y'])
Exemplo n.º 43
0
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading

    Builds a normalized remote -> per-node config mapping from either the
    'all' key or individual role keys, merges in 'install' overrides, and
    invokes deploy_style once per remote.

    :param ctx: the run context (provides cluster and overrides)
    :param config: dict of 'all' or role -> node config
    :param deploy_style: callable(ctx, node, remote, pkgs, system_type)
                         performing the actual upgrade
    """
    assert config is None or isinstance(config, dict), \
        "install.upgrade only supports a dictionary for configuration"

    project = config.get('project', 'ceph')

    # use 'install' overrides here, in case the upgrade target is left
    # unspecified/implicit.
    install_overrides = ctx.config.get('overrides',
                                       {}).get('install', {}).get(project, {})
    log.info('project %s config %s overrides %s', project, config,
             install_overrides)

    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))

    # build a normalized remote -> config dict
    remotes = {}
    if 'all' in config:
        for remote in ctx.cluster.remotes.iterkeys():
            remotes[remote] = config.get('all')
    else:
        for role in config.keys():
            remotes_dict = ctx.cluster.only(role).remotes
            if not remotes_dict:
                # This is a regular config argument, not a role
                continue
            # NOTE(review): keys()[0] is Python-2 only (dict views are not
            # indexable on Python 3); would need list(remotes_dict)[0] there.
            remote = remotes_dict.keys()[0]
            if remote in remotes:
                log.warn('remote %s came up twice (role %s)', remote, role)
                continue
            remotes[remote] = config.get(role)

    for remote, node in remotes.iteritems():
        if not node:
            node = {}

        this_overrides = copy.deepcopy(install_overrides)
        # an explicit upgrade target in the node config beats the override
        if 'sha1' in node or 'tag' in node or 'branch' in node:
            log.info(
                'config contains sha1|tag|branch, removing those keys from override'
            )
            this_overrides.pop('sha1', None)
            this_overrides.pop('tag', None)
            this_overrides.pop('branch', None)
        teuthology.deep_merge(node, this_overrides)
        log.info('remote %s config %s', remote, node)

        system_type = teuthology.get_system_type(remote)
        assert system_type in ('deb', 'rpm')
        pkgs = PACKAGES[project][system_type]
        excluded_packages = config.get('exclude_packages', list())
        pkgs = list(set(pkgs).difference(set(excluded_packages)))
        log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
            proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
        # FIXME: again, make extra_pkgs distro-agnostic
        pkgs += extra_pkgs
        node['project'] = project

        deploy_style(ctx, node, remote, pkgs, system_type)
Exemplo n.º 44
0
 def system_type(self):
     """
     System type decorator

     Return this remote's package system type as reported by
     misc.get_system_type (typically 'deb' or 'rpm'); presumably exposed
     as a @property -- decorator not visible in this view.
     """
     return misc.get_system_type(self)
Exemplo n.º 45
0
def download_ceph_deploy(ctx, config):
    """
    Downloads ceph-deploy from the ceph.com git mirror and (by default)
    switches to the master branch. If the `ceph-deploy-branch` is specified, it
    will use that instead. The `bootstrap` script is ran, with the argument
    obtained from `python_version`, if specified.

    Yields after setup; removes the checkout on exit.
    """
    ceph_admin = ctx.cluster.only(teuthology.get_first_mon(ctx, config))

    # only install python explicitly when python_version is configured
    try:
        py_ver = str(config['python_version'])
    except KeyError:
        pass
    else:
        supported_versions = ['2', '3']
        if py_ver not in supported_versions:
            raise ValueError("python_version must be: {}, not {}".format(
                ' or '.join(supported_versions), py_ver
            ))

        log.info("Installing Python")
        # NOTE(review): this loop only records the *last* admin's system
        # type, yet the install below runs on the whole cluster -- assumes
        # homogeneous remotes; confirm.
        for admin in ceph_admin.remotes:
            system_type = teuthology.get_system_type(admin)

        if system_type == 'rpm':
            package = 'python34' if py_ver == '3' else 'python'
            ctx.cluster.run(args=[
                'sudo', 'yum', '-y', 'install',
                package, 'python-virtualenv'
            ])
        else:
            package = 'python3' if py_ver == '3' else 'python'
            ctx.cluster.run(args=[
                'sudo', 'apt-get', '-y', '--force-yes', 'install',
                package, 'python-virtualenv'
            ])

    log.info('Downloading ceph-deploy...')
    testdir = teuthology.get_testdir(ctx)
    ceph_deploy_branch = config.get('ceph-deploy-branch', 'master')

    ceph_admin.run(
        args=[
            'git', 'clone', '-b', ceph_deploy_branch,
            teuth_config.ceph_git_base_url + 'ceph-deploy.git',
            '{tdir}/ceph-deploy'.format(tdir=testdir),
        ],
    )
    # bootstrap the checkout, passing the python version through if set
    args = [
        'cd',
        '{tdir}/ceph-deploy'.format(tdir=testdir),
        run.Raw('&&'),
        './bootstrap',
    ]
    try:
        args.append(str(config['python_version']))
    except KeyError:
        pass
    ceph_admin.run(args=args)

    try:
        yield
    finally:
        log.info('Removing ceph-deploy ...')
        ceph_admin.run(
            args=[
                'rm',
                '-rf',
                '{tdir}/ceph-deploy'.format(tdir=testdir),
            ],
        )
Exemplo n.º 46
0
def cli_test(ctx, config):
    """
     ceph-deploy cli to exercise most commonly use cli's and ensure
     all commands works and also startup the init system.

    Older variant: works out of ~/cdtest, expects HEALTH_WARN (no osd
    activation here), and purges everything on exit.
    """
    log.info('Ceph-deploy Test')
    if config is None:
        config = {}

    test_branch = ''
    # rhbuild uses the system ceph-deploy; otherwise run from the test dir
    if config.get('rhbuild'):
        path = None
    else:
        path = teuthology.get_testdir(ctx)
        # test on branch from config eg: wip-* , master or next etc
        # packages for all distro's should exist for wip*
        if ctx.config.get('branch'):
            branch = ctx.config.get('branch')
            test_branch = ' --dev={branch} '.format(branch=branch)
    mons = ctx.cluster.only(teuthology.is_type('mon'))
    # NOTE(review): 'admin'/'nodename' keep the last mon's values; also,
    # args are not shell-expanded, so 'mkdir ~/ cdtest' creates a literal
    # 'cdtest' in the cwd rather than under $HOME -- hence check_status=False.
    for node, role in mons.remotes.iteritems():
        admin = node
        admin.run(args=['mkdir', '~/', 'cdtest'], check_status=False)
        nodename = admin.shortname
    system_type = teuthology.get_system_type(admin)
    if config.get('rhbuild'):
        admin.run(args=['sudo', 'yum', 'install', 'ceph-deploy', '-y'])
    log.info('system type is %s', system_type)
    osds = ctx.cluster.only(teuthology.is_type('osd'))

    # 'remote' and 'devs' leak from this loop for use below; requires at
    # least one osd remote with >= 3 scratch devices
    for remote, roles in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        log.info("roles %s", roles)
        if (len(devs) < 3):
            log.error('Test needs minimum of 3 devices, only found %s',
                      str(devs))
            raise RuntimeError("Needs minimum of 3 devices ")

    new_cmd = 'new ' + nodename
    new_mon_install = 'install {branch} --mon '.format(
        branch=test_branch) + nodename
    new_osd_install = 'install {branch} --osd '.format(
        branch=test_branch) + nodename
    new_admin = 'install {branch} --cli '.format(branch=test_branch) + nodename
    create_initial = '--overwrite-conf mon create-initial '
    # execute_cdeploy is expected to be defined at module level in this file
    execute_cdeploy(admin, new_cmd, path)
    execute_cdeploy(admin, new_mon_install, path)
    execute_cdeploy(admin, new_osd_install, path)
    execute_cdeploy(admin, new_admin, path)
    execute_cdeploy(admin, create_initial, path)

    # zap and prepare (but do not activate) the first three scratch devices
    for i in range(3):
        zap_disk = 'disk zap ' + "{n}:{d}".format(n=nodename, d=devs[i])
        prepare = 'osd prepare ' + "{n}:{d}".format(n=nodename, d=devs[i])
        execute_cdeploy(admin, zap_disk, path)
        execute_cdeploy(admin, prepare, path)

    admin.run(args=['ls', run.Raw('-lt'), run.Raw('~/cdtest/')])
    time.sleep(4)
    remote.run(args=['sudo', 'ceph', '-s'], check_status=False)
    r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
    out = r.stdout.getvalue()
    log.info('Ceph health: %s', out.rstrip('\n'))
    # osds were prepared but not activated, so HEALTH_WARN is the pass state
    if out.split(None, 1)[0] == 'HEALTH_WARN':
        log.info('All ceph-deploy cli tests passed')
    else:
        raise RuntimeError("Failed to reach HEALTH_WARN State")

    #test rgw cli
    rgw_install = 'install {branch} --rgw {node}'.format(
        branch=test_branch,
        node=nodename,
    )
    rgw_create = 'rgw create ' + nodename
    execute_cdeploy(admin, rgw_install, path)
    execute_cdeploy(admin, rgw_create, path)
    try:
        yield
    finally:
        # teardown: stop daemons (upstart/sysvinit/systemd fallbacks),
        # unmount osd partitions, purge, and remove the work dir
        log.info("cleaning up")
        ctx.cluster.run(args=[
            'sudo', 'stop', 'ceph-all',
            run.Raw('||'), 'sudo', 'service', 'ceph', 'stop',
            run.Raw('||'), 'sudo', 'systemctl', 'stop', 'ceph.target'
        ],
                        check_status=False)
        time.sleep(4)
        for i in range(3):
            umount_dev = "{d}1".format(d=devs[i])
            r = remote.run(args=['sudo', 'umount', run.Raw(umount_dev)])
        cmd = 'purge ' + nodename
        execute_cdeploy(admin, cmd, path)
        cmd = 'purgedata ' + nodename
        execute_cdeploy(admin, cmd, path)
        admin.run(args=['rm', run.Raw('-rf'), run.Raw('~/cdtest/*')])
        admin.run(args=['rmdir', run.Raw('~/cdtest')])
        if config.get('rhbuild'):
            admin.run(args=['sudo', 'yum', 'remove', 'ceph-deploy', '-y'])
Exemplo n.º 47
0
def cli_test(ctx, config):
    """
     ceph-deploy cli to exercise most commonly use cli's and ensure
     all commands works and also startup the init system.

    Variant without mgr deployment: deploys mon/osd/cli/rgw on a single
    node, waits for HEALTH_OK, then purges everything on exit.
    """
    log.info('Ceph-deploy Test')
    if config is None:
        config = {}
    test_branch = ''
    conf_dir = teuthology.get_testdir(ctx) + "/cdtest"

    def execute_cdeploy(admin, cmd, path):
        """Execute ceph-deploy commands """
        """Either use git path or repo path """
        args = ['cd', conf_dir, run.Raw(';')]
        if path:
            args.append('{path}/ceph-deploy/ceph-deploy'.format(path=path));
        else:
            args.append('ceph-deploy')
        args.append(run.Raw(cmd))
        ec = admin.run(args=args, check_status=False).exitstatus
        if ec != 0:
            raise RuntimeError(
                "failed during ceph-deploy cmd: {cmd} , ec={ec}".format(cmd=cmd, ec=ec))

    # rhbuild uses the system ceph-deploy; otherwise run from the test dir,
    # optionally pinned to a dev branch
    if config.get('rhbuild'):
        path = None
    else:
        path = teuthology.get_testdir(ctx)
        # test on branch from config eg: wip-* , master or next etc
        # packages for all distro's should exist for wip*
        if ctx.config.get('branch'):
            branch = ctx.config.get('branch')
            test_branch = ' --dev={branch} '.format(branch=branch)
    mons = ctx.cluster.only(teuthology.is_type('mon'))
    # NOTE(review): 'admin'/'nodename' keep the *last* mon's values and are
    # used below -- assumes effectively a single mon node.
    for node, role in mons.remotes.iteritems():
        admin = node
        admin.run(args=['mkdir', conf_dir], check_status=False)
        nodename = admin.shortname
    system_type = teuthology.get_system_type(admin)
    if config.get('rhbuild'):
        admin.run(args=['sudo', 'yum', 'install', 'ceph-deploy', '-y'])
    log.info('system type is %s', system_type)
    osds = ctx.cluster.only(teuthology.is_type('osd'))

    # 'remote' and 'devs' leak from this loop for use below; requires at
    # least one osd remote with >= 3 scratch devices
    for remote, roles in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        log.info("roles %s", roles)
        if (len(devs) < 3):
            log.error(
                'Test needs minimum of 3 devices, only found %s',
                str(devs))
            raise RuntimeError("Needs minimum of 3 devices ")

    conf_path = '{conf_dir}/ceph.conf'.format(conf_dir=conf_dir)
    new_cmd = 'new ' + nodename
    execute_cdeploy(admin, new_cmd, path)
    # append any extra ceph.conf sections/keys supplied by the task config
    if config.get('conf') is not None:
        confp = config.get('conf')
        for section, keys in confp.iteritems():
            lines = '[{section}]\n'.format(section=section)
            teuthology.append_lines_to_file(admin, conf_path, lines,
                                            sudo=True)
            for key, value in keys.iteritems():
                log.info("[%s] %s = %s" % (section, key, value))
                lines = '{key} = {value}\n'.format(key=key, value=value)
                teuthology.append_lines_to_file(admin, conf_path, lines,
                                                sudo=True)
    new_mon_install = 'install {branch} --mon '.format(
        branch=test_branch) + nodename
    new_mgr_install = 'install {branch} --mgr '.format(
        branch=test_branch) + nodename
    new_osd_install = 'install {branch} --osd '.format(
        branch=test_branch) + nodename
    new_admin = 'install {branch} --cli '.format(branch=test_branch) + nodename
    create_initial = 'mon create-initial '
    # either use create-keys or push command
    push_keys = 'admin ' + nodename
    execute_cdeploy(admin, new_mon_install, path)
    execute_cdeploy(admin, new_mgr_install, path)
    execute_cdeploy(admin, new_osd_install, path)
    execute_cdeploy(admin, new_admin, path)
    execute_cdeploy(admin, create_initial, path)
    execute_cdeploy(admin, push_keys, path)

    # prepare the first three scratch devices as osds ('node:dev' syntax)
    for i in range(3):
        zap_disk = 'disk zap ' + "{n}:{d}".format(n=nodename, d=devs[i])
        prepare = 'osd prepare ' + "{n}:{d}".format(n=nodename, d=devs[i])
        execute_cdeploy(admin, zap_disk, path)
        execute_cdeploy(admin, prepare, path)

    log.info("list files for debugging purpose to check file permissions")
    admin.run(args=['ls', run.Raw('-lt'), conf_dir])
    remote.run(args=['sudo', 'ceph', '-s'], check_status=False)
    r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
    out = r.stdout.getvalue()
    log.info('Ceph health: %s', out.rstrip('\n'))
    log.info("Waiting for cluster to become healthy")
    # poll 'ceph health' up to 6 times, 10s apart, for HEALTH_OK
    with contextutil.safe_while(sleep=10, tries=6,
                                action='check health') as proceed:
       while proceed():
           r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
           out = r.stdout.getvalue()
           if (out.split(None,1)[0] == 'HEALTH_OK'):
               break
    rgw_install = 'install {branch} --rgw {node}'.format(
        branch=test_branch,
        node=nodename,
    )
    rgw_create = 'rgw create ' + nodename
    execute_cdeploy(admin, rgw_install, path)
    execute_cdeploy(admin, rgw_create, path)
    log.info('All ceph-deploy cli tests passed')
    try:
        yield
    finally:
        # teardown: stop daemons (upstart/sysvinit/systemd fallbacks),
        # unmount osd partitions, purge, and remove the work dir
        log.info("cleaning up")
        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
                              'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
                              'sudo', 'systemctl', 'stop', 'ceph.target'],
                        check_status=False)
        time.sleep(4)
        for i in range(3):
            umount_dev = "{d}1".format(d=devs[i])
            r = remote.run(args=['sudo', 'umount', run.Raw(umount_dev)])
        cmd = 'purge ' + nodename
        execute_cdeploy(admin, cmd, path)
        cmd = 'purgedata ' + nodename
        execute_cdeploy(admin, cmd, path)
        log.info("Removing temporary dir")
        admin.run(
            args=[
                'rm',
                run.Raw('-rf'),
                run.Raw(conf_dir)],
            check_status=False)
        if config.get('rhbuild'):
            admin.run(args=['sudo', 'yum', 'remove', 'ceph-deploy', '-y'])
Exemplo n.º 48
0
def upgrade(ctx, config):
    """
    Upgrades packages for a given project.

    For example::

        tasks:
        - install.upgrade:
             all:
                branch: end

    or specify specific roles::

        tasks:
        - install.upgrade:
             mon.a:
                branch: end
             osd.0:
                branch: other

    or rely on the overrides for the target version::

        overrides:
          install:
            ceph:
              sha1: ...
        tasks:
        - install.upgrade:
            all:

    (HACK: the overrides will *only* apply the sha1/branch/tag if those
    keys are not present in the config.)

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    assert config is None or isinstance(config, dict), \
        "install.upgrade only supports a dictionary for configuration"

    for i in config.keys():
        assert config.get(i) is None or isinstance(
            config.get(i), dict), 'host supports dictionary'

    project = config.get('project', 'ceph')

    # use 'install' overrides here, in case the upgrade target is left
    # unspecified/implicit.
    install_overrides = ctx.config.get('overrides',
                                       {}).get('install', {}).get(project, {})
    log.info('project %s config %s overrides %s', project, config,
             install_overrides)

    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))

    # build a normalized remote -> config dict
    remotes = {}
    if 'all' in config:
        for remote in ctx.cluster.remotes.iterkeys():
            remotes[remote] = config.get('all')
    else:
        # each remote is upgraded at most once even if it holds several roles
        for role in config.keys():
            (remote, ) = ctx.cluster.only(role).remotes.iterkeys()
            if remote in remotes:
                log.warn('remote %s came up twice (role %s)', remote, role)
                continue
            remotes[remote] = config.get(role)

    for remote, node in remotes.iteritems():
        if not node:
            node = {}

        this_overrides = copy.deepcopy(install_overrides)
        # an explicit upgrade target in the node config beats the override
        if 'sha1' in node or 'tag' in node or 'branch' in node:
            log.info(
                'config contains sha1|tag|branch, removing those keys from override'
            )
            this_overrides.pop('sha1', None)
            this_overrides.pop('tag', None)
            this_overrides.pop('branch', None)
        teuthology.deep_merge(node, this_overrides)
        log.info('remote %s config %s', remote, node)

        system_type = teuthology.get_system_type(remote)
        assert system_type in ('deb', 'rpm')
        pkgs = PACKAGES[project][system_type]
        log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
            proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
        # FIXME: again, make extra_pkgs distro-agnostic
        pkgs += extra_pkgs
        node['project'] = project
        if system_type == 'deb':
            _upgrade_deb_packages(ctx, node, remote, pkgs)
        elif system_type == 'rpm':
            _upgrade_rpm_packages(ctx, node, remote, pkgs)

    yield
Exemplo n.º 49
0
 def system_type(self):
     """
     Return this remote's package system type via misc.get_system_type
     (typically 'deb' or 'rpm'); presumably exposed as a @property --
     decorator not visible in this view.
     """
     return misc.get_system_type(self)
Exemplo n.º 50
0
def cli_test(ctx, config):
    """
    Exercise the most commonly used ceph-deploy CLI commands end to end
    (new / install --mon|--osd|--cli / mon create-initial / osd prepare /
    rgw create), verify the cluster reports HEALTH_WARN, then purge
    everything in the ``finally`` cleanup once the caller is done.

    Task-style generator: setup runs before ``yield``, teardown after.

    :param ctx: teuthology context (provides the cluster and testdir)
    :param config: task config dict; only 'rhbuild' is consulted here
                   (truthy means ceph-deploy comes from yum and no
                   testdir path is used)
    """
    log.info('Ceph-deploy Test')
    if config is None:
        config = {}
        
    if config.get('rhbuild'):
        # rhbuild: ceph-deploy is installed via yum below, so execute_cdeploy
        # gets no testdir path
        path=None
    else:
        path = teuthology.get_testdir(ctx)  
    mons = ctx.cluster.only(teuthology.is_type('mon'))
    # NOTE(review): after this loop `admin`/`nodename` are whichever mon the
    # dict iterated last -- the test effectively assumes a single mon.
    for node,role in mons.remotes.iteritems():
        admin=node
        # NOTE(review): two separate args create two dirs ('~/' literal and
        # 'cdtest'); '~/cdtest' was probably intended. check_status=False
        # hides any failure -- confirm before relying on ~/cdtest below.
        admin.run( args=[ 'mkdir', '~/', 'cdtest' ],check_status=False)
        nodename=admin.shortname
    system_type = teuthology.get_system_type(admin)
    if config.get('rhbuild'):
        admin.run(args = ['sudo', 'yum', 'install', 'ceph-deploy', '-y'])
    log.info('system type is %s', system_type)
    osds = ctx.cluster.only(teuthology.is_type('osd'))
   
    # Every osd remote must expose at least 3 scratch devices; the
    # zap/prepare loop below uses the first three.
    for remote,roles in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        log.info("roles %s" , roles)
        if (len(devs) < 3):
            log.error('Test needs minimum of 3 devices, only found %s', str(devs))
            raise RuntimeError ( "Needs minimum of 3 devices ")
    
    # Build and run the core ceph-deploy command sequence against the mon.
    new_cmd= 'new ' + nodename
    new_mon_install = 'install --mon ' + nodename
    new_osd_install = 'install --osd ' + nodename
    new_admin = 'install --cli ' + nodename
    create_initial= '--overwrite-conf mon create-initial ' + nodename
    execute_cdeploy(admin,new_cmd,path)
    execute_cdeploy(admin,new_mon_install,path)
    execute_cdeploy(admin,new_osd_install,path)
    execute_cdeploy(admin,new_admin,path)
    execute_cdeploy(admin,create_initial,path)

    # NOTE(review): `devs` and `remote` below are leftovers of the osd loop
    # above, i.e. they refer to the last osd remote only.
    for i in range(3):
        zap_disk = 'disk zap '  + "{n}:{d}".format(n=nodename,d=devs[i])
        prepare= 'osd prepare ' + "{n}:{d}".format(n=nodename,d=devs[i])
        execute_cdeploy(admin,zap_disk,path)
        execute_cdeploy(admin,prepare,path)
        
    admin.run(args=['ls',run.Raw('-lt'),run.Raw('~/cdtest/')])
    time.sleep(4)
    remote.run(args=['sudo', 'ceph','-s'],check_status=False)
    r = remote.run(args=['sudo', 'ceph','health'],stdout=StringIO())
    out = r.stdout.getvalue()
    log.info('Ceph health: %s', out.rstrip('\n'))
    # HEALTH_WARN (not HEALTH_OK) is treated as success here -- presumably
    # the expected steady state of this freshly prepared cluster; confirm.
    if out.split(None, 1)[0] == 'HEALTH_WARN':
        log.info('All ceph-deploy cli tests passed')
    else:
       raise RuntimeError ( "Failed to reach HEALTH_WARN State")
    
    #test rgw cli
    rgw_install = 'install --rgw ' + nodename
    rgw_create =  'rgw create ' + nodename
    execute_cdeploy(admin,rgw_install,path)
    execute_cdeploy(admin,rgw_create,path)
    try: 
      yield
    finally:
        log.info("cleaning up")
        # Stop ceph services; the init mechanism differs between deb and
        # rpm systems (check_status=False: services may already be down).
        if system_type == 'deb':
            remote.run(args=['sudo', 'stop','ceph-all'],check_status=False)
            remote.run(args=['sudo', 'service','ceph', '-a', 'stop'],check_status=False)
        else:
            remote.run(args=['sudo', '/etc/init.d/ceph', '-a', 'stop'],check_status=False)
        time.sleep(4)
        # Unmount the first partition of each device prepared above.
        for i in range(3):
            umount_dev = "{d}1".format(d=devs[i])
            r = remote.run(args=['sudo', 'umount',run.Raw(umount_dev)])
        cmd = 'purge ' + nodename
        execute_cdeploy(admin,cmd,path)
        cmd = 'purgedata ' + nodename
        execute_cdeploy(admin,cmd,path)
        admin.run(args=['rm',run.Raw('-rf'),run.Raw('~/cdtest/*')])
        admin.run(args=['rmdir',run.Raw('~/cdtest')])
        if config.get('rhbuild'):
            admin.run(args = ['sudo', 'yum', 'remove', 'ceph-deploy', '-y'])
Exemplo n.º 51
0
def run_fio(remote, config, rbd_test_dir):
    """
    Run fio against rbd images on ``remote``.

    Builds a fio job file from ``config``, creates one rbd image per
    (format, feature) combination (plus a protected snapshot and a clone
    when 'test-clone-io' is set), downloads and builds the requested fio
    release on the remote, runs it, and finally removes the fio build
    tree and any librbd devel package that was installed for the 'rbd'
    ioengine.

    :param remote: remote host to run the test on
    :param config: dict of options: io-engine, bs, fio-io-size, runtime,
                   image_size, formats, features, fio-version, rw,
                   test-clone-io
    :param rbd_test_dir: directory on the remote used for the fio build
    """
    # Resolve option defaults up front so every name is always bound.
    # The previous version only assigned `ioengine`/`rw` inside the
    # "key present" branches, raising NameError further down (e.g. at
    # `if ioengine == 'rbd'`, or `rw` in the clone sections) whenever the
    # corresponding config keys were absent.
    ioengine = config.get('io-engine', 'sync')
    bs = config.get('bs', '4k')
    size = config.get('fio-io-size', '100m')
    runtime = config.get('runtime', 1800)
    rw = config.get('rw', 'randrw')
    image_size = config.get('image_size', 10240)
    formats = config.get('formats', [1, 2])
    features = config.get('features', [1, 2, 4])
    fio_version = config.get('fio-version', '2.2.9')

    # Global section of the fio job file (same option order as before).
    fio_config = NamedTemporaryFile(prefix='fio_rbd_', dir='/tmp/', delete=False)
    fio_config.write('[global]\n')
    fio_config.write('ioengine={ioe}\n'.format(ioe=ioengine))
    fio_config.write('bs={bs}\n'.format(bs=bs))
    fio_config.write('iodepth=2\n')
    fio_config.write('size={size}\n'.format(size=size))

    fio_config.write('time_based\n')
    fio_config.write('runtime={runtime}\n'.format(runtime=runtime))
    fio_config.write('allow_file_create=0\n')

    fio_config.write('norandommap\n')
    if ioengine == 'rbd':
        fio_config.write('invalidate=0\n')

    # The librbd ioengine needs the rbd development headers to build fio.
    sn = remote.shortname
    system_type = teuthology.get_system_type(remote)
    if system_type == 'rpm' and ioengine == 'rbd':
        log.info("Installing librbd1 devel package on {sn}".format(sn=sn))
        remote.run(args=['sudo', 'yum', 'install', 'librbd1-devel', '-y'])
    elif ioengine == 'rbd':
        log.info("Installing librbd devel package on {sn}".format(sn=sn))
        remote.run(args=['sudo', 'apt-get', '-y', 'install', 'librbd-dev'])
    if ioengine == 'rbd':
        fio_config.write('clientname=admin\n')
        fio_config.write('pool=rbd\n')

    # One fio job section per (format, feature) image.
    for frmt in formats:
        for feature in features:
            log.info("Creating rbd images on {sn}".format(sn=sn))
            rbd_name = 'i{i}f{f}{sn}'.format(i=frmt, f=feature, sn=sn)
            rbd_snap_name = 'i{i}f{f}{sn}@i{i}f{f}{sn}Snap'.format(i=frmt, f=feature, sn=sn)
            rbd_clone_name = 'i{i}f{f}{sn}Clone'.format(i=frmt, f=feature, sn=sn)
            remote.run(args=['sudo', 'rbd', 'create',
                             '--image', rbd_name,
                             '--image-format', '{f}'.format(f=frmt),
                             '--image-features', '{f}'.format(f=feature),
                             '--size', '{size}'.format(size=image_size)])
            remote.run(args=['sudo', 'rbd', 'info', rbd_name])
            if ioengine != 'rbd':
                # Non-librbd engines: map the image through krbd and point
                # fio at the resulting /dev/rbdN block device.
                # (was StringIO.StringIO() -- inconsistent with the bare
                # StringIO() used everywhere else in this file)
                out = StringIO()
                remote.run(args=['sudo', 'rbd', 'map', rbd_name], stdout=out)
                dev = re.search(r'(/dev/rbd\d+)', out.getvalue())
                rbd_dev = dev.group(1)
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['sudo', 'rbd', 'snap', 'create', rbd_snap_name])
                    remote.run(args=['sudo', 'rbd', 'snap', 'protect', rbd_snap_name])
                    remote.run(args=['sudo', 'rbd', 'clone', rbd_snap_name, rbd_clone_name])
                    remote.run(args=['sudo', 'rbd', 'map', rbd_clone_name], stdout=out)
                    dev = re.search(r'(/dev/rbd\d+)', out.getvalue())
                    rbd_clone_dev = dev.group(1)
                fio_config.write('[{rbd_dev}]\n'.format(rbd_dev=rbd_dev))
                fio_config.write('rw={rw}\n'.format(rw=rw))
                fio_config.write('filename={rbd_dev}\n'.format(rbd_dev=rbd_dev))
                if config.get('test-clone-io'):
                    fio_config.write('[{rbd_clone_dev}]\n'.format(rbd_clone_dev=rbd_clone_dev))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('filename={rbd_clone_dev}\n'.format(rbd_clone_dev=rbd_clone_dev))
            else:
                # librbd engine: fio talks to the image directly via rbdname.
                if config.get('test-clone-io'):
                    log.info("Testing clones using fio")
                    remote.run(args=['sudo', 'rbd', 'snap', 'create', rbd_snap_name])
                    remote.run(args=['sudo', 'rbd', 'snap', 'protect', rbd_snap_name])
                    remote.run(args=['sudo', 'rbd', 'clone', rbd_snap_name, rbd_clone_name])
                fio_config.write('[{img_name}]\n'.format(img_name=rbd_name))
                fio_config.write('rw={rw}\n'.format(rw=rw))
                fio_config.write('rbdname={img_name}\n'.format(img_name=rbd_name))
                if config.get('test-clone-io'):
                    fio_config.write('[{clone_img_name}]\n'.format(clone_img_name=rbd_clone_name))
                    fio_config.write('rw={rw}\n'.format(rw=rw))
                    fio_config.write('rbdname={clone_img_name}\n'.format(clone_img_name=rbd_clone_name))

    fio_config.close()
    # Same path on both ends so the job file references resolve remotely.
    remote.put_file(fio_config.name, fio_config.name)
    try:
        log.info("Running rbd feature - fio test on {sn}".format(sn=sn))
        fio = "https://github.com/axboe/fio/archive/fio-" + fio_version + ".tar.gz"
        remote.run(args=['mkdir', run.Raw(rbd_test_dir), ])
        remote.run(args=['cd', run.Raw(rbd_test_dir),
                         run.Raw(';'), 'wget', fio, run.Raw(';'), run.Raw('tar -xvf fio*tar.gz'), run.Raw(';'),
                         run.Raw('cd fio-fio*'), 'configure', run.Raw(';'), 'make'])
        remote.run(args=['sudo', 'ceph', '-s'])
        remote.run(args=['sudo', run.Raw('{tdir}/fio-fio-{v}/fio {f}'.format(tdir=rbd_test_dir, v=fio_version, f=fio_config.name))])
        remote.run(args=['sudo', 'ceph', '-s'])
    finally:
        log.info("Cleaning up fio install")
        remote.run(args=['rm', '-rf', run.Raw(rbd_test_dir)])
        if system_type == 'rpm' and ioengine == 'rbd':
            log.info("Uninstall librbd1 devel package on {sn}".format(sn=sn))
            remote.run(args=['sudo', 'yum', 'remove', 'librbd1-devel', '-y'])
        elif ioengine == 'rbd':
            log.info("Uninstall librbd devel package on {sn}".format(sn=sn))
            remote.run(args=['sudo', 'apt-get', '-y', 'remove', 'librbd-dev'])
Exemplo n.º 52
0
    def install_dependencies(self):
        """
        Install the distro packages cbt needs on ``first_mon``, then the
        per-benchmark extras: build fio from git for 'librbdfio', or
        download, unpack and start cosbench for 'cosbench'.
        """
        system_type = misc.get_system_type(self.first_mon)

        # Package names/commands differ between rpm and deb systems.
        if system_type == 'rpm':
            install_cmd = ['sudo', 'yum', '-y', 'install']
            cbt_depends = [
                'python-yaml', 'python-lxml', 'librbd-devel', 'pdsh',
                'collectl'
            ]
        else:
            install_cmd = ['sudo', 'apt-get', '-y', '--force-yes', 'install']
            cbt_depends = [
                'python-yaml', 'python-lxml', 'librbd-dev', 'collectl'
            ]
        self.first_mon.run(args=install_cmd + cbt_depends)

        # Only the first configured benchmark determines the extra deps.
        # NOTE(review): .keys()[0] is Python 2 only (dict views are not
        # subscriptable in Python 3).
        benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
        self.log.info('benchmark: %s', benchmark_type)

        if benchmark_type == 'librbdfio':
            # install fio
            testdir = misc.get_testdir(self.ctx)
            self.first_mon.run(args=[
                'git', 'clone', '-b', 'master',
                'https://github.com/axboe/fio.git', '{tdir}/fio'.format(
                    tdir=testdir)
            ])
            self.first_mon.run(args=[
                'cd',
                os.path.join(testdir, 'fio'),
                run.Raw('&&'), './configure',
                run.Raw('&&'), 'make'
            ])

        if benchmark_type == 'cosbench':
            # install cosbench
            self.log.info('install dependecies for cosbench')
            if system_type == 'rpm':
                cosbench_depends = [
                    'wget', 'unzip', 'java-1.7.0-openjdk', 'curl'
                ]
            else:
                cosbench_depends = ['wget', 'unzip', 'openjdk-8-jre', 'curl']
            self.first_mon.run(args=install_cmd + cosbench_depends)
            testdir = misc.get_testdir(self.ctx)
            cosbench_version = '0.4.2.c3'
            # Download the release zip, then symlink it to 'cos' so the
            # later start/cli steps use a version-independent path.
            self.first_mon.run(args=[
                'cd',
                testdir,
                run.Raw('&&'),
                'wget',
                'https://github.com/intel-cloud/cosbench/releases/download/v0.4.2.c3/0.4.2.c3.zip',
                run.Raw('&&'),
                'unzip',
                '0.4.2.c3.zip',
            ])
            self.first_mon.run(args=[
                'cd',
                testdir,
                run.Raw('&&'),
                'ln',
                '-s',
                cosbench_version,
                'cos',
            ])
            self.first_mon.run(args=[
                'cd',
                os.path.join(testdir, 'cos'),
                run.Raw('&&'),
                'chmod',
                '+x',
                run.Raw('*.sh'),
            ])

            # start cosbench and check info
            self.log.info('start cosbench')
            self.first_mon.run(args=[
                'cd', testdir,
                run.Raw('&&'), 'cd', 'cos',
                run.Raw('&&'), 'sh', 'start-all.sh'
            ])
            self.log.info('check cosbench info')
            self.first_mon.run(args=[
                'cd', testdir,
                run.Raw('&&'), 'cd', 'cos',
                run.Raw('&&'), 'sh', 'cli.sh', 'info'
            ])
Exemplo n.º 53
0
    def install_dependencies(self):
        """
        Install the distro packages cbt needs on ``first_mon``, then the
        per-benchmark extras: build fio from git for 'librbdfio', or
        download, unpack and start cosbench for 'cosbench' (with a
        patched cosbench build on Ubuntu 18.04).
        """
        system_type = misc.get_system_type(self.first_mon)

        # Package names/commands differ between rpm and deb systems.
        if system_type == 'rpm':
            install_cmd = ['sudo', 'yum', '-y', 'install']
            cbt_depends = [
                'python-yaml', 'python-lxml', 'librbd-devel', 'pdsh',
                'collectl'
            ]
        else:
            install_cmd = ['sudo', 'apt-get', '-y', '--force-yes', 'install']
            cbt_depends = [
                'python-yaml', 'python-lxml', 'librbd-dev', 'collectl'
            ]
        self.first_mon.run(args=install_cmd + cbt_depends)

        # Only the first configured benchmark determines the extra deps.
        # NOTE(review): .keys()[0] is Python 2 only (dict views are not
        # subscriptable in Python 3).
        benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
        self.log.info('benchmark: %s', benchmark_type)

        if benchmark_type == 'librbdfio':
            # install fio
            testdir = misc.get_testdir(self.ctx)
            self.first_mon.run(args=[
                'git', 'clone', '-b', 'master',
                'https://github.com/axboe/fio.git', '{tdir}/fio'.format(
                    tdir=testdir)
            ])
            self.first_mon.run(args=[
                'cd',
                os.path.join(testdir, 'fio'),
                run.Raw('&&'), './configure',
                run.Raw('&&'), 'make'
            ])

        if benchmark_type == 'cosbench':
            # install cosbench
            self.log.info('install dependecies for cosbench')
            if system_type == 'rpm':
                cosbench_depends = [
                    'wget', 'unzip', 'java-1.7.0-openjdk', 'curl'
                ]
            else:
                cosbench_depends = ['wget', 'unzip', 'openjdk-8-jre', 'curl']
            self.first_mon.run(args=install_cmd + cosbench_depends)
            testdir = misc.get_testdir(self.ctx)
            cosbench_version = '0.4.2.c3'
            cosbench_location = 'https://github.com/intel-cloud/cosbench/releases/download/v0.4.2.c3/0.4.2.c3.zip'
            # presumably the distro version string (e.g. '18.04');
            # get_system_type is called with positional flags here -- confirm
            os_version = misc.get_system_type(self.first_mon, False, True)

            # additional requirements for bionic
            if os_version == '18.04':
                self.first_mon.run(
                    args=['sudo', 'apt-get', '-y', 'purge', 'openjdk-11*'])
                # use our own version of cosbench
                cosbench_version = 'cosbench-0.4.2.c3.1'
                # contains additonal parameter "-N" to nc
                cosbench_location = 'http://drop.ceph.com/qa/cosbench-0.4.2.c3.1.zip'
                cosbench_dir = os.path.join(testdir, cosbench_version)
                self.ctx.cluster.run(
                    args=['mkdir', '-p', '-m0755', '--', cosbench_dir])
                self.first_mon.run(args=[
                    'cd', testdir,
                    run.Raw('&&'), 'wget', cosbench_location,
                    run.Raw('&&'), 'unzip', '{name}.zip'.format(
                        name=cosbench_version), '-d', cosbench_version
                ])
            else:
                self.first_mon.run(args=[
                    'cd', testdir,
                    run.Raw('&&'), 'wget', cosbench_location,
                    run.Raw('&&'), 'unzip', '{name}.zip'.format(
                        name=cosbench_version)
                ])
            # Symlink the unpacked tree to 'cos' so the start/cli steps
            # below use a version-independent path.
            self.first_mon.run(args=[
                'cd',
                testdir,
                run.Raw('&&'),
                'ln',
                '-s',
                cosbench_version,
                'cos',
            ])
            self.first_mon.run(args=[
                'cd',
                os.path.join(testdir, 'cos'),
                run.Raw('&&'),
                'chmod',
                '+x',
                run.Raw('*.sh'),
            ])

            # start cosbench and check info
            self.log.info('start cosbench')
            self.first_mon.run(args=[
                'cd', testdir,
                run.Raw('&&'), 'cd', 'cos',
                run.Raw('&&'), 'sh', 'start-all.sh'
            ])
            self.log.info('check cosbench info')
            self.first_mon.run(args=[
                'cd', testdir,
                run.Raw('&&'), 'cd', 'cos',
                run.Raw('&&'), 'sh', 'cli.sh', 'info'
            ])
Exemplo n.º 54
0
 def system_type(self):
     """
     Package system type of this remote (e.g. 'deb' or 'rpm'), as
     reported by misc.get_system_type. Likely exposed as a property --
     the decorator, if any, is outside this view.
     """
     return misc.get_system_type(self)
Exemplo n.º 55
0
def install_distro_kernel(remote):
    """
    RPM: Find newest kernel on the machine and update grub to use kernel + reboot.
    DEB: Find newest kernel. Parse grub.cfg to figure out the entryname/subentry.
    then modify 01_ceph_kernel to have correct entry + updategrub + reboot.

    :param remote: remote host to install/boot the distro kernel on
    """
    system_type = teuthology.get_system_type(remote)
    distribution = ''
    if system_type == 'rpm':
        output, err_mess = StringIO(), StringIO()
        # `rpm -q kernel --last` sorts by install time, newest first.
        remote.run(args=['rpm', '-q', 'kernel', '--last'],
                   stdout=output,
                   stderr=err_mess)
        newest = output.getvalue().split()[0].split('kernel-')[1]
        log.info('Distro Kernel Version: {version}'.format(version=newest))
        update_grub_rpm(remote, newest)
        # wait=False: the reboot tears down the connection, so don't block
        remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
        output.close()
        err_mess.close()
        return

    if system_type == 'deb':
        distribution = teuthology.get_system_type(remote, distro=True)
        newversion = get_version_from_pkg(remote, distribution)
        if 'ubuntu' in distribution:
            grub2conf = teuthology.get_file(remote, '/boot/grub/grub.cfg',
                                            True)
            submenu = ''
            menuentry = ''
            # Scan grub.cfg for the enclosing submenu and the first
            # menuentry matching the new kernel version (skipping
            # recovery entries).
            for line in grub2conf.split('\n'):
                if 'submenu' in line:
                    submenu = line.split('submenu ')[1]
                    # Ubuntu likes to be sneaky and change formatting of
                    # grub.cfg between quotes/doublequotes between versions
                    if submenu.startswith("'"):
                        submenu = submenu.split("'")[1]
                    if submenu.startswith('"'):
                        submenu = submenu.split('"')[1]
                if 'menuentry' in line:
                    if newversion in line and 'recovery' not in line:
                        menuentry = line.split('\'')[1]
                        break
            # grub's "submenu>entry" syntax selects an entry nested in a
            # submenu.
            if submenu:
                grubvalue = submenu + '>' + menuentry
            else:
                grubvalue = menuentry
            # Write a grub.d snippet that sets the default boot entry,
            # replacing any previous one.
            grubfile = 'cat <<EOF\nset default="' + grubvalue + '"\nEOF'
            teuthology.delete_file(remote,
                                   '/etc/grub.d/01_ceph_kernel',
                                   sudo=True,
                                   force=True)
            teuthology.sudo_write_file(remote, '/etc/grub.d/01_ceph_kernel',
                                       StringIO(grubfile), '755')
            log.info(
                'Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'update-grub'])
            # wait=False: reboot kills the connection
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
            return

        if 'debian' in distribution:
            grub2_kernel_select_generic(remote, newversion, 'deb')
            log.info(
                'Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
            return