Example #1
def run(ceph_cluster, **kw):
    """
    Runs ceph-ansible deployment
    Args:
        ceph_cluster (ceph.ceph.Ceph): Ceph cluster object
    """
    log.info("Running test")
    log.info("Running ceph ansible test")
    ceph_nodes = kw.get('ceph_nodes')
    config = kw.get('config')
    filestore = config.get('filestore', False)
    k_and_m = config.get('ec-pool-k-m')
    hotfix_repo = config.get('hotfix_repo')
    test_data = kw.get('test_data')

    ubuntu_repo = config.get('ubuntu_repo', None)
    base_url = config.get('base_url', None)
    installer_url = config.get('installer_url', None)
    mixed_lvm_configs = config.get('is_mixed_lvm_configs', None)
    device_to_add = config.get('device', None)
    config['ansi_config']['public_network'] = get_public_network(ceph_nodes[0])

    ceph_cluster.ansible_config = config['ansi_config']
    ceph_cluster.custom_config = test_data.get('custom-config')
    ceph_cluster.custom_config_file = test_data.get('custom-config-file')

    ceph_cluster.use_cdn = config.get('use_cdn')
    build = config.get('build', config.get('rhbuild'))
    ceph_cluster.rhcs_version = build

    if config.get('skip_setup') is True:
        log.info("Skipping setup of ceph cluster")
        return 0

    test_data['install_version'] = build

    ceph_installer = ceph_cluster.get_ceph_object('installer')
    ansible_dir = ceph_installer.ansible_dir

    ceph_cluster.setup_ceph_firewall()

    ceph_cluster.setup_ssh_keys()

    ceph_cluster.setup_packages(base_url, hotfix_repo, installer_url, ubuntu_repo)

    ceph_installer.install_ceph_ansible(build)
    hosts_file = ceph_cluster.generate_ansible_inventory(device_to_add, mixed_lvm_configs, filestore)
    ceph_installer.write_inventory_file(hosts_file)

    if config.get('docker-insecure-registry'):
        ceph_cluster.setup_insecure_registry()

    # use the provided sample file as main site.yml
    ceph_installer.setup_ansible_site_yml(ceph_cluster.containerized)

    ceph_cluster.distribute_all_yml()

    # add iSCSI settings if necessary
    if test_data.get("luns_setting", None) and test_data.get("initiator_setting", None):
        ceph_installer.add_iscsi_settings(test_data)

    log.info("Ceph ansible version " + ceph_installer.get_installed_ceph_versions())

    out, rc = ceph_installer.exec_command(
        cmd='cd {} ; ANSIBLE_STDOUT_CALLBACK=debug ansible-playbook -vv -i hosts site.yml'.format(ansible_dir),
        long_running=True)

    # manually handle client creation in a containerized deployment (temporary)
    if ceph_cluster.containerized:
        for node in ceph_cluster.get_ceph_objects('client'):
            log.info("Manually installing client node")
            node.exec_command(sudo=True, cmd="yum install -y ceph-common")

    if rc != 0:
        log.error("Failed during deployment")
        return rc

    # check that all OSDs are up and in
    timeout = 300
    if config.get('timeout'):
        timeout = datetime.timedelta(seconds=config.get('timeout'))
    # record counts in test_data for later use by upgrade tests, etc.
    num_osds = ceph_cluster.ceph_demon_stat['osd']
    num_mons = ceph_cluster.ceph_demon_stat['mon']
    test_data['ceph-ansible'] = {'num-osds': num_osds, 'num-mons': num_mons, 'rhbuild': build}

    # create rbd pool used by tests/workunits
    ceph_cluster.create_rbd_pool(k_and_m)

    if ceph_cluster.check_health(build, timeout=timeout) != 0:
        return 1
    return rc
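
The `run` entry point above is driven entirely by the keyword arguments supplied by the test framework. The minimal sketch below shows the shape it expects; every value is an illustrative placeholder, not output from a real run.

# Sketch of the kw structure Example #1 reads; all values are placeholders.
kw = {
    "ceph_nodes": [],            # framework node objects; ceph_nodes[0] is probed for public_network
    "test_data": {},             # mutated in place: install_version and ceph-ansible stats land here
    "config": {
        "ansi_config": {},       # becomes group_vars/all.yml; public_network is injected into it
        "build": "3.3",          # placeholder; falls back to "rhbuild" when absent
        "filestore": False,      # selects the OSD object store in the generated inventory
        "ec-pool-k-m": None,     # optional erasure-coding spec consumed by create_rbd_pool()
        "timeout": 600,          # optional; converted to a timedelta for check_health()
    },
}
# rc = run(ceph_cluster, **kw)  # 0 on success, playbook rc on failure, 1 on bad health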
Example #2
def run(ceph_cluster, **kw):
    """
    Runs ceph-ansible deployment
    Args:
        ceph_cluster (ceph.ceph.Ceph): Ceph cluster object
    """
    log.info("Running test")
    log.info("Running ceph ansible test")
    ceph_nodes = kw.get("ceph_nodes")
    config = kw.get("config")
    filestore = config.get("filestore", False)
    k_and_m = config.get("ec-pool-k-m")
    hotfix_repo = config.get("hotfix_repo")
    test_data = kw.get("test_data")
    cloud_type = config.get("cloud-type", "openstack")

    ubuntu_repo = config.get("ubuntu_repo", None)
    base_url = config.get("base_url", None)
    installer_url = config.get("installer_url", None)
    mixed_lvm_configs = config.get("is_mixed_lvm_configs", None)
    device_to_add = config.get("device", None)
    config["ansi_config"]["public_network"] = get_public_network(ceph_nodes[0])
    ceph_cluster.ansible_config = config["ansi_config"]
    ceph_cluster.custom_config = test_data.get("custom-config")
    ceph_cluster.custom_config_file = test_data.get("custom-config-file")
    cluster_name = config.get("ansi_config").get("cluster")

    if all(
        key in ceph_cluster.ansible_config
        for key in ("rgw_multisite", "rgw_zonesecondary")
    ):
        ceph_cluster_dict = kw.get("ceph_cluster_dict")
        primary_node = "ceph-rgw1"
        primary_rgw_node = (
            ceph_cluster_dict.get(primary_node).get_ceph_object("rgw").node
        )
        config["ansi_config"]["rgw_pullhost"] = primary_rgw_node.ip_address

    ceph_cluster.use_cdn = config.get("use_cdn")
    build = config.get("build", config.get("rhbuild"))
    ceph_cluster.rhcs_version = build

    if config.get("skip_setup") is True:
        log.info("Skipping setup of ceph cluster")
        return 0

    test_data["install_version"] = build

    ceph_installer = ceph_cluster.get_ceph_object("installer")
    ansible_dir = ceph_installer.ansible_dir

    ceph_cluster.setup_ceph_firewall()

    ceph_cluster.setup_ssh_keys()

    ceph_cluster.setup_packages(
        base_url, hotfix_repo, installer_url, ubuntu_repo, build, cloud_type
    )

    ceph_installer.install_ceph_ansible(build)
    hosts_file = ceph_cluster.generate_ansible_inventory(
        device_to_add, mixed_lvm_configs, filestore
    )
    ceph_installer.write_inventory_file(hosts_file)

    if config.get("docker-insecure-registry"):
        ceph_cluster.setup_insecure_registry()

    # use the provided sample file as main site.yml
    ceph_installer.setup_ansible_site_yml(build, ceph_cluster.containerized)

    ceph_cluster.distribute_all_yml()

    # add iSCSI settings if necessary
    if test_data.get("luns_setting", None) and test_data.get("initiator_setting", None):
        ceph_installer.add_iscsi_settings(test_data)

    log.info("Ceph ansible version " + ceph_installer.get_installed_ceph_versions())

    # choose the ansible playbook based on container or bare-metal deployment
    file_name = "site.yml"

    if ceph_cluster.containerized:
        file_name = "site-container.yml"

    out, rc = ceph_installer.exec_command(
        cmd="cd {ansible_dir} ; ANSIBLE_STDOUT_CALLBACK=debug ansible-playbook -vvvv -i hosts {file_name}".format(
            ansible_dir=ansible_dir, file_name=file_name
        ),
        long_running=True,
    )

    # manually handle client creation in a containerized deployment (temporary)
    if ceph_cluster.containerized:
        for node in ceph_cluster.get_ceph_objects("client"):
            log.info("Manually installing client node")
            node.exec_command(sudo=True, cmd="yum install -y ceph-common")

    if rc != 0:
        log.error("Failed during deployment")
        return rc

    # check that all OSDs are up and in
    timeout = 300
    if config.get("timeout"):
        timeout = datetime.timedelta(seconds=config.get("timeout"))
    # record counts in test_data for later use by upgrade tests, etc.
    num_osds = ceph_cluster.ceph_demon_stat["osd"]
    num_mons = ceph_cluster.ceph_demon_stat["mon"]
    test_data["ceph-ansible"] = {
        "num-osds": num_osds,
        "num-mons": num_mons,
        "rhbuild": build,
    }

    # create rbd pool used by tests/workunits
    ceph_cluster.create_rbd_pool(k_and_m, cluster_name)

    if (
        ceph_cluster.check_health(build, timeout=timeout, cluster_name=cluster_name)
        != 0
    ):
        return 1
    return rc
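
Example #2 differs from the first mainly in its RGW multisite handling: when both `rgw_multisite` and `rgw_zonesecondary` appear in `ansi_config`, the primary RGW host is looked up in a second cluster and written back as `rgw_pullhost`. A sketch of the extra keys involved follows; the values are placeholders, and `ceph-rgw1` is the cluster key the code itself hardcodes.

# Extra configuration exercised by Example #2; values are placeholders.
config = {
    "cloud-type": "openstack",        # default; forwarded to setup_packages()
    "ansi_config": {
        "cluster": "ceph",            # cluster_name passed to create_rbd_pool() and check_health()
        "rgw_multisite": True,        # both keys present together trigger the
        "rgw_zonesecondary": True,    # rgw_pullhost lookup against ceph_cluster_dict
    },
}
# kw["ceph_cluster_dict"]["ceph-rgw1"] must hold the primary cluster; its rgw
# node's ip_address is written into config["ansi_config"]["rgw_pullhost"].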
Example #3
def run(ceph_cluster, **kw):
    log.info("Running test")
    ceph_nodes = kw.get('ceph_nodes')
    log.info("Running ceph ansible test")
    config = kw.get('config')
    test_data = kw.get('test_data')
    prev_install_version = test_data['install_version']
    skip_version_compare = config.get('skip_version_compare')
    containerized = config.get('ansi_config').get('containerized_deployment')
    build = config.get('build', config.get('rhbuild'))
    log.info("Build for upgrade: {build}".format(build=build))

    ubuntu_repo = config.get('ubuntu_repo')
    hotfix_repo = config.get('hotfix_repo')
    base_url = config.get('base_url')
    installer_url = config.get('installer_url')
    config['ansi_config']['public_network'] = get_public_network(ceph_nodes[0])

    ceph_cluster.ansible_config = config['ansi_config']
    ceph_cluster.custom_config = test_data.get('custom-config')
    ceph_cluster.custom_config_file = test_data.get('custom-config-file')
    ceph_cluster.use_cdn = config.get('use_cdn')

    ceph_installer = ceph_cluster.get_ceph_object('installer')
    ansible_dir = '/usr/share/ceph-ansible'

    if config.get('skip_setup') is True:
        log.info("Skipping setup of ceph cluster")
        return 0

    # record the target install version for this upgrade
    test_data['install_version'] = build
    log.info("Previous install version: {}".format(prev_install_version))

    # retrieve pre-upgrade versions and initialize container counts
    pre_upgrade_versions = get_ceph_versions(ceph_cluster.get_nodes(),
                                             containerized)
    pre_upgrade_container_counts = {}

    # setup packages based on build
    ceph_cluster.setup_packages(base_url, hotfix_repo, installer_url,
                                ubuntu_repo, build)

    # backup existing hosts file and ansible config
    ceph_installer.exec_command(
        cmd='cp {}/hosts /tmp/hosts'.format(ansible_dir))
    ceph_installer.exec_command(
        cmd='cp {}/group_vars/all.yml /tmp/all.yml'.format(ansible_dir))

    # update ceph-ansible
    ceph_installer.install_ceph_ansible(build, upgrade=True)

    # restore hosts file
    ceph_installer.exec_command(
        sudo=True, cmd='cp /tmp/hosts {}/hosts'.format(ansible_dir))

    # if upgrading from version 2 to 3, update the hosts file to collocate mgrs with mons
    if prev_install_version.startswith('2') and build.startswith('3'):
        collocate_mons_with_mgrs(ceph_cluster, ansible_dir)

    # configure fetch directory path
    if config.get('ansi_config').get('fetch_directory') is None:
        config['ansi_config']['fetch_directory'] = '~/fetch/'

    # set the docker image tag if necessary
    if containerized and config.get('ansi_config').get(
            'docker-insecure-registry'):
        config['ansi_config'][
            'ceph_docker_image_tag'] = get_latest_container_image_tag(build)
    log.info("gvar: {}".format(config.get('ansi_config')))
    gvar = yaml.dump(config.get('ansi_config'), default_flow_style=False)

    # create all.yml
    log.info("global vars {}".format(gvar))
    gvars_file = ceph_installer.write_file(
        sudo=True,
        file_name='{}/group_vars/all.yml'.format(ansible_dir),
        file_mode='w')
    gvars_file.write(gvar)
    gvars_file.flush()

    # retrieve container count if containerized
    if containerized:
        pre_upgrade_container_counts = get_container_counts(ceph_cluster)

    # configure insecure registry if necessary
    if config.get('docker-insecure-registry'):
        ceph_cluster.setup_insecure_registry()

    # build the rolling_update playbook command; on builds older than 4 the
    # playbook must first be copied out of infrastructure-playbooks
    jewel_minor_update = build.startswith('2')
    if build.startswith('4'):
        cmd = 'cd {}; ' \
              'ANSIBLE_STDOUT_CALLBACK=debug ' \
              'ansible-playbook -e ireallymeanit=yes -vvvv -i ' \
              'hosts infrastructure-playbooks/rolling_update.yml'.format(ansible_dir)
    else:
        ceph_installer.exec_command(
            sudo=True,
            cmd='cd {} ; cp infrastructure-playbooks/rolling_update.yml .'.
            format(ansible_dir))
        cmd = 'cd {}; ' \
              'ANSIBLE_STDOUT_CALLBACK=debug ' \
              'ansible-playbook -e ireallymeanit=yes -vvvv -i hosts rolling_update.yml'.format(ansible_dir)
    if jewel_minor_update:
        cmd += " -e jewel_minor_update=true"
        log.info("Upgrade is jewel_minor_update, cmd: {cmd}".format(cmd=cmd))
    out, rc = ceph_installer.exec_command(cmd=cmd, long_running=True)

    if rc != 0:
        log.error("Failed during upgrade (rc = {})".format(rc))
        return rc

    # set build to new version
    log.info("Setting install_version to {build}".format(build=build))
    test_data['install_version'] = build
    ceph_cluster.rhcs_version = build

    # check that all mons and OSDs are in the correct state
    num_osds = ceph_cluster.ceph_demon_stat['osd']
    num_mons = ceph_cluster.ceph_demon_stat['mon']
    test_data['ceph-ansible'] = {
        'num-osds': num_osds,
        'num-mons': num_mons,
        'rhbuild': build
    }

    # compare pre and post upgrade versions
    if skip_version_compare:
        log.warning("Skipping version comparison.")
    else:
        if not jewel_minor_update:
            post_upgrade_versions = get_ceph_versions(ceph_nodes,
                                                      containerized)
            version_compare_fail = compare_ceph_versions(
                pre_upgrade_versions, post_upgrade_versions)
            if version_compare_fail:
                return version_compare_fail

    # compare pre and post upgrade container counts
    if containerized:
        post_upgrade_container_counts = get_container_counts(ceph_cluster)
        container_count_fail = compare_container_counts(
            pre_upgrade_container_counts, post_upgrade_container_counts,
            prev_install_version)
        if container_count_fail:
            return container_count_fail
    return ceph_cluster.check_health(build, timeout=config.get('timeout', 300))
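
Before re-running ceph-ansible, the upgrade path above regenerates group_vars/all.yml by YAML-dumping the in-memory `ansi_config`. The self-contained sketch below isolates that serialization step, with placeholder values for the keys the code injects.

import yaml

# Placeholder ansi_config mirroring the keys Example #3 injects before the dump.
ansi_config = {
    "public_network": "10.0.0.0/24",    # placeholder; set from get_public_network()
    "fetch_directory": "~/fetch/",      # default applied when the key is unset
    "ceph_docker_image_tag": "latest",  # placeholder; set from get_latest_container_image_tag(build)
}
gvar = yaml.dump(ansi_config, default_flow_style=False)
print(gvar)  # this string is what gets written to {ansible_dir}/group_vars/all.yml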
Example #4
def run(ceph_cluster, **kw):
    log.info("Running test")
    ceph_nodes = kw.get("ceph_nodes")
    log.info("Running ceph ansible test")
    config = kw.get("config")
    test_data = kw.get("test_data")
    prev_install_version = test_data["install_version"]
    skip_version_compare = config.get("skip_version_compare")
    containerized = config.get("ansi_config").get("containerized_deployment")
    build = config.get("build", config.get("rhbuild"))
    log.info("Build for upgrade: {build}".format(build=build))
    cluster_name = config.get("ansi_config").get("cluster")

    ubuntu_repo = config.get("ubuntu_repo")
    hotfix_repo = config.get("hotfix_repo")
    cloud_type = config.get("cloud-type", "openstack")
    base_url = config.get("base_url")
    installer_url = config.get("installer_url")
    config["ansi_config"]["public_network"] = get_public_network(ceph_nodes[0])

    ceph_cluster.ansible_config = config["ansi_config"]
    ceph_cluster.custom_config = test_data.get("custom-config")
    ceph_cluster.custom_config_file = test_data.get("custom-config-file")
    ceph_cluster.use_cdn = config.get("use_cdn")

    config["ansi_config"].update(
        set_container_info(ceph_cluster, config, ceph_cluster.use_cdn,
                           containerized))

    # Translate RGW node to ip address for Multisite
    rgw_pull_host = config["ansi_config"].get("rgw_pullhost")
    if rgw_pull_host:
        ceph_cluster.ansible_config["rgw_pullhost"] = translate_to_ip(
            kw["ceph_cluster_dict"], ceph_cluster.name, rgw_pull_host)

    ceph_installer = ceph_cluster.get_ceph_object("installer")
    ansible_dir = "/usr/share/ceph-ansible"

    if config.get("skip_setup") is True:
        log.info("Skipping setup of ceph cluster")
        return 0

    # record the target install version for this upgrade
    test_data["install_version"] = build
    log.info("Previous install version: {}".format(prev_install_version))

    # retrieve pre-upgrade versions and initialize container counts
    pre_upgrade_versions = get_ceph_versions(ceph_cluster.get_nodes(),
                                             containerized)
    pre_upgrade_container_counts = {}

    # setup packages based on build
    ceph_cluster.setup_packages(base_url, hotfix_repo, installer_url,
                                ubuntu_repo, build, cloud_type)

    # backup existing hosts file and ansible config
    ceph_installer.exec_command(
        cmd="cp {}/hosts /tmp/hosts".format(ansible_dir))
    ceph_installer.exec_command(
        cmd="cp {}/group_vars/all.yml /tmp/all.yml".format(ansible_dir))

    # update ceph-ansible
    ceph_installer.install_ceph_ansible(build, upgrade=True)

    # restore hosts file
    ceph_installer.exec_command(
        sudo=True, cmd="cp /tmp/hosts {}/hosts".format(ansible_dir))

    # if upgrading from version 2 to 3, update the hosts file to collocate mgrs with mons
    if prev_install_version.startswith("2") and build.startswith("3"):
        collocate_mons_with_mgrs(ceph_cluster, ansible_dir)

    # configure fetch directory path
    if config.get("ansi_config").get("fetch_directory") is None:
        config["ansi_config"]["fetch_directory"] = "~/fetch/"

    # set the docker image tag if necessary
    if containerized and config.get("ansi_config").get(
            "docker-insecure-registry"):
        config["ansi_config"][
            "ceph_docker_image_tag"] = get_latest_container_image_tag(build)
    log.info("gvar: {}".format(config.get("ansi_config")))
    gvar = yaml.dump(config.get("ansi_config"), default_flow_style=False)

    # create all.yml
    log.info("global vars {}".format(gvar))
    gvars_file = ceph_installer.remote_file(
        sudo=True,
        file_name="{}/group_vars/all.yml".format(ansible_dir),
        file_mode="w")
    gvars_file.write(gvar)
    gvars_file.flush()

    # retrieve container count if containerized
    if containerized:
        pre_upgrade_container_counts = get_container_counts(ceph_cluster)

    # configure insecure registry if necessary
    if config.get("docker-insecure-registry"):
        ceph_cluster.setup_insecure_registry()

    # build the rolling_update playbook command; on builds older than 4 the
    # playbook must first be copied out of infrastructure-playbooks
    jewel_minor_update = build.startswith("2")
    if build.startswith("4") or build.startswith("5"):
        cmd = ("cd {}; "
               "ANSIBLE_STDOUT_CALLBACK=debug "
               "ansible-playbook -e ireallymeanit=yes -vvvv -i "
               "hosts infrastructure-playbooks/rolling_update.yml".format(
                   ansible_dir))
    else:
        ceph_installer.exec_command(
            sudo=True,
            cmd="cd {} ; cp infrastructure-playbooks/rolling_update.yml .".
            format(ansible_dir),
        )
        cmd = (
            "cd {}; "
            "ANSIBLE_STDOUT_CALLBACK=debug "
            "ansible-playbook -e ireallymeanit=yes -vvvv -i hosts rolling_update.yml"
            .format(ansible_dir))
    if jewel_minor_update:
        cmd += " -e jewel_minor_update=true"
        log.info("Upgrade is jewel_minor_update, cmd: {cmd}".format(cmd=cmd))

    if config.get("ansi_cli_args"):
        cmd += config_dict_to_string(config["ansi_cli_args"])

    out, rc = ceph_installer.exec_command(cmd=cmd, long_running=True)

    if rc != 0:
        log.error("Failed during upgrade (rc = {})".format(rc))
        return rc

    # set build to new version
    log.info("Setting install_version to {build}".format(build=build))
    test_data["install_version"] = build
    ceph_cluster.rhcs_version = build

    # check that all mons and OSDs are in the correct state
    num_osds = ceph_cluster.ceph_demon_stat["osd"]
    num_mons = ceph_cluster.ceph_demon_stat["mon"]
    test_data["ceph-ansible"] = {
        "num-osds": num_osds,
        "num-mons": num_mons,
        "rhbuild": build,
    }

    # compare pre and post upgrade versions
    if skip_version_compare:
        log.warning("Skipping version comparison.")
    else:
        if not jewel_minor_update:
            post_upgrade_versions = get_ceph_versions(ceph_nodes,
                                                      containerized)
            version_compare_fail = compare_ceph_versions(
                pre_upgrade_versions, post_upgrade_versions)
            if version_compare_fail:
                return version_compare_fail

    # compare pre and post upgrade container counts
    if containerized:
        post_upgrade_container_counts = get_container_counts(ceph_cluster)
        container_count_fail = compare_container_counts(
            pre_upgrade_container_counts,
            post_upgrade_container_counts,
            prev_install_version,
        )
        if container_count_fail:
            return container_count_fail

    client = ceph_cluster.get_ceph_object("mon")

    if build.startswith("5"):

        cmd = ("cd {};"
               "ANSIBLE_STDOUT_CALLBACK=debug;"
               "ansible-playbook -e ireallymeanit=yes -vvvv -i "
               "hosts infrastructure-playbooks/cephadm-adopt.yml".format(
                   ansible_dir))
        out, rc = ceph_installer.exec_command(cmd=cmd, long_running=True)

        if rc != 0:
            log.error("Failed during cephadm adopt (rc = {})".format(rc))
            return rc

        client = ceph_cluster.get_nodes("mon")[0]

    return ceph_cluster.check_health(
        build,
        cluster_name=cluster_name,
        client=client,
        timeout=config.get("timeout", 300),
    )
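
Example #4's playbook selection reduces to a small branch on the build version. The condensed, standalone sketch below reproduces that logic in isolation; `build` is a placeholder string.

# Condensed, standalone version of Example #4's rolling_update command assembly.
ansible_dir = "/usr/share/ceph-ansible"
build = "4.2"  # placeholder build string

if build.startswith(("4", "5")):
    # RHCS 4+ runs the playbook in place under infrastructure-playbooks/
    playbook = "infrastructure-playbooks/rolling_update.yml"
else:
    # older builds first copy rolling_update.yml into ansible_dir
    playbook = "rolling_update.yml"

cmd = ("cd {}; ANSIBLE_STDOUT_CALLBACK=debug "
       "ansible-playbook -e ireallymeanit=yes -vvvv -i hosts {}".format(ansible_dir, playbook))
if build.startswith("2"):
    cmd += " -e jewel_minor_update=true"  # jewel minor updates require this extra var
print(cmd)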