Code example #1
File: test_utils.py  Project: SrinivasaBharath/cephci
def test_get_cephci_config_raises(monkeypatch):
    """Test exception thrown when invalid file is provided."""
    def mock_return(x):
        return "./"

    monkeypatch.setattr(os.path, "expanduser", mock_return)
    with pytest.raises(IOError):
        get_cephci_config()
Code example #2
File: test_utils.py  Project: udaysk23/cephci
def test_get_cephci_config_raises(mock_expanduser):
    """Test exception thrown when invalid file is provided."""
    try:
        mock_expanduser.return_value = "./"
        get_cephci_config()
    except IOError as exception:
        assert mock_expanduser.call_count == 1
        assert exception.errno == 2
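
Taken together, these two tests pin down the contract of get_cephci_config(): it expands the user's home directory via os.path.expanduser and raises IOError (errno 2) when ~/.cephci.yaml is missing. Below is a minimal sketch of a helper with that behaviour, assuming it simply loads the YAML file; the real cephci implementation may differ.

import os

import yaml  # PyYAML; assumed third-party dependency for this sketch


def get_cephci_config():
    """Load ~/.cephci.yaml and return its contents as a dict.

    Minimal sketch only; the real helper lives in cephci's utility module.
    """
    path = os.path.join(os.path.expanduser("~"), ".cephci.yaml")
    # open() raises FileNotFoundError (an OSError/IOError with errno 2)
    # when the file is absent, which is exactly what the tests above assert.
    with open(path) as config_file:
        return yaml.safe_load(config_file)
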
Code example #3
File: install_prereq.py  Project: yogesh-mane/cephci
def registry_login(ceph, distro_ver):
    """
    login to this registry 'registry.redhat.io' on all nodes
        docker for RHEL 7.x and podman for RHEL 8.x
    """
    cdn_cred = get_cephci_config().get("cdn_credentials")
    if not cdn_cred:
        log.warning("no cdn_credentials in ~/.cephci.yaml."
                    " Not logging into registry.redhat.io.")
        return
    user = cdn_cred.get("username")
    pwd = cdn_cred.get("password")
    if not (user and pwd):
        log.warning("username and password not found for cdn_credentials")
        return

    container = "docker"
    if distro_ver.startswith("8"):
        container = "podman"

    ceph.exec_command(cmd="sudo yum install -y {c}".format(c=container),
                      long_running=True)

    if container == "docker":
        ceph.exec_command(cmd="sudo systemctl restart docker",
                          long_running=True)

    ceph.exec_command(
        cmd="sudo {c} login -u {u} -p {p} registry.redhat.io".format(
            c=container, u=user, p=pwd),
        check_ec=True,
    )
Code example #4
File: install_prereq.py  Project: jennkimerson/cephci
def registry_login(ceph, distro_ver):
    ''' login to this registry 'registry.redhat.io' on all nodes
        docker for RHEL 7.x and podman for RHEL 8.x'''
    cdn_cred = get_cephci_config().get('cdn_credentials')
    if not cdn_cred:
        log.warn('cdn_credentials not found')
        return
    user = cdn_cred.get('username')
    pwd = cdn_cred.get('password')
    if not (user and pwd):
        log.warn('username and password not found for cdn_credentials')
        return

    container = 'docker'
    if distro_ver.startswith('8'):
        container = 'podman'

    ceph.exec_command(cmd='sudo yum install -y {c}'.format(c=container),
                      long_running=True)

    if container == 'docker':
        ceph.exec_command(cmd='sudo systemctl restart docker',
                          long_running=True)

    ceph.exec_command(
        cmd='sudo {c} login -u {u} -p {p} registry.redhat.io'.format(
            c=container, u=user, p=pwd),
        check_ec=True)
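
Both variants of registry_login above read the same two keys from the configuration. The structure they expect get_cephci_config() to return looks roughly like the following; the values are placeholders, and cephci.yaml.template in the project remains the authoritative reference.

# Illustrative shape only -- the real values come from ~/.cephci.yaml.
cephci_config = {
    "cdn_credentials": {
        "username": "<registry.redhat.io username>",
        "password": "<registry.redhat.io password>",
    },
}
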
Code example #5
def run(ceph_cluster: Ceph, config: Dict, **kwargs) -> int:
    """
    Entry point for module execution.

    Args:
        ceph_cluster    The cluster participating in the test.
        config          Configuration passed to the test
        kwargs          Additional configurations passed to the test.

    Returns:
        0 on Success else 1

    Raises:
        CommandFailure

    Example:

        - test:
            abort-on-fail: false
            config:
              install:
                - agent
            desc: Install and configure vault agent
            module: install_vault.py
            name: install vault agent
    """
    if "agent" in config["install"]:
        vault_cfg = get_cephci_config().get("vault")
        _install_agent(ceph_cluster, vault_cfg)

        client = ceph_cluster.get_nodes(role="client")[0]
        _configure_rgw_daemons(client, vault_cfg)

    return 0
Code example #6
    def __init__(self):
        """Initialize the instance using global configuration."""
        self._conf = get_cephci_config()["cos"]
        self._api_key = self._conf["api-key"]
        self._resource_id = self._conf["resource-id"]

        self.endpoint = self._conf["endpoint"]
        self.location_constraint = dict(
            {"LocationConstraint": self._conf["location-constraint"]})

        self.client = ibm_boto3.client(
            "s3",
            ibm_api_key_id=self._api_key,
            ibm_service_instance_id=self._resource_id,
            config=Config(signature_version="oauth"),
            endpoint_url=self.endpoint,
        )

        self.resource = ibm_boto3.resource(
            "s3",
            ibm_api_key_id=self._api_key,
            ibm_service_instance_id=self._resource_id,
            config=Config(signature_version="oauth"),
            endpoint_url=self.endpoint,
        )
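
The constructor above reads four entries from the "cos" section of the configuration. An illustrative outline of that section, with the key names taken from the code and placeholder values, is shown here.

# Placeholder values; the real entries live under the "cos" key of
# ~/.cephci.yaml and belong to an IBM Cloud Object Storage instance.
cos_section = {
    "api-key": "<IBM Cloud API key>",
    "resource-id": "<COS service instance resource ID>",
    "endpoint": "<regional COS endpoint URL>",
    "location-constraint": "<bucket location constraint>",
}
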
Code example #7
File: test_utils.py  Project: SrinivasaBharath/cephci
def test_get_cephi_config(monkeypatch, fixtures_dir):
    """Test loading of cephci configuration."""
    def mock_return(x):
        return fixtures_dir

    monkeypatch.setattr(os.path, "expanduser", mock_return)
    ceph_cfg = get_cephci_config()

    assert ceph_cfg.get("email", {}).get("address") == "*****@*****.**"
Code example #8
def registry_login(ceph, distro_ver):
    """
    Login to the given Container registries provided in the configuration.

    In this method, docker or podman is installed based on OS.
    """
    container = "podman"
    if distro_ver.startswith("7"):
        container = "docker"

    ceph.exec_command(cmd="sudo yum install -y {c}".format(c=container),
                      long_running=True)

    if container == "docker":
        ceph.exec_command(cmd="sudo systemctl restart docker",
                          long_running=True)

    config = get_cephci_config()
    registries = [{
        "registry": "registry.redhat.io",
        "user": config["cdn_credentials"]["username"],
        "passwd": config["cdn_credentials"]["password"],
    }]

    if (config.get("registry_credentials")
            and config["registry_credentials"]["registry"] !=
            "registry.redhat.io"):
        registries.append({
            "registry":
            config["registry_credentials"]["registry"],
            "user":
            config["registry_credentials"]["username"],
            "passwd":
            config["registry_credentials"]["password"],
        })
    auths = {}
    for r in registries:
        b64_auth = base64.b64encode(
            f"{r['user']}:{r['passwd']}".encode("ascii"))
        auths[r["registry"]] = {"auth": b64_auth.decode("utf-8")}
    auths_dict = {"auths": auths}
    ceph.exec_command(sudo=True, cmd="mkdir -p ~/.docker")
    ceph.exec_command(cmd="mkdir -p ~/.docker")
    auths_file_sudo = ceph.remote_file(sudo=True,
                                       file_name="/root/.docker/config.json",
                                       file_mode="w")
    auths_file = ceph.remote_file(
        file_name="/home/cephuser/.docker/config.json", file_mode="w")
    files = [auths_file_sudo, auths_file]
    for file in files:
        file.write(json.dumps(auths_dict, indent=4))
        file.flush()
        file.close()
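
The auths payload assembled above follows the standard Docker/Podman config.json layout: each registry name maps to a base64-encoded user:password pair. The standalone snippet below reproduces just that encoding step with dummy credentials; registry_login() takes the real ones from get_cephci_config().

import base64
import json

# Dummy credentials for illustration only.
user, passwd = "example-user", "example-pass"
b64_auth = base64.b64encode(f"{user}:{passwd}".encode("ascii"))
auths_dict = {"auths": {"registry.redhat.io": {"auth": b64_auth.decode("utf-8")}}}
print(json.dumps(auths_dict, indent=4))
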
Code example #9
File: polarion.py  Project: sidhant-agrawal/cephci
def post_to_polarion(tc):
    """
    Post test results to Polarion.
    It returns nothing and is essentially a no-op
    when no Polarion details are found in the test object.

    Args:
       tc: test case object with details

    Returns:
      None
    """
    current_dir = os.getcwd()
    polarion_cred = get_cephci_config()['polarion']

    if tc['polarion-id'] is not None:
        # add polarion attributes
        ids = tc['polarion-id'].split(',')
        tc['space'] = 'Smoke Suite'
        tc['test_run_id'] = tc['ceph-version'] + "_" + tc['suite-name'] + "_" + tc['distro'] + "_Automated_Smoke_Runs"
        tc['test_run_id'] = tc['test_run_id'].replace('.', '_')
        log.info("Updating test run: %s " % tc['test_run_id'])
        tc['ceph-build'] = '_'.join(
            [_f for _f in [tc['ceph-version'], tc['ceph-ansible-version'], tc['compose-id']] if _f])
        if tc.get('docker-containers-list'):
            tc['docker-container'] = '\ncontainer: {container}'.format(
                container=','.join(list(set(tc.get('docker-containers-list')))))
        tc['test_case_title'] = tc['desc']
        if tc['desc'] is None:
            log.info("cannot update polarion with no description")
            return
        if tc['status'] == "Pass":
            tc['result'] = ''
        else:
            tc['result'] = '<failure message="test failed" type="failure"/>'
        current_dir += '/templates/'
        j2_env = Environment(loader=FileSystemLoader(current_dir),
                             trim_blocks=True)
        for id in ids:
            tc['polarion-id'] = id
            f = NamedTemporaryFile(delete=False)
            test_results = j2_env.get_template('importer-template.xml').render(tc=tc)
            log.info("updating results for %s " % id)
            f.write(test_results.encode())
            f.close()
            url = polarion_cred.get('url')
            user = polarion_cred.get('username')
            pwd = polarion_cred.get('password')
            call(['curl', '-k', '-u',
                  '{user}:{pwd}'.format(user=user, pwd=pwd),
                  '-X', 'POST', '-F', 'file=@{name}'.format(name=f.name),
                  url])
            os.unlink(f.name)
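
The curl call above uploads the rendered xUnit file to the Polarion importer using basic auth with TLS verification disabled. A hedged requests-based equivalent of that invocation is sketched below; url, user, pwd and f.name are the variables from the loop above, and the only assumption is that an HTTP client other than curl is acceptable.

import requests  # assumed available; cephci itself shells out to curl here

# -k -> verify=False, -u user:pwd -> auth=, -F file=@... -> multipart files=
with open(f.name, "rb") as payload:
    requests.post(url, auth=(user, pwd), files={"file": payload}, verify=False)
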
Code example #10
File: ceph_admin.py  Project: SrinivasaBharath/cephci
    def bootstrap(self):
        """
        Cephadm bootstrap
        """
        # Create and set permission to ceph directory
        self.installer.exec_command(cmd='sudo mkdir -p /etc/ceph')
        self.installer.exec_command(cmd='sudo chmod 777 /etc/ceph')

        # Execute bootstrap with MON ip-address
        # Construct bootstrap command
        # 1) Skip default mon, mgr & crash specs
        # 2) Skip automatic dashboard provisioning
        cdn_cred = get_cephci_config().get('cdn_credentials')

        cmd = 'sudo cephadm -v '
        if not self.registry and self.image:
            cmd += '--image {image} '.format(image=self.image)

        cmd += 'bootstrap ' \
               '--registry-url registry.redhat.io ' \
               '--registry-username {user} ' \
               '--registry-password {password} ' \
               '--orphan-initial-daemons ' \
               '--skip-monitoring-stack ' \
               '--mon-ip {mon_ip}'

        cmd = cmd.format(user=cdn_cred.get('username'),
                         password=cdn_cred.get('password'),
                         mon_ip=self.first_mon.ip_address)

        out, err = self.installer.exec_command(
            cmd=cmd,
            timeout=1800,
            check_ec=True,
        )

        logger.info("Bootstrap output : %s", out.read().decode())
        logger.error("Bootstrap error: %s", err.read().decode())

        if not self.image:
            self.get_image()
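
For reference, the command assembled by this bootstrap() renders to a single cephadm invocation of the following form; image, credentials and monitor IP are placeholders filled in at runtime, and the --image flag is only present when a custom image is configured without a registry.

    sudo cephadm -v --image <image> bootstrap --registry-url registry.redhat.io --registry-username <user> --registry-password <password> --orphan-initial-daemons --skip-monitoring-stack --mon-ip <mon_ip>
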
Code example #11
def construct_registry(cls, registry: str, json_file: bool = False):
    """
    Construct registry credentials for bootstrapping cluster

    Args:
        cls (CephAdmin): class object
        registry (Str): registry name
        json_file (Bool): registry credentials in JSON file (default:False)

    Example::

        json_file:
            - False : Constructs registry credentials for bootstrap
            - True  : Creates file with registry name attached with it,
                      and saved as /tmp/<registry>.json file.

    Returns:
        constructed string of registry credentials ( Str )
    """
    # Todo: Retrieve credentials based on registry name
    _config = get_cephci_config()
    cdn_cred = _config.get("registry_credentials", _config["cdn_credentials"])
    reg_args = {
        "registry-url": cdn_cred.get("registry", registry),
        "registry-username": cdn_cred.get("username"),
        "registry-password": cdn_cred.get("password"),
    }
    if json_file:
        reg = dict((k.lstrip("registry-"), v) for k, v in reg_args.items())

        # Create file and return file_path
        temp_file = tempfile.NamedTemporaryFile(suffix=".json")
        reg_args = {"registry-json": temp_file.name}
        reg_file = cls.installer.node.remote_file(sudo=True,
                                                  file_name=temp_file.name,
                                                  file_mode="w")
        reg_file.write(json.dumps(reg, indent=4))
        reg_file.flush()

    return config_dict_to_string(reg_args)
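
construct_registry() leaves the final flag rendering to config_dict_to_string(), whose implementation is not shown on this page. A plausible minimal stand-in is sketched below, assuming it turns each key into a --key value pair and bare booleans into flag-only options (as base_cmd_args: verbose: true in code example #16 suggests); this is an assumption, not the project's code.

def config_dict_to_string_sketch(data: dict) -> str:
    """Hypothetical stand-in for cephci's config_dict_to_string:
    {"registry-url": "registry.redhat.io"} -> " --registry-url registry.redhat.io"."""
    rendered = ""
    for key, value in data.items():
        if value is True:          # booleans become bare flags
            rendered += f" --{key}"
        elif value is not False:   # skip disabled flags entirely
            rendered += f" --{key} {value}"
    return rendered
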
Code example #12
File: install_prereq.py  Project: yogesh-mane/cephci
def setup_subscription_manager(ceph, is_production=False, timeout=1800):
    timeout = datetime.timedelta(seconds=timeout)
    starttime = datetime.datetime.now()
    log.info("Subscribing {ip} host with {timeout} timeout".format(
        ip=ceph.ip_address, timeout=timeout))
    while True:
        try:
            config_ = get_cephci_config()
            command = "sudo subscription-manager --force register "
            if is_production:
                command += "--serverurl=subscription.rhsm.redhat.com:443/subscription "

                username_ = config_["cdn_credentials"]["username"]
                password_ = config_["cdn_credentials"]["password"]

            else:
                command += (
                    "--serverurl=subscription.rhsm.stage.redhat.com:443/subscription "
                )

                username_ = config_["stage_credentials"]["username"]
                password_ = config_["stage_credentials"]["password"]

            command += f"--baseurl=https://cdn.redhat.com --username={username_} --password={password_}"

            ceph.exec_command(
                cmd=command,
                timeout=720,
            )

            ceph.exec_command(
                cmd=
                "sudo subscription-manager attach --pool 8a99f9af795d57ab01797e572e860569",
                timeout=720,
            )
            break
        except (KeyError, AttributeError):
            raise RuntimeError(
                "Require the {} to be set in ~/.cephci.yaml, Please refer cephci.yaml.template"
                .format("cdn_credentials"
                        if is_production else "stage_credentials"))

        except BaseException:
            if datetime.datetime.now() - starttime > timeout:
                try:
                    out, err = ceph.exec_command(
                        cmd="cat /var/log/rhsm/rhsm.log", timeout=120)
                    rhsm_log = out.read().decode()
                except BaseException:
                    rhsm_log = "No Log Available"
                raise RuntimeError(
                    "Failed to subscribe {ip} with {timeout} timeout:\n {stack_trace}\n\n rhsm.log:\n{log}"
                    .format(
                        ip=ceph.ip_address,
                        timeout=timeout,
                        stack_trace=traceback.format_exc(),
                        log=rhsm_log,
                    ))
            else:
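                # Note: this generator is re-created on every retry, so
                # next(wait) always yields 1 and the loop effectively sleeps
                # one second between attempts rather than backing off.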
                wait = iter(x for x in itertools.count(1, 10))
                time.sleep(next(wait))
    ceph.exec_command(cmd="sudo subscription-manager repos --disable=*",
                      long_running=True)
Code example #13
def run(args):

    import urllib3

    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    # Mandatory arguments
    rhbuild = args["--rhbuild"]
    suite_files = args["--suite"]

    glb_file = args.get("--global-conf")
    if args.get("--cluster-conf"):
        glb_file = args["--cluster-conf"]

    # Deciders
    reuse = args.get("--reuse", None)
    cloud_type = args.get("--cloud", "openstack")

    # These are not mandatory options
    inventory_file = args.get("--inventory")
    osp_cred_file = args.get("--osp-cred")

    osp_cred = load_file(osp_cred_file) if osp_cred_file else dict()
    cleanup_name = args.get("--cleanup", None)

    version2 = args.get("--v2", False)
    ignore_latest_nightly_container = args.get("--ignore-latest-container",
                                               False)

    # Set log directory and get absolute path
    console_log_level = args.get("--log-level")
    log_directory = args.get("--log-dir")

    run_id = generate_unique_id(length=6)
    run_dir = create_run_dir(run_id, log_directory)
    metadata = TestMetaData(
        run_id=run_id,
        rhbuild=rhbuild,
        logstash=get_cephci_config().get("logstash", {}),
    )
    if log.config.get("logstash"):
        host = log.config["logstash"]["host"]
        port = log.config["logstash"]["port"]
        version = log.config["logstash"].get("version", 1)
        handler = logstash.TCPLogstashHandler(
            host=host,
            port=port,
            version=version,
        )
        handler.setLevel(log.log_level)
        root.addHandler(handler)

        server = f"tcp://{host}:{port}"
        log._logger.debug(f"Log events are also pushed to {server}")

    startup_log = os.path.join(run_dir, "startup.log")

    handler = logging.FileHandler(startup_log)
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    root.addHandler(handler)

    if console_log_level:
        ch.setLevel(logging.getLevelName(console_log_level.upper()))

    log.info(f"Startup log location: {startup_log}")
    run_start_time = datetime.datetime.now()
    trigger_user = getuser()

    platform = None
    build = None
    base_url = None
    ubuntu_repo = None
    docker_registry = None
    docker_image = None
    docker_tag = None

    ceph_name = None
    compose_id = None

    if cleanup_name and not osp_cred:
        raise Exception("Need cloud credentials to perform cleanup.")

    if cleanup_name:
        if cloud_type == "openstack":
            cleanup_ceph_nodes(osp_cred, cleanup_name)
        elif cloud_type == "ibmc":
            cleanup_ibmc_ceph_nodes(osp_cred, cleanup_name)
        else:
            log.warning("Unknown cloud type.")

        return 0

    if glb_file is None and not reuse:
        raise Exception("Unable to gather information about cluster layout.")

    if osp_cred_file is None and not reuse and cloud_type in [
            "openstack", "ibmc"
    ]:
        raise Exception("Require cloud credentials to create cluster.")

    if inventory_file is None and not reuse and cloud_type in [
            "openstack", "ibmc"
    ]:
        raise Exception(
            "Require system configuration information to provision.")

    if not version2:
        # Get ceph cluster version name
        with open("rhbuild.yaml") as fd:
            rhbuild_file = yaml.safe_load(fd)

        ceph = rhbuild_file["ceph"]
        rhbuild_ = None
        try:
            ceph_name, rhbuild_ = next(
                filter(
                    lambda x: x,
                    [(ceph[x]["name"], x)
                     for x in ceph if x == rhbuild.split(".")[0]],
                ))
        except StopIteration:
            print(
                "\nERROR: Please provide correct RH build version, run exited."
            )
            sys.exit(1)

        # Get base-url
        composes = ceph[rhbuild_]["composes"]
        if not base_url:

            if rhbuild in composes:
                base_url = composes[rhbuild or "latest"]["base_url"]

        # Get ubuntu-repo
        if not ubuntu_repo and rhbuild.startswith("3"):
            if rhbuild in composes:
                ubuntu_repo = composes[rhbuild or "latest"]["ubuntu_repo"]

        if os.environ.get("TOOL") is not None:
            ci_message = json.loads(os.environ["CI_MESSAGE"])
            compose_id = ci_message["compose_id"]
            compose_url = ci_message["compose_url"] + "/"
            product_name = ci_message.get("product_name", None)
            product_version = ci_message.get("product_version", None)
            log.info("COMPOSE_URL = %s ", compose_url)

            if os.environ["TOOL"] == "pungi":
                # is a rhel compose
                log.info("trigger on CI RHEL Compose")
            elif os.environ["TOOL"] == "rhcephcompose":
                # is a ubuntu compose
                log.info("trigger on CI Ubuntu Compose")
                ubuntu_repo = compose_url
                log.info("using ubuntu repo " + ubuntu_repo)
            elif os.environ["TOOL"] == "bucko":
                # is a docker compose
                log.info("Trigger on CI Docker Compose")
                docker_registry, docker_tag = ci_message["repository"].split(
                    "/rh-osbs/rhceph:")
                docker_image = "rh-osbs/rhceph"
                log.info(
                    f"\nUsing docker registry from ci message: {docker_registry} \n"
                    f"Docker image: {docker_image}\nDocker tag:{docker_tag}")
                log.warning("Using Docker insecure registry setting")
                docker_insecure_registry = True

            if product_name == "ceph":
                # is a rhceph compose
                base_url = compose_url
                log.info("using base url " + base_url)

        if not os.environ.get("TOOL") and not ignore_latest_nightly_container:
            try:
                latest_container = get_latest_container(rhbuild)
            except ValueError:
                print(
                    "ERROR:No latest nightly container UMB msg at "
                    "/ceph/cephci-jenkins/latest-rhceph-container-info/ "
                    "specify using the cli args or use --ignore-latest-container"
                )
                sys.exit(1)
            docker_registry = (latest_container.get("docker_registry")
                               if not docker_registry else docker_registry)
            docker_image = (latest_container.get("docker_image")
                            if not docker_image else docker_image)
            docker_tag = (latest_container.get("docker_tag")
                          if not docker_tag else docker_tag)
            log.info(
                f"Using latest nightly docker image - {docker_registry}/{docker_image}:{docker_tag}"
            )
            docker_insecure_registry = True
            log.warning("Using Docker insecure registry setting")
    else:
        platform = args.get("--platform", "rhel-8")
        build = args.get("--build", "latest")

        if not platform:
            raise TestSetupFailure("please provide --platform [rhel-7|rhel-8]")

        if build != "released":
            base_url, docker_registry, docker_image, docker_tag = fetch_build_artifacts(
                build, rhbuild, platform)

    store = args.get("--store", False)

    base_url = args.get("--rhs-ceph-repo") or base_url
    ubuntu_repo = args.get("--ubuntu-repo") or ubuntu_repo
    docker_registry = args.get("--docker-registry") or docker_registry
    docker_image = args.get("--docker-image") or docker_image
    docker_tag = args.get("--docker-tag") or docker_tag
    kernel_repo = args.get("--kernel-repo", None)

    docker_insecure_registry = args.get("--insecure-registry", False)

    post_results = args.get("--post-results")
    skip_setup = args.get("--skip-cluster", False)
    skip_subscription = args.get("--skip-subscription", False)
    post_to_report_portal = args.get("--report-portal", False)
    rp_logger = ReportPortal()

    instances_name = args.get("--instances-name")
    if instances_name:
        instances_name = instances_name.replace(".", "-")

    osp_image = args.get("--osp-image")
    filestore = args.get("--filestore", False)
    ec_pool_vals = args.get("--use-ec-pool", None)
    skip_version_compare = args.get("--skip-version-compare", False)
    custom_config = args.get("--custom-config")
    custom_config_file = args.get("--custom-config-file")
    xunit_results = args.get("--xunit-results", False)

    enable_eus = args.get("--enable-eus", False)
    skip_enabling_rhel_rpms = args.get("--skip-enabling-rhel-rpms", False)

    # load config, suite and inventory yaml files
    conf = load_file(glb_file)
    suite = init_suite.load_suites(suite_files)

    cli_arguments = f"{sys.executable} {' '.join(sys.argv)}"
    log.info(f"The CLI for the current run :\n{cli_arguments}\n")
    log.info(f"RPM Compose source - {base_url}")
    log.info(
        f"Red Hat Ceph Image used - {docker_registry}/{docker_image}:{docker_tag}"
    )

    ceph_version = []
    ceph_ansible_version = []
    distro = []
    clients = []

    inventory = None
    image_name = None
    if inventory_file:
        inventory = load_file(inventory_file)

        if osp_image and inventory.get("instance", {}).get("create"):
            inventory.get("instance").get("create").update(
                {"image-name": osp_image})

        image_name = inventory.get("instance", {}).get("create",
                                                       {}).get("image-name")

        if inventory.get("instance", {}).get("create"):
            distro.append(
                inventory.get("instance").get("create").get("image-name"))

    for cluster in conf.get("globals"):

        if cluster.get("ceph-cluster").get("inventory"):
            cluster_inventory_path = os.path.abspath(
                cluster.get("ceph-cluster").get("inventory"))
            with open(cluster_inventory_path, "r") as inventory_stream:
                cluster_inventory = yaml.safe_load(inventory_stream)
            image_name = (cluster_inventory.get("instance").get("create").get(
                "image-name"))
            distro.append(image_name.replace(".iso", ""))

        # get COMPOSE ID and ceph version
        if build not in ["released", "cvp"]:
            if cloud_type == "openstack" or cloud_type == "baremetal":
                resp = requests.get(base_url + "/COMPOSE_ID", verify=False)
                compose_id = resp.text
            elif cloud_type == "ibmc":
                compose_id = "UNKNOWN"

            if "rhel" == inventory.get("id"):
                if cloud_type == "ibmc":
                    ceph_pkgs = requests.get(base_url + "/Tools/Packages/",
                                             verify=False)
                elif cloud_type == "openstack" or cloud_type == "baremetal":
                    ceph_pkgs = requests.get(
                        base_url + "/compose/Tools/x86_64/os/Packages/",
                        verify=False)
                m = re.search(r"ceph-common-(.*?).x86", ceph_pkgs.text)
                ceph_version.append(m.group(1))
                m = re.search(r"ceph-ansible-(.*?).rpm", ceph_pkgs.text)
                ceph_ansible_version.append(m.group(1))
                log.info("Compose id is: " + compose_id)
            else:
                ubuntu_pkgs = requests.get(
                    ubuntu_repo +
                    "/Tools/dists/xenial/main/binary-amd64/Packages")
                m = re.search(r"ceph\nVersion: (.*)", ubuntu_pkgs.text)
                ceph_version.append(m.group(1))
                m = re.search(r"ceph-ansible\nVersion: (.*)", ubuntu_pkgs.text)
                ceph_ansible_version.append(m.group(1))

    distro = ",".join(list(set(distro)))
    ceph_version = ", ".join(list(set(ceph_version)))
    ceph_ansible_version = ", ".join(list(set(ceph_ansible_version)))
    metadata["rhcs"] = ceph_version
    log.info("Testing Ceph Version: " + ceph_version)
    log.info("Testing Ceph Ansible Version: " + ceph_ansible_version)

    service = None
    suite_name = "::".join(suite_files)
    if post_to_report_portal:
        log.info("Creating report portal session")

        # Only the first file is considered for launch description.
        suite_file_name = suite_name.split("::")[0].split("/")[-1]
        suite_file_name = suite_file_name.strip(".yaml")
        suite_file_name = " ".join(suite_file_name.split("_"))
        _log = run_dir.replace("/ceph/", "http://magna002.ceph.redhat.com/")

        launch_name = f"RHCS {rhbuild} - {suite_file_name}"
        launch_desc = textwrap.dedent("""
            ceph version: {ceph_version}
            ceph-ansible version: {ceph_ansible_version}
            compose-id: {compose_id}
            invoked-by: {user}
            log-location: {_log}
            """.format(
            ceph_version=ceph_version,
            ceph_ansible_version=ceph_ansible_version,
            user=getuser(),
            compose_id=compose_id,
            _log=_log,
        ))
        if docker_image and docker_registry and docker_tag:
            launch_desc = launch_desc + textwrap.dedent("""
                docker registry: {docker_registry}
                docker image: {docker_image}
                docker tag: {docker_tag}
                invoked-by: {user}
                """.format(
                docker_registry=docker_registry,
                docker_image=docker_image,
                user=getuser(),
                docker_tag=docker_tag,
            ))

        qe_tier = get_tier_level(suite_name)
        attributes = dict({
            "rhcs":
            rhbuild,
            "tier":
            qe_tier,
            "ceph_version":
            ceph_version,
            "os":
            platform if platform else "-".join(rhbuild.split("-")[1:]),
        })

        rp_logger.start_launch(name=launch_name,
                               description=launch_desc,
                               attributes=attributes)

    def fetch_test_details(var) -> dict:
        """
        Accepts the test and then provides the parameters of that test as a list.

        :param var: the test collected from the suite file
        :return: Returns a dictionary of the various test params
        """
        details = dict()
        details["docker-containers-list"] = []
        details["name"] = var.get("name")
        details["desc"] = var.get("desc")
        details["file"] = var.get("module")
        details["cli_arguments"] = cli_arguments
        details["polarion-id"] = var.get("polarion-id")
        polarion_default_url = "https://polarion.engineering.redhat.com/polarion/#/project/CEPH/workitem?id="
        details["polarion-id-link"] = "{}{}".format(polarion_default_url,
                                                    details["polarion-id"])
        details["rhbuild"] = rhbuild
        details["cloud-type"] = cloud_type
        details["ceph-version"] = ceph_version
        details["ceph-ansible-version"] = ceph_ansible_version
        details["compose-id"] = compose_id
        details["distro"] = distro
        details["suite-name"] = suite_name
        details["suite-file"] = suite_files
        details["conf-file"] = glb_file
        details["ceph-version-name"] = ceph_name
        details["duration"] = "0s"
        details["status"] = "Not Executed"
        details["comments"] = var.get("comments", str())
        return details

    if reuse is None:
        try:
            ceph_cluster_dict, clients = create_nodes(
                conf,
                inventory,
                osp_cred,
                run_id,
                cloud_type,
                service,
                instances_name,
                enable_eus=enable_eus,
                rp_logger=rp_logger,
            )
        except Exception as err:
            log.error(err)
            tests = suite.get("tests")
            res = []
            for test in tests:
                test = test.get("test")
                tmp = fetch_test_details(test)
                res.append(tmp)
            run_end_time = datetime.datetime.now()
            duration = divmod((run_end_time - run_start_time).total_seconds(),
                              60)
            total_time = {
                "start": run_start_time.strftime("%d %B %Y , %I:%M:%S %p"),
                "end": run_end_time.strftime("%d %B %Y , %I:%M:%S %p"),
                "total": f"{int(duration[0])} mins, {int(duration[1])} secs",
            }
            send_to_cephci = post_results or post_to_report_portal
            info = {
                "status": "Fail",
                "trace": (traceback.format_exc(limit=2)).split("\n"),
            }
            test_res = {
                "result": res,
                "run_id": run_id,
                "trigger_user": trigger_user,
                "run_directory": run_dir,
                "total_time": total_time,
                "info": info,
                "send_to_cephci": send_to_cephci,
            }
            email_results(test_result=test_res)
            return 1
    else:
        ceph_store_nodes = open(reuse, "rb")
        ceph_cluster_dict = pickle.load(ceph_store_nodes)
        ceph_store_nodes.close()
        for cluster_name, cluster in ceph_cluster_dict.items():
            for node in cluster:
                node.reconnect()
    if store:
        ceph_clusters_file = f"rerun/{instances_name}-{run_id}"
        if not os.path.exists(os.path.dirname(ceph_clusters_file)):
            os.makedirs(os.path.dirname(ceph_clusters_file))
        store_cluster_state(ceph_cluster_dict, ceph_clusters_file)

    sys.path.append(os.path.abspath("tests"))
    sys.path.append(os.path.abspath("tests/rados"))
    sys.path.append(os.path.abspath("tests/cephadm"))
    sys.path.append(os.path.abspath("tests/rbd"))
    sys.path.append(os.path.abspath("tests/rbd_mirror"))
    sys.path.append(os.path.abspath("tests/cephfs"))
    sys.path.append(os.path.abspath("tests/iscsi"))
    sys.path.append(os.path.abspath("tests/rgw"))
    sys.path.append(os.path.abspath("tests/ceph_ansible"))
    sys.path.append(os.path.abspath("tests/ceph_installer"))
    sys.path.append(os.path.abspath("tests/mgr"))
    sys.path.append(os.path.abspath("tests/dashboard"))
    sys.path.append(os.path.abspath("tests/misc_env"))
    sys.path.append(os.path.abspath("tests/parallel"))
    sys.path.append(os.path.abspath("tests/upgrades"))

    tests = suite.get("tests")
    tcs = []
    jenkins_rc = 0
    # use ceph_test_data to pass around dynamic data between tests
    ceph_test_data = dict()
    ceph_test_data["custom-config"] = custom_config
    ceph_test_data["custom-config-file"] = custom_config_file

    # Initialize test return code
    rc = 0

    for test in tests:
        test = test.get("test")
        parallel = test.get("parallel")
        tc = fetch_test_details(test)
        test_file = tc["file"]
        report_portal_description = tc["desc"] or ""
        unique_test_name = create_unique_test_name(tc["name"], test_names)
        test_names.append(unique_test_name)
        tc["log-link"] = configure_logger(unique_test_name, run_dir)
        mod_file_name = os.path.splitext(test_file)[0]
        test_mod = importlib.import_module(mod_file_name)
        print("\nRunning test: {test_name}".format(test_name=tc["name"]))

        if tc.get("log-link"):
            print("Test logfile location: {log_url}".format(
                log_url=tc["log-link"]))
        log.info(f"Running test {test_file}")
        # log.info("Running test %s", test_file)
        start = datetime.datetime.now()
        for cluster_name in test.get("clusters", ceph_cluster_dict):
            if test.get("clusters"):
                config = test.get("clusters").get(cluster_name).get(
                    "config", {})
            else:
                config = test.get("config", {})

            if not config.get("base_url"):
                config["base_url"] = base_url

            config[
                "rhbuild"] = f"{rhbuild}-{platform}" if version2 else rhbuild
            config["cloud-type"] = cloud_type
            if "ubuntu_repo" in locals():
                config["ubuntu_repo"] = ubuntu_repo

            if skip_setup is True:
                config["skip_setup"] = True

            if skip_subscription is True:
                config["skip_subscription"] = True

            if args.get("--add-repo"):
                repo = args.get("--add-repo")
                if repo.startswith("http"):
                    config["add-repo"] = repo

            config["build_type"] = build
            config["enable_eus"] = enable_eus
            config["skip_enabling_rhel_rpms"] = skip_enabling_rhel_rpms
            config["docker-insecure-registry"] = docker_insecure_registry
            config["skip_version_compare"] = skip_version_compare
            config["container_image"] = "%s/%s:%s" % (
                docker_registry,
                docker_image,
                docker_tag,
            )

            config["ceph_docker_registry"] = docker_registry
            report_portal_description += f"docker registry: {docker_registry}"
            config["ceph_docker_image"] = docker_image
            report_portal_description += f"docker image: {docker_image}"
            config["ceph_docker_image_tag"] = docker_tag
            report_portal_description += f"docker registry: {docker_registry}"

            if filestore:
                config["filestore"] = filestore

            if ec_pool_vals:
                config["ec-pool-k-m"] = ec_pool_vals

            if args.get("--hotfix-repo"):
                hotfix_repo = args.get("--hotfix-repo")
                if hotfix_repo.startswith("http"):
                    config["hotfix_repo"] = hotfix_repo

            if kernel_repo is not None:
                config["kernel-repo"] = kernel_repo

            if osp_cred:
                config["osp_cred"] = osp_cred

            # if Kernel Repo is defined in ENV then set the value in config
            if os.environ.get("KERNEL-REPO-URL") is not None:
                config["kernel-repo"] = os.environ.get("KERNEL-REPO-URL")
            try:
                if post_to_report_portal:
                    rp_logger.start_test_item(
                        name=unique_test_name,
                        description=report_portal_description,
                        item_type="STEP",
                    )
                    rp_logger.log(
                        message=f"Logfile location - {tc['log-link']}")
                    rp_logger.log(message=f"Polarion ID: {tc['polarion-id']}")

                # Initialize the cluster with the expected rhcs_version hence the
                # precedence would be from test suite.
                # rhbuild would start with the version for example 5.0 or 4.2-rhel-7
                _rhcs_version = test.get("ceph_rhcs_version", rhbuild[:3])
                ceph_cluster_dict[cluster_name].rhcs_version = _rhcs_version

                rc = test_mod.run(
                    ceph_cluster=ceph_cluster_dict[cluster_name],
                    ceph_nodes=ceph_cluster_dict[cluster_name],
                    config=config,
                    parallel=parallel,
                    test_data=ceph_test_data,
                    ceph_cluster_dict=ceph_cluster_dict,
                    clients=clients,
                )
            except BaseException:  # noqa
                if post_to_report_portal:
                    rp_logger.log(message=traceback.format_exc(),
                                  level="ERROR")

                log.error(traceback.format_exc())
                rc = 1
            finally:
                collect_recipe(ceph_cluster_dict[cluster_name])
                if store:
                    store_cluster_state(ceph_cluster_dict, ceph_clusters_file)

            if rc != 0:
                break

        elapsed = datetime.datetime.now() - start
        tc["duration"] = elapsed

        # Write to report portal
        if post_to_report_portal:
            rp_logger.finish_test_item(
                status="PASSED" if rc == 0 else "FAILED")

        if rc == 0:
            tc["status"] = "Pass"
            msg = "Test {} passed".format(test_mod)
            log.info(msg)
            print(msg)

            if post_results:
                post_to_polarion(tc=tc)
        else:
            tc["status"] = "Failed"
            msg = "Test {} failed".format(test_mod)
            log.info(msg)
            print(msg)
            jenkins_rc = 1

            if post_results:
                post_to_polarion(tc=tc)

            if test.get("abort-on-fail", False):
                log.info("Aborting on test failure")
                tcs.append(tc)
                break

        if test.get("destroy-cluster") is True:
            if cloud_type == "openstack":
                cleanup_ceph_nodes(osp_cred, instances_name)
            elif cloud_type == "ibmc":
                cleanup_ibmc_ceph_nodes(osp_cred, instances_name)

        if test.get("recreate-cluster") is True:
            ceph_cluster_dict, clients = create_nodes(
                conf,
                inventory,
                osp_cred,
                run_id,
                cloud_type,
                service,
                instances_name,
                enable_eus=enable_eus,
            )
        tcs.append(tc)

    url_base = (magna_url + run_dir.split("/")[-1]
                if "/ceph/cephci-jenkins" in run_dir else run_dir)
    log.info("\nAll test logs located here: {base}".format(base=url_base))
    close_and_remove_filehandlers()

    test_run_metadata = {
        "build": rhbuild,
        "polarion-project-id": "CEPH",
        "suite-name": suite_name,
        "distro": distro,
        "ceph-version": ceph_version,
        "ceph-ansible-version": ceph_ansible_version,
        "base_url": base_url,
        "container-registry": docker_registry,
        "container-image": docker_image,
        "container-tag": docker_tag,
        "compose-id": compose_id,
        "log-dir": run_dir,
        "run-id": run_id,
    }

    if post_to_report_portal:
        rp_logger.finish_launch()

    if xunit_results:
        create_xunit_results(suite_name, tcs, test_run_metadata)

    print("\nAll test logs located here: {base}".format(base=url_base))
    print_results(tcs)
    send_to_cephci = post_results or post_to_report_portal
    run_end_time = datetime.datetime.now()
    duration = divmod((run_end_time - run_start_time).total_seconds(), 60)
    total_time = {
        "start": run_start_time.strftime("%d %B %Y , %I:%M:%S %p"),
        "end": run_end_time.strftime("%d %B %Y , %I:%M:%S %p"),
        "total": f"{int(duration[0])} mins, {int(duration[1])} secs",
    }
    info = {"status": "Pass"}
    test_res = {
        "result": tcs,
        "run_id": run_id,
        "trigger_user": trigger_user,
        "run_directory": run_dir,
        "total_time": total_time,
        "info": info,
        "send_to_cephci": send_to_cephci,
    }

    email_results(test_result=test_res)

    return jenkins_rc
Code example #14
def setup_subscription_manager(ceph,
                               is_production=False,
                               cloud_type="openstack",
                               timeout=1800):
    timeout = datetime.timedelta(seconds=timeout)
    starttime = datetime.datetime.now()
    log.info("Subscribing {ip} host with {timeout} timeout".format(
        ip=ceph.ip_address, timeout=timeout))
    while True:
        try:
            # subscription-manager tips:
            #
            # "--serverurl" (optional) is the entitlement service. The default
            # server (production) has customer-facing entitlement and SKU
            # information. The "stage" server has QE-only entitlement data.
            # We use Red Hat's internal "Ethel" tool to add SKUs to the
            # "rhcsuser" account that only exists in stage.
            #
            # "--baseurl" (optional) is the RPM content host. The default
            # value is the production CDN (cdn.redhat.com), and this hosts the
            # RPM contents to which all customers have access. Alternatively
            # you can push content to the staging CDN through the Errata Tool,
            # and then test it with --baseurl=cdn.stage.redhat.com.
            config_ = get_cephci_config()
            command = "sudo subscription-manager register --force "
            if is_production or cloud_type.startswith("ibmc"):
                command += "--serverurl=subscription.rhsm.redhat.com:443/subscription "
                username_ = config_["cdn_credentials"]["username"]
                password_ = config_["cdn_credentials"]["password"]
                pool_id = "8a85f99a7db4827d017dc5134ff800ba"

            else:
                command += (
                    "--serverurl=subscription.rhsm.stage.redhat.com:443/subscription "
                )
                username_ = config_["stage_credentials"]["username"]
                password_ = config_["stage_credentials"]["password"]
                pool_id = "8a99f9af795d57ab01797e572e860569"

            command += f"--baseurl=https://cdn.redhat.com --username={username_}"
            command += f" --password={password_}"

            ceph.exec_command(cmd=command, timeout=720, long_running=True)

            ceph.exec_command(
                cmd=f"sudo subscription-manager attach --pool {pool_id}",
                timeout=720,
                long_running=True,
            )
            break
        except (KeyError, AttributeError):
            required_key = "stage_credentials"
            if is_production or cloud_type.startswith("ibmc"):
                required_key = "cdn_credentials"

            raise RuntimeError(
                f"Require the {required_key} to be set in ~/.cephci.yaml, "
                "Please refer cephci.yaml.template")
        except BaseException:  # noqa
            if datetime.datetime.now() - starttime > timeout:
                try:
                    rhsm_log, err = ceph.exec_command(
                        cmd="cat /var/log/rhsm/rhsm.log", timeout=120)
                except BaseException:  # noqa
                    rhsm_log = "No Log Available"
                raise RuntimeError(
                    "Failed to subscribe {ip} with {timeout} timeout:"
                    "\n {stack_trace}\n\n rhsm.log:\n{log}".format(
                        ip=ceph.ip_address,
                        timeout=timeout,
                        stack_trace=traceback.format_exc(),
                        log=rhsm_log,
                    ))
            else:
                wait = iter(x for x in itertools.count(1, 10))
                time.sleep(next(wait))
    ceph.exec_command(cmd="sudo subscription-manager repos --disable=*",
                      long_running=True)
Code example #15
File: polarion.py  Project: yogesh-mane/cephci
def post_to_polarion(tc):
    """
    Post test results to Polarion.
    It returns nothing and is essentially a no-op
    when no Polarion details are found in the test object.

    Args:
       tc: test case object with details

    Returns:
      None
    """
    current_dir = os.getcwd()
    polarion_cred = get_cephci_config()["polarion"]

    if tc["polarion-id"] is not None:
        # add polarion attributes
        ids = tc["polarion-id"].split(",")
        tc["space"] = "Smoke Suite"
        tc["test_run_id"] = (tc["ceph-version"] + "_" + tc["suite-name"] +
                             "_" + tc["distro"] + "_Automated_Smoke_Runs")
        tc["test_run_id"] = tc["test_run_id"].replace(".", "_")
        log.info("Updating test run: %s " % tc["test_run_id"])
        tc["ceph-build"] = "_".join([
            _f for _f in [
                tc["ceph-version"],
                tc["ceph-ansible-version"],
                tc["compose-id"],
            ] if _f
        ])
        if tc.get("docker-containers-list"):
            tc["docker-container"] = "\ncontainer: {container}".format(
                container=",".join(list(set(tc.get(
                    "docker-containers-list")))))
        tc["test_case_title"] = tc["desc"]
        if tc["desc"] is None:
            log.info("cannot update polarion with no description")
            return
        if tc["status"] == "Pass":
            tc["result"] = ""
        else:
            tc["result"] = '<failure message="test failed" type="failure"/>'
        current_dir += "/templates/"
        j2_env = Environment(loader=FileSystemLoader(current_dir),
                             trim_blocks=True)
        for id in ids:
            tc["polarion-id"] = id
            f = NamedTemporaryFile(delete=False)
            test_results = j2_env.get_template("importer-template.xml").render(
                tc=tc)
            log.info("updating results for %s " % id)
            f.write(test_results.encode())
            f.close()
            url = polarion_cred.get("url")
            user = polarion_cred.get("username")
            pwd = polarion_cred.get("password")
            call([
                "curl",
                "-k",
                "-u",
                "{user}:{pwd}".format(user=user, pwd=pwd),
                "-X",
                "POST",
                "-F",
                "file=@{name}".format(name=f.name),
                url,
            ])
            os.unlink(f.name)
Code example #16
    def bootstrap(self: CephAdmProtocol, config: Dict) -> None:
        """
        Execute cephadm bootstrap with the passed kwargs on the installer node.

        Bootstrap involves,
          - Creates /etc/ceph directory with permissions
          - CLI creation with bootstrap options with custom/default image
          - Execution of bootstrap command

        Args:
            config: Key/value pairs passed from the test case.

        Example:
            config:
                command: bootstrap
                base_cmd_args:
                    verbose: true
                args:
                    custom_image: true | false
                    mon-ip: <node_name>
                    mgr-id: <mgr_id>
                    fsid: <id>
        """
        self.cluster.setup_ssh_keys()
        self.set_tool_repo()
        self.install()

        cdn_cred = get_cephci_config().get("cdn_credentials")
        cmd = "cephadm"

        if config.get("base_cmd_args"):
            cmd += config_dict_to_string(config["base_cmd_args"])

        args = config.get("args")
        custom_image = args.pop("custom_image", True)

        if custom_image:
            cmd += f" --image {self.config['container_image']}"

        cmd += " bootstrap"
        custom_image_args = (" --registry-url registry.redhat.io"
                             " --registry-username {user}"
                             " --registry-password {password}")
        cmd += custom_image_args.format(
            user=cdn_cred.get("username"),
            password=cdn_cred.get("password"),
        )

        # To be generic, the mon-ip contains the global node name. Here, we replace the
        # name with the IP address. The replacement allows us to be inline with the
        # CLI option.

        # Todo: need to switch installer node on any other node name provided
        #       other than installer node
        mon_node = args.pop("mon-ip", self.installer.node.shortname)
        if mon_node:
            for node in self.cluster.get_nodes():
                if mon_node in node.shortname:
                    cmd += f" --mon-ip {node.ip_address}"
                    break
            else:
                raise ResourceNotFoundError(f"Unknown {mon_node} node name.")

        cmd += config_dict_to_string(args)

        out, err = self.installer.exec_command(
            sudo=True,
            cmd=cmd,
            timeout=1800,
            check_ec=True,
        )

        logger.info("Bootstrap output : %s", out.read().decode())
        logger.error("Bootstrap error: %s", err.read().decode())

        self.distribute_cephadm_gen_pub_key()
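
Expressed as the Python structure that bootstrap() actually receives, the YAML Example in the docstring above corresponds to a config dict of roughly this shape; node names and ids are placeholders.

# Mirrors the docstring's Example block; values are placeholders.
config = {
    "command": "bootstrap",
    "base_cmd_args": {"verbose": True},
    "args": {
        "custom_image": True,      # False skips the --image flag
        "mon-ip": "<node_name>",   # resolved to the node's IP address
        "mgr-id": "<mgr_id>",
        "fsid": "<id>",
    },
}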