Example #1
def cluster(request, log_cli_level):
    """
    This fixture initiates deployment for both OCP and OCS clusters.
    Platform-specific deployment classes handle the fine details of each
    deployment step.
    """
    log.info(f"All logs located at {ocsci_log_path()}")

    teardown = config.RUN['cli_params']['teardown']
    deploy = config.RUN['cli_params']['deploy']
    factory = dep_factory.DeploymentFactory()
    deployer = factory.get_deployment()

    # Add a finalizer to teardown the cluster after test execution is finished
    if teardown:

        def cluster_teardown_finalizer():
            deployer.destroy_cluster(log_cli_level)

        request.addfinalizer(cluster_teardown_finalizer)
        log.info("Will teardown cluster because --teardown was provided")

    # Download client
    force_download = (config.RUN['cli_params'].get('deploy')
                      and config.DEPLOYMENT['force_download_client'])
    get_openshift_client(force_download=force_download)

    if deploy:
        # Deploy cluster
        deployer.deploy_cluster(log_cli_level)
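
A minimal usage sketch for the fixture above; the log_cli_level fixture and the session scope shown here are assumptions for illustration, since neither is part of the example itself.

import pytest

@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
    # Hypothetical helper: reuse pytest's own log_cli_level ini option as the
    # verbosity level passed through to deploy_cluster()/destroy_cluster().
    return pytestconfig.getini("log_cli_level") or "DEBUG"

def test_uses_deployed_cluster(cluster):
    # Requesting the cluster fixture by name ensures deployment (with --deploy)
    # and teardown registration (with --teardown) run before this test body.
    ...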
Example #2
    def set_kubeconfig(kubeconfig_path):
        """
        Export environment variable KUBECONFIG for future calls of OC commands
        or other API calls

        Args:
            kubeconfig_path (str): path to kubeconfig file to be exported

        Returns:
            boolean: True if successfully connected to cluster, False otherwise
        """
        # Test cluster access
        log.info("Testing access to cluster with %s", kubeconfig_path)
        if not os.path.isfile(kubeconfig_path):
            log.warning("The kubeconfig file %s doesn't exist!",
                        kubeconfig_path)
            return False
        os.environ["KUBECONFIG"] = kubeconfig_path
        if not which("oc"):
            get_openshift_client()
        try:
            run_cmd("oc cluster-info")
        except CommandFailed as ex:
            log.error("Cluster is not ready to use: %s", ex)
            return False
        log.info("Access to cluster is OK!")
        return True
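
A call-site sketch for set_kubeconfig, mirroring how Example #4 below uses it; the import path and the kubeconfig path are assumptions for illustration.

import pytest
from ocs_ci.ocs.ocp import OCP  # import path is an assumption

# Fail fast when the cluster behind the given kubeconfig is not reachable.
if not OCP.set_kubeconfig("/path/to/cluster/auth/kubeconfig"):
    pytest.fail("Cluster is not available!")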
Example #3
def cluster(request, log_cli_level):
    """
    This fixture initiates deployment for both OCP and OCS clusters.
    Platform-specific deployment classes handle the fine details of each
    deployment step.
    """
    log.info(f"All logs located at {ocsci_log_path()}")

    teardown = config.RUN['cli_params']['teardown']
    deploy = config.RUN['cli_params']['deploy']
    factory = dep_factory.DeploymentFactory()
    deployer = factory.get_deployment()

    # Add a finalizer to teardown the cluster after test execution is finished
    if teardown:

        def cluster_teardown_finalizer():
            deployer.destroy_cluster(log_cli_level)

        request.addfinalizer(cluster_teardown_finalizer)
        log.info("Will teardown cluster because --teardown was provided")

    # Download client
    force_download = (config.RUN['cli_params'].get('deploy')
                      and config.DEPLOYMENT['force_download_client'])
    get_openshift_client(force_download=force_download)

    if deploy:
        # Deploy cluster
        deployer.deploy_cluster(log_cli_level)
        # Workaround for #1777384 - enable container_use_cephfs on RHEL workers
        ocp_obj = ocp.OCP()
        cmd = ['/usr/sbin/setsebool -P container_use_cephfs on']
        workers = get_typed_worker_nodes(os_id="rhel")
        for worker in workers:
            cmd_list = cmd.copy()
            node = worker.get().get('metadata').get('name')
            log.info(f"{node} is a RHEL based worker - applying '{cmd_list}'")
            ocp_obj.exec_oc_debug_cmd(node=node, cmd_list=cmd_list)
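
For reference, a hedged sketch of what the workaround loop above boils down to on each RHEL worker, assuming the usual "oc debug" plus "chroot /host" pattern; the helper name is hypothetical and run_cmd is the same utility used elsewhere in these examples.

def enable_container_use_cephfs(node_name):
    # Hypothetical helper: run the SELinux boolean change on one worker node
    # via a debug pod, roughly what exec_oc_debug_cmd does for us above.
    run_cmd(
        f"oc debug node/{node_name} -- chroot /host "
        f"/usr/sbin/setsebool -P container_use_cephfs on"
    )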
Example #4
def cluster(request):
    log.info(f"All logs located at {log_path}")
    log.info("Running OCS basic installation")
    cluster_path = config.ENV_DATA['cluster_path']
    deploy = config.RUN['cli_params']['deploy']
    teardown = config.RUN['cli_params']['teardown']
    # Add a finalizer to teardown the cluster after test execution is finished
    if teardown:
        request.addfinalizer(cluster_teardown)
        log.info("Will teardown cluster because --teardown was provided")
    # Test cluster access; if the cluster is already running, skip the deployment.
    if is_cluster_running(cluster_path):
        log.info("The installation is skipped because the cluster is running")
        return
    elif teardown and not deploy:
        log.info("Attempting teardown of non-accessible cluster: %s",
                 cluster_path)
        return
    elif not deploy and not teardown:
        msg = "The given cluster can not be connected to: {}. ".format(
            cluster_path)
        msg += "Provide a valid --cluster-path or use --deploy to deploy a new cluster"
        pytest.fail(msg)
    elif not system.is_path_empty(cluster_path) and deploy:
        msg = "The given cluster path is not empty: {}. ".format(cluster_path)
        msg += "Provide an empty --cluster-path and --deploy to deploy a new cluster"
        pytest.fail(msg)
    else:
        log.info(
            "A testing cluster will be deployed and cluster information stored at: %s",
            cluster_path)

    # Generate install-config from template
    log.info("Generating install-config")
    pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")

    # TODO: check for supported platform and raise the exception if not
    # supported. Currently we support just AWS.

    _templating = templating.Templating()
    install_config_str = _templating.render_template("install-config.yaml.j2",
                                                     config.ENV_DATA)
    # Log the install config *before* adding the pull secret, so we don't leak
    # sensitive data.
    log.info(f"Install config: \n{install_config_str}")
    # Parse the rendered YAML so that we can manipulate the object directly
    install_config_obj = yaml.safe_load(install_config_str)
    with open(pull_secret_path, "r") as f:
        # Parse, then unparse, the JSON file.
        # We do this for two reasons: to ensure it is well-formatted, and
        # also to ensure it ends up as a single line.
        install_config_obj['pullSecret'] = json.dumps(json.loads(f.read()))
    install_config_str = yaml.safe_dump(install_config_obj)
    install_config = os.path.join(cluster_path, "install-config.yaml")
    with open(install_config, "w") as f:
        f.write(install_config_str)

    # Download installer
    installer = get_openshift_installer(config.DEPLOYMENT['installer_version'])
    # Download client
    get_openshift_client()

    # Deploy cluster
    log.info("Deploying cluster")
    run_cmd(f"{installer} create cluster "
            f"--dir {cluster_path} "
            f"--log-level debug")

    # Test cluster access
    if not OCP.set_kubeconfig(
            os.path.join(cluster_path, config.RUN.get('kubeconfig_location'))):
        pytest.fail("Cluster is not available!")

    # TODO: Create cluster object, add to config.ENV_DATA for other tests to
    # utilize.
    # Determine worker pattern and create ebs volumes
    with open(os.path.join(cluster_path, "terraform.tfvars")) as f:
        tfvars = json.load(f)

    cluster_id = tfvars['cluster_id']
    worker_pattern = f'{cluster_id}-worker*'
    log.info(f'Worker pattern: {worker_pattern}')
    create_ebs_volumes(worker_pattern, region_name=config.ENV_DATA['region'])

    # render templates and create resources
    create_oc_resource('common.yaml', cluster_path, _templating,
                       config.ENV_DATA)
    run_cmd(f'oc label namespace {config.ENV_DATA["cluster_namespace"]} '
            f'"openshift.io/cluster-monitoring=true"')
    run_cmd(f"oc policy add-role-to-user view "
            f"system:serviceaccount:openshift-monitoring:prometheus-k8s "
            f"-n {config.ENV_DATA['cluster_namespace']}")
    apply_oc_resource('csi-nodeplugin-rbac_rbd.yaml',
                      cluster_path,
                      _templating,
                      config.ENV_DATA,
                      template_dir="ocs-deployment/csi/rbd/")
    apply_oc_resource('csi-provisioner-rbac_rbd.yaml',
                      cluster_path,
                      _templating,
                      config.ENV_DATA,
                      template_dir="ocs-deployment/csi/rbd/")
    apply_oc_resource('csi-nodeplugin-rbac_cephfs.yaml',
                      cluster_path,
                      _templating,
                      config.ENV_DATA,
                      template_dir="ocs-deployment/csi/cephfs/")
    apply_oc_resource('csi-provisioner-rbac_cephfs.yaml',
                      cluster_path,
                      _templating,
                      config.ENV_DATA,
                      template_dir="ocs-deployment/csi/cephfs/")
    # Increased to 15 seconds as 10 is not enough
    # TODO: do the sampler function and check if resource exist
    wait_time = 15
    log.info(f"Waiting {wait_time} seconds...")
    time.sleep(wait_time)
    create_oc_resource('operator-openshift-with-csi.yaml', cluster_path,
                       _templating, config.ENV_DATA)
    log.info(f"Waiting {wait_time} seconds...")
    time.sleep(wait_time)
    run_cmd(f"oc wait --for condition=ready pod "
            f"-l app=rook-ceph-operator "
            f"-n {config.ENV_DATA['cluster_namespace']} "
            f"--timeout=120s")
    run_cmd(f"oc wait --for condition=ready pod "
            f"-l app=rook-discover "
            f"-n {config.ENV_DATA['cluster_namespace']} "
            f"--timeout=120s")
    create_oc_resource('cluster.yaml', cluster_path, _templating,
                       config.ENV_DATA)

    POD = ocp.OCP(kind=constants.POD,
                  namespace=config.ENV_DATA['cluster_namespace'])
    CFS = ocp.OCP(kind=constants.CEPHFILESYSTEM,
                  namespace=config.ENV_DATA['cluster_namespace'])

    # Check for the Running status of Ceph Pods
    run_cmd(f"oc wait --for condition=ready pod "
            f"-l app=rook-ceph-agent "
            f"-n {config.ENV_DATA['cluster_namespace']} "
            f"--timeout=120s")
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-mon',
                                 resource_count=3,
                                 timeout=600)
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-mgr',
                                 timeout=600)
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-osd',
                                 resource_count=3,
                                 timeout=600)

    create_oc_resource('toolbox.yaml', cluster_path, _templating,
                       config.ENV_DATA)
    log.info(f"Waiting {wait_time} seconds...")
    time.sleep(wait_time)
    create_oc_resource('storage-manifest.yaml', cluster_path, _templating,
                       config.ENV_DATA)
    create_oc_resource("service-monitor.yaml", cluster_path, _templating,
                       config.ENV_DATA)
    create_oc_resource("prometheus-rules.yaml", cluster_path, _templating,
                       config.ENV_DATA)
    log.info(f"Waiting {wait_time} seconds...")
    time.sleep(wait_time)

    # Create MDS pods for CephFileSystem
    fs_data = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    fs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']

    ceph_obj = OCS(**fs_data)
    ceph_obj.create()
    assert POD.wait_for_resource(condition=constants.STATUS_RUNNING,
                                 selector='app=rook-ceph-mds',
                                 resource_count=2,
                                 timeout=600)

    # Check for CephFilesystem creation in ocp
    cfs_data = CFS.get()
    cfs_name = cfs_data['items'][0]['metadata']['name']

    if helpers.validate_cephfilesystem(cfs_name):
        log.info(f"MDS deployment is successful!")
        defaults.CEPHFILESYSTEM_NAME = cfs_name
    else:
        log.error(f"MDS deployment Failed! Please check logs!")

    # Verify health of ceph cluster
    # TODO: move destroy cluster logic to new CLI usage pattern?
    log.info("Done creating rook resources, waiting for HEALTH_OK")
    assert ceph_health_check(namespace=config.ENV_DATA['cluster_namespace'])
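
The fixed time.sleep() waits above are flagged as a stopgap by the sampler TODO; a hedged sketch of the polling approach such a sampler could take (the function name and signature are assumptions for illustration).

import time

def wait_for_resource_to_exist(ocp_obj, resource_name, timeout=60, interval=5):
    # Hypothetical sampler: poll the API until the named resource appears,
    # instead of sleeping a fixed number of seconds and hoping it is ready.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if ocp_obj.get(resource_name=resource_name):
                return True
        except Exception:
            # Resource not created yet (or a transient oc failure); retry.
            pass
        time.sleep(interval)
    return False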