Code example #1
def setup():
    """
    Setting up the environment - Creating Secret
    """
    global RBD_POOL, RBD_STORAGE_CLASS, RBD_SECRET, CEPHFS_OBJ, \
        CEPHFS_STORAGE_CLASS, CEPHFS_SECRET, RBD_PVC, CEPHFS_PVC
    log.info("Creating RBD Pool")
    RBD_POOL = helpers.create_ceph_block_pool()

    log.info("Creating RBD Secret")
    RBD_SECRET = helpers.create_secret(constants.CEPHBLOCKPOOL)

    log.info("Creating RBD StorageClass")
    RBD_STORAGE_CLASS = helpers.create_storage_class(constants.CEPHBLOCKPOOL,
                                                     RBD_POOL.name,
                                                     RBD_SECRET.name)

    log.info("Creating CephFilesystem")
    CEPHFS_OBJ = helpers.create_cephfilesystem()

    log.info("Creating FS Secret")
    CEPHFS_SECRET = helpers.create_secret(constants.CEPHFILESYSTEM)

    log.info("Creating FS StorageClass")
    CEPHFS_STORAGE_CLASS = helpers.create_storage_class(
        constants.CEPHFILESYSTEM, helpers.get_cephfs_data_pool_name(),
        CEPHFS_SECRET.name)

    log.info("Creating RBC PVC")
    RBD_PVC = helpers.create_pvc(sc_name=RBD_STORAGE_CLASS.name)

    log.info("Creating CephFs PVC")
    CEPHFS_PVC = helpers.create_pvc(sc_name=CEPHFS_STORAGE_CLASS.name)
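A setup like this is normally paired with a teardown that removes the same resources in reverse order of creation, so that no StorageClass still references a deleted secret or pool. A minimal hypothetical sketch (not part of the source), assuming each helper-returned object exposes a delete() method, as the later examples on this page show:

def teardown():
    """
    Tearing down the environment - Deleting the resources created in
    setup(), in reverse order of creation
    """
    log.info("Deleting PVCs")
    CEPHFS_PVC.delete()
    RBD_PVC.delete()

    log.info("Deleting StorageClasses")
    CEPHFS_STORAGE_CLASS.delete()
    RBD_STORAGE_CLASS.delete()

    log.info("Deleting Secrets")
    CEPHFS_SECRET.delete()
    RBD_SECRET.delete()

    log.info("Deleting CephFilesystem and RBD pool")
    CEPHFS_OBJ.delete()
    RBD_POOL.delete()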
Code example #2
def setup():
    """
    Setting up the environment - Creating Secret
    """
    global SECRET
    log.info("Creating RBD Secret")
    SECRET = helpers.create_secret(constants.CEPHBLOCKPOOL)
Code example #3
def setup():
    """
    Setting up the environment for the test
    """
    global RBD_SECRET_OBJ
    log.info("Creating RBD Secret")
    RBD_SECRET_OBJ = helpers.create_secret(constants.CEPHBLOCKPOOL)

    log.info("Creating CEPHFS Secret")
    global CEPHFS_SECRET_OBJ
    CEPHFS_SECRET_OBJ = helpers.create_secret(constants.CEPHFILESYSTEM)

    log.info("Creating RBD Storageclass")
    assert create_multiple_rbd_storageclasses(count=5)

    log.info("Creating CEPHFS Storageclass")
    assert create_storageclass_cephfs()
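create_multiple_rbd_storageclasses is not defined in this snippet. A plausible sketch of what such a helper might look like, built only from calls used elsewhere on this page (the loop body and the reuse of RBD_SECRET_OBJ are assumptions):

def create_multiple_rbd_storageclasses(count=5):
    """
    Hypothetical helper: create `count` RBD storage classes, each backed
    by its own Ceph block pool and the RBD secret created in setup()
    """
    for _ in range(count):
        pool = helpers.create_ceph_block_pool()
        sc = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=pool.name,
            secret_name=RBD_SECRET_OBJ.name)
        if not sc:
            return False
    return True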
Code example #4
def setup(self):
    """
    Creates the resources needed for the type of interface to be used.

    For CephBlockPool interface: Creates CephBlockPool, Secret and StorageClass
    For CephFilesystem interface: Creates Secret and StorageClass
    """
    logger.info(f"Creating resources for {self.interface_type} interface")

    self.interface_name = None
    if self.interface_type == constants.CEPHBLOCKPOOL:
        self.cbp_obj = helpers.create_ceph_block_pool()
        assert self.cbp_obj, "Failed to create block pool"
        self.interface_name = self.cbp_obj.name

    elif self.interface_type == constants.CEPHFILESYSTEM:
        self.interface_name = helpers.get_cephfs_data_pool_name()

    self.secret_obj = helpers.create_secret(interface_type=self.interface_type)
    assert self.secret_obj, "Failed to create secret"

    self.sc_obj = helpers.create_storage_class(
        interface_type=self.interface_type,
        interface_name=self.interface_name,
        secret_name=self.secret_obj.name
    )
    assert self.sc_obj, "Failed to create storage class"
Code example #5
def setup():
    """
    Setting up the environment - Creating Secret
    """
    global SECRET, POOL
    log.info("Creating RBD Pool")
    POOL = helpers.create_ceph_block_pool()

    log.info("Creating RBD Secret")
    SECRET = helpers.create_secret(constants.CEPHBLOCKPOOL)
Code example #6
def setup_fs():
    log.info("Creating CEPHFS Secret")
    global CEPHFS_SECRET_OBJ
    CEPHFS_SECRET_OBJ = helpers.create_secret(constants.CEPHFILESYSTEM)

    global CEPHFS_SC_OBJ
    log.info("Creating CephFS Storage class ")
    CEPHFS_SC_OBJ = helpers.create_storage_class(
        constants.CEPHFILESYSTEM, helpers.get_cephfs_data_pool_name(),
        CEPHFS_SECRET_OBJ.name)
Code example #7
def factory(interface=constants.CEPHBLOCKPOOL):
    """
    Args:
        interface (str): CephBlockPool or CephFileSystem. This decides
            whether an RBD based or CephFS resource is created.
            RBD is the default.
    """
    secret_obj = helpers.create_secret(interface_type=interface)
    assert secret_obj, "Failed to create a secret"
    instances.append(secret_obj)
    return secret_obj
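A factory like this only makes sense inside an enclosing fixture that owns the instances list and registers the cleanup. A plausible sketch of that wrapper, assuming pytest and the same helpers module (the fixture name secret_factory is illustrative, not from the source):

import pytest

@pytest.fixture
def secret_factory(request):
    """
    Hand out secrets on demand and delete every secret the factory
    created once the test finishes
    """
    instances = []

    def factory(interface=constants.CEPHBLOCKPOOL):
        secret_obj = helpers.create_secret(interface_type=interface)
        assert secret_obj, "Failed to create a secret"
        instances.append(secret_obj)
        return secret_obj

    def finalizer():
        # Delete in reverse order of creation
        for instance in reversed(instances):
            instance.delete()

    request.addfinalizer(finalizer)
    return factory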
Code example #8
def setup_rbd():
    """
    Setting up the environment
    Creating replicated pool,secret,storageclass for rbd
    """
    log.info("Creating CephBlockPool")
    global RBD_POOL
    RBD_POOL = helpers.create_ceph_block_pool()
    global RBD_SECRET_OBJ
    RBD_SECRET_OBJ = helpers.create_secret(constants.CEPHBLOCKPOOL)
    global RBD_SC_OBJ
    log.info("Creating RBD Storage class ")
    RBD_SC_OBJ = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=RBD_POOL.name,
        secret_name=RBD_SECRET_OBJ.name)
Code example #9
File: test_reclaim_policy.py Project: ksandha/ocs-ci
def setup(self):
    """
    Setting up a secret and storage class
    """

    self.secret_obj = helpers.create_secret(constants.CEPHBLOCKPOOL)
    self.sc_obj_retain = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=constants.DEFAULT_BLOCKPOOL,
        secret_name=self.secret_obj.name,
        reclaim_policy=constants.RECLAIM_POLICY_RETAIN)
    self.sc_obj_delete = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=constants.DEFAULT_BLOCKPOOL,
        secret_name=self.secret_obj.name,
        reclaim_policy=constants.RECLAIM_POLICY_DELETE)
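Since reclaimPolicy is a top-level field of the Kubernetes StorageClass API, a test built on this setup can read the policy straight back from the objects. A short hypothetical follow-up check (an assumption, not source code), assuming .get() returns the resource as a dict as in the fixture examples below:

assert self.sc_obj_retain.get().get('reclaimPolicy') == (
    constants.RECLAIM_POLICY_RETAIN)
assert self.sc_obj_delete.get().get('reclaimPolicy') == (
    constants.RECLAIM_POLICY_DELETE)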
Code example #10
File: fixtures.py Project: xhtheking/ocs-ci
def create_rbd_secret(request):
    """
    Create an RBD secret
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete the secret
        """
        if class_instance.rbd_secret_obj.get():
            class_instance.rbd_secret_obj.delete()

    request.addfinalizer(finalizer)

    class_instance.rbd_secret_obj = helpers.create_secret(
        interface_type=constants.CEPHBLOCKPOOL)
    assert class_instance.rbd_secret_obj, "Failed to create rbd secret"
Code example #11
File: fixtures.py Project: xhtheking/ocs-ci
def create_cephfs_secret(request):
    """
    Create a CephFS secret
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete the secret
        """
        if class_instance.cephfs_secret_obj.get():
            class_instance.cephfs_secret_obj.delete()

    request.addfinalizer(finalizer)

    class_instance.cephfs_secret_obj = helpers.create_secret(
        interface_type=constants.CEPHFILESYSTEM)
    assert class_instance.cephfs_secret_obj, "Failed to create cephfs secret"
Code example #12
File: fixtures.py Project: waynesun09/ocs-ci
def create_cephfs_secret(request):
    """
    Create a CephFS secret
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete the FS secret
        """
        if hasattr(class_instance, 'cephfs_secret_obj'):
            class_instance.cephfs_secret_obj.delete()
            class_instance.cephfs_secret_obj.ocp.wait_for_delete(
                class_instance.cephfs_secret_obj.name)

    request.addfinalizer(finalizer)

    class_instance.cephfs_secret_obj = helpers.create_secret(
        interface_type=constants.CEPHFILESYSTEM)
    assert class_instance.cephfs_secret_obj, "Failed to create secret"
Code example #13
File: fixtures.py Project: waynesun09/ocs-ci
def create_rbd_secret(request):
    """
    Create an RBD secret
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete the RBD secret
        """
        if hasattr(class_instance, 'rbd_secret_obj'):
            class_instance.rbd_secret_obj.delete()
            class_instance.rbd_secret_obj.ocp.wait_for_delete(
                class_instance.rbd_secret_obj.name)

    request.addfinalizer(finalizer)

    class_instance.rbd_secret_obj = helpers.create_secret(
        interface_type=constants.CEPHBLOCKPOOL)
    assert class_instance.rbd_secret_obj, "Failed to create secret"
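The two fixture generations above differ mainly in how the finalizer guards against a partially failed setup: the xhtheking variants query the API (rbd_secret_obj.get()) before deleting, while the waynesun09 variants check hasattr() so the finalizer also survives a create_secret call that raised before the attribute was assigned, and then block on wait_for_delete. A sketch combining both guards (an assumption, not code from either project):

def finalizer():
    """
    Delete the secret, tolerating both a setup that failed before the
    attribute was set and a secret that no longer exists
    """
    if (hasattr(class_instance, 'rbd_secret_obj')
            and class_instance.rbd_secret_obj.get()):
        class_instance.rbd_secret_obj.delete()
        class_instance.rbd_secret_obj.ocp.wait_for_delete(
            class_instance.rbd_secret_obj.name)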
Code example #14
File: test_pgsql.py Project: nimrod-becker/ocs-ci
def ripsaw(request):
    # Create Secret and Pool
    secret = helpers.create_secret(constants.CEPHBLOCKPOOL)
    pool = helpers.create_ceph_block_pool()

    # Create storage class
    log.info("Creating a Storage Class")
    sc = helpers.create_storage_class(sc_name='pgsql-workload',
                                      interface_type=constants.CEPHBLOCKPOOL,
                                      secret_name=secret.name,
                                      interface_name=pool.name)
    # Create RipSaw Operator
    ripsaw = RipSaw()

    def teardown():
        ripsaw.cleanup()
        sc.delete()
        secret.delete()
        pool.delete()

    request.addfinalizer(teardown)
    return ripsaw
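The same lifecycle can also be written as a yield-style pytest fixture, which keeps setup and teardown in one visual flow; everything after the yield runs as the finalizer. A sketch under the same imports as the original (pgsql-workload and RipSaw as above):

import pytest

@pytest.fixture
def ripsaw():
    # Setup: secret, pool, storage class and the RipSaw operator
    secret = helpers.create_secret(constants.CEPHBLOCKPOOL)
    pool = helpers.create_ceph_block_pool()
    sc = helpers.create_storage_class(sc_name='pgsql-workload',
                                      interface_type=constants.CEPHBLOCKPOOL,
                                      secret_name=secret.name,
                                      interface_name=pool.name)
    ripsaw = RipSaw()
    yield ripsaw
    # Teardown, in reverse order of creation
    ripsaw.cleanup()
    sc.delete()
    secret.delete()
    pool.delete()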
Code example #15
def create_resources(resources, run_io=True):
    """
    Sanity validation - Create resources (FS and RBD) and run IO

    Args:
        resources (tuple): Lists of projects, secrets, pools,
            storageclasses, pvcs and pods
        run_io (bool): True for run IO, False otherwise

    """
    # Create resources and run IO for both FS and RBD
    # Unpack resources
    projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]

    # Project
    projects.append(helpers.create_project())

    # Secrets
    secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))
    secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))

    # Pools
    pools.append(helpers.create_ceph_block_pool())
    pools.append(helpers.get_cephfs_data_pool_name())

    # Storageclasses
    storageclasses.append(
        helpers.create_storage_class(interface_type=constants.CEPHBLOCKPOOL,
                                     interface_name=pools[0].name,
                                     secret_name=secrets[0].name))
    storageclasses.append(
        helpers.create_storage_class(interface_type=constants.CEPHFILESYSTEM,
                                     interface_name=pools[1],
                                     secret_name=secrets[1].name))

    # PVCs
    pvcs.append(
        helpers.create_pvc(sc_name=storageclasses[0].name,
                           namespace=projects[0].namespace))
    pvcs.append(
        helpers.create_pvc(sc_name=storageclasses[1].name,
                           namespace=projects[0].namespace))
    for pvc in pvcs:
        helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
        pvc.reload()

    # Pods
    pods.append(
        helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                           pvc_name=pvcs[0].name,
                           namespace=projects[0].namespace))
    pods.append(
        helpers.create_pod(interface_type=constants.CEPHFILESYSTEM,
                           pvc_name=pvcs[1].name,
                           namespace=projects[0].namespace))
    for pod in pods:
        helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
        pod.reload()

    if run_io:
        # Run IO
        for pod in pods:
            pod.run_io('fs', '1G')
        for pod in pods:
            fio_result = pod.get_fio_results()
            logger.info(f"IOPs after FIO for pod {pod.name}:")
            logger.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
            logger.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
Code example #16
def run_io_in_background(request):
    """
    Run IO during the test execution
    """
    if config.RUN['cli_params'].get('io_in_bg'):
        log.info(f"Tests will be running while IO is in the background")

        g_sheet = None
        if config.RUN['google_api_secret']:
            g_sheet = GoogleSpreadSheetAPI("IO BG results", 0)
        else:
            log.warning(
                "Google API secret was not found. IO won't be reported to "
                "a Google spreadsheet")
        results = list()
        temp_file = tempfile.NamedTemporaryFile(mode='w+',
                                                prefix='test_status',
                                                delete=False)

        def get_test_status():
            with open(temp_file.name, 'r') as t_file:
                return t_file.readline()

        def set_test_status(status):
            with open(temp_file.name, 'w') as t_file:
                t_file.writelines(status)

        set_test_status('running')

        def finalizer():
            """
            Delete the resources created during setup, used for
            running IO in the test background
            """
            set_test_status('finished')
            try:
                for status in TimeoutSampler(90, 3, get_test_status):
                    if status == 'terminated':
                        break
            except TimeoutExpiredError:
                log.warning("Background IO was still in progress before IO "
                            "thread termination")
            if thread:
                thread.join()

            log.info(f"Background IO has stopped")
            for result in results:
                log.info(f"IOPs after FIO for pod {pod_obj.name}:")
                log.info(f"Read: {result[0]}")
                log.info(f"Write: {result[1]}")

            if pod_obj:
                pod_obj.delete()
                pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
            if pvc_obj:
                pvc_obj.delete()
                pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
            if sc_obj:
                sc_obj.delete()
            if cbp_obj:
                cbp_obj.delete()
            if secret_obj:
                secret_obj.delete()

        request.addfinalizer(finalizer)

        secret_obj = helpers.create_secret(
            interface_type=constants.CEPHBLOCKPOOL)
        cbp_obj = helpers.create_ceph_block_pool()
        sc_obj = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=cbp_obj.name,
            secret_name=secret_obj.name)
        pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, size='2Gi')
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
        pod_obj = helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                                     pvc_name=pvc_obj.name)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()

        def run_io_in_bg():
            """
            Run IO by executing FIO and deleting the file created for FIO on
            the pod, in a while true loop. Will be running as long as
            the test is running.
            """
            while get_test_status() == 'running':
                pod_obj.run_io('fs', '1G')
                result = pod_obj.get_fio_results()
                reads = result.get('jobs')[0].get('read').get('iops')
                writes = result.get('jobs')[0].get('write').get('iops')
                if g_sheet:
                    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    g_sheet.insert_row([now, reads, writes])

                results.append((reads, writes))

                file_path = os.path.join(
                    pod_obj.get_storage_path(storage_type='fs'),
                    pod_obj.io_params['filename'])
                pod_obj.exec_cmd_on_pod(f'rm -rf {file_path}')
            set_test_status('terminated')

        log.info(f"Start running IO in the test background")

        thread = threading.Thread(target=run_io_in_bg)
        thread.start()
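Stripped of the storage setup, the coordination here is a simple status-file handshake: the finalizer flips the flag to 'finished', the worker acknowledges with 'terminated', and only then is the thread joined. A self-contained minimal sketch of just that pattern (the sleeps stand in for the FIO runs and for the TimeoutSampler polling):

import tempfile
import threading
import time

temp_file = tempfile.NamedTemporaryFile(mode='w+', prefix='test_status',
                                        delete=False)

def get_test_status():
    with open(temp_file.name, 'r') as t_file:
        return t_file.readline()

def set_test_status(status):
    with open(temp_file.name, 'w') as t_file:
        t_file.writelines(status)

def worker():
    # Keep "working" until asked to stop, then acknowledge
    while get_test_status() == 'running':
        time.sleep(1)
    set_test_status('terminated')

set_test_status('running')
thread = threading.Thread(target=worker)
thread.start()

set_test_status('finished')           # ask the worker to stop
while get_test_status() != 'terminated':
    time.sleep(0.5)                   # poll for the acknowledgement
thread.join()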
Code example #17
    def validate_cluster(self, resources, instances):
        """
        Perform cluster validation - nodes readiness, Ceph cluster health
        check and functional resources tests
        """
        instances_names = list(instances.values())
        assert ocp.wait_for_nodes_ready(instances_names), (
            "Not all nodes reached status Ready"
        )

        ceph_cluster = CephCluster()
        assert ceph_health_check(
            namespace=config.ENV_DATA['cluster_namespace']
        )
        ceph_cluster.cluster_health_check(timeout=60)

        # Create resources and run IO for both FS and RBD
        # Unpack resources
        projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]

        # Project
        projects.append(helpers.create_project())

        # Secrets
        secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))
        secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))

        # Pools
        pools.append(helpers.create_ceph_block_pool())
        pools.append(helpers.get_cephfs_data_pool_name())

        # Storageclasses
        storageclasses.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=pools[0].name,
                secret_name=secrets[0].name
            )
        )
        storageclasses.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHFILESYSTEM,
                interface_name=pools[1],
                secret_name=secrets[1].name
            )
        )

        # PVCs
        pvcs.append(helpers.create_pvc(
            sc_name=storageclasses[0].name, namespace=projects[0].namespace)
        )
        pvcs.append(helpers.create_pvc(
            sc_name=storageclasses[1].name, namespace=projects[0].namespace)
        )

        # Pods
        pods.append(
            helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvcs[0].name,
                namespace=projects[0].namespace
            )
        )
        pods.append(
            helpers.create_pod(
                interface_type=constants.CEPHFILESYSTEM, pvc_name=pvcs[1].name,
                namespace=projects[0].namespace
            )
        )
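        # Unlike create_resources() in the previous example, this version
        # starts IO without waiting for the resources to become usable.
        # The waits from that example (an addition here, not source code)
        # apply equally:
        for pvc in pvcs:
            helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
            pvc.reload()
        for pod in pods:
            helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
            pod.reload()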

        # Run IO
        for pod in pods:
            pod.run_io('fs', '1G')
        for pod in pods:
            fio_result = pod.get_fio_results()
            logger.info(f"IOPs after FIO for pod {pod.name}:")
            logger.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
            )
            logger.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
            )
Code example #18
    def deploy_ocs(self):
        """
        Handle OCS deployment, since OCS deployment steps are common to any
        platform, implementing OCS deployment here in base class.
        """
        _templating = templating.Templating()

        ceph_cluster = ocp.OCP(
            kind='CephCluster', namespace=self.namespace
        )
        try:
            ceph_cluster.get().get('items')[0]
            logger.warning("OCS cluster already exists")
            return
        except (IndexError, CommandFailed):
            logger.info("Running OCS basic installation")

        if not self.ocs_operator_deployment:
            create_oc_resource(
                'common.yaml', self.cluster_path, _templating, config.ENV_DATA
            )
            run_cmd(
                f'oc label namespace {config.ENV_DATA["cluster_namespace"]} '
                f'"openshift.io/cluster-monitoring=true"'
            )
            run_cmd(
                f"oc policy add-role-to-user view "
                f"system:serviceaccount:openshift-monitoring:prometheus-k8s "
                f"-n {self.namespace}"
            )
            # HACK: If you would like to drop this hack, make sure that you
            # also update the docs and write appropriate unit/integration
            # tests for config processing.
            if config.ENV_DATA.get('monitoring_enabled') in (
                "true", "True", True
            ):
                # RBAC rules for monitoring, based on documentation change in
                # rook:
                # https://github.com/rook/rook/commit/1b6fe840f6ae7372a9675ba727ecc65326708aa8
                # HACK: This should be dropped when OCS is managed by OLM
                apply_oc_resource(
                    'rbac.yaml',
                    self.cluster_path,
                    _templating,
                    config.ENV_DATA,
                    template_dir="monitoring"
                )
            # Increased to 15 seconds as 10 is not enough
            # TODO: do the sampler function and check if resource exist
            wait_time = 15
            logger.info(f"Waiting {wait_time} seconds...")
            time.sleep(wait_time)
            create_oc_resource(
                'operator-openshift.yaml', self.cluster_path,
                _templating, config.ENV_DATA
            )
            logger.info(f"Waiting {wait_time} seconds...")
            time.sleep(wait_time)
            run_cmd(
                f"oc wait --for condition=ready pod "
                f"-l app=rook-ceph-operator "
                f"-n {self.namespace} "
                f"--timeout=120s"
            )
            run_cmd(
                f"oc wait --for condition=ready pod "
                f"-l app=rook-discover "
                f"-n {self.namespace} "
                f"--timeout=120s"
            )
            create_oc_resource(
                'cluster.yaml', self.cluster_path, _templating, config.ENV_DATA
            )
        else:
            self.deploy_ocs_via_operator()

        pod = ocp.OCP(
            kind=constants.POD, namespace=self.namespace
        )
        cfs = ocp.OCP(
            kind=constants.CEPHFILESYSTEM,
            namespace=self.namespace
        )
        # Check for Ceph pods
        assert pod.wait_for_resource(
            condition='Running', selector='app=rook-ceph-mon',
            resource_count=3, timeout=600
        )
        assert pod.wait_for_resource(
            condition='Running', selector='app=rook-ceph-mgr',
            timeout=600
        )
        assert pod.wait_for_resource(
            condition='Running', selector='app=rook-ceph-osd',
            resource_count=3, timeout=600
        )

        # validate ceph mon/osd volumes are backed by pvc
        validate_cluster_on_pvc()

        # Creating toolbox pod
        setup_ceph_toolbox()

        assert pod.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            selector='app=rook-ceph-tools', resource_count=1, timeout=600
        )

        if not self.ocs_operator_deployment:
            logger.info(f"Waiting {wait_time} seconds...")
            time.sleep(wait_time)
            # HACK: This should be dropped (including service-monitor.yaml and
            # prometheus-rules.yaml files) when OCS is managed by OLM
            if config.ENV_DATA.get('monitoring_enabled') not in (
                "true", "True", True
            ):
                # HACK: skip creation of rook-ceph-mgr service monitor when
                # monitoring is enabled (if this were not skipped, the step
                # would fail because rook would create the service monitor at
                # this point already)
                create_oc_resource(
                    "service-monitor.yaml", self.cluster_path, _templating,
                    config.ENV_DATA
                )
                # HACK: skip creation of prometheus-rules, rook-ceph is
                # concerned with its setup now, based on clarification from
                # Umanga Chapagain
                create_oc_resource(
                    "prometheus-rules.yaml", self.cluster_path, _templating,
                    config.ENV_DATA
                )
            logger.info(f"Waiting {wait_time} seconds...")
            time.sleep(wait_time)

            # Create MDS pods for CephFileSystem
            fs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
            fs_data['metadata']['namespace'] = self.namespace

            ceph_obj = OCS(**fs_data)
            ceph_obj.create()
            assert pod.wait_for_resource(
                condition=constants.STATUS_RUNNING, selector='app=rook-ceph-mds',
                resource_count=2, timeout=600
            )

        # Check for CephFilesystem creation in ocp
        cfs_data = cfs.get()
        cfs_name = cfs_data['items'][0]['metadata']['name']

        if helpers.validate_cephfilesystem(cfs_name):
            logger.info(f"MDS deployment is successful!")
            defaults.CEPHFILESYSTEM_NAME = cfs_name
        else:
            logger.error(
                "MDS deployment failed! Please check logs!"
            )

        if (config.ENV_DATA.get('monitoring_enabled')
                and config.ENV_DATA.get('persistent-monitoring')):
            # Create a pool, secret and storage class
            secret_obj = helpers.create_secret(
                interface_type=constants.CEPHBLOCKPOOL
            )
            cbp_obj = helpers.create_ceph_block_pool()
            sc_obj = helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=cbp_obj.name,
                secret_name=secret_obj.name
            )

            # Get the list of monitoring pods
            pods_list = get_all_pods(
                namespace=defaults.OCS_MONITORING_NAMESPACE,
                selector=['prometheus', 'alertmanager']
            )

            # Create configmap cluster-monitoring-config
            create_configmap_cluster_monitoring_pod(sc_obj.name)

            # Take some time to respin the pod
            waiting_time = 30
            logger.info(f"Waiting {waiting_time} seconds...")
            time.sleep(waiting_time)

            # Validate the pods are respinned and in running state
            validate_pods_are_respinned_and_running_state(
                pods_list
            )

            # Validate the pvc is created on monitoring pods
            validate_pvc_created_and_bound_on_monitoring_pods()

            # Validate the pvc are mounted on pods
            validate_pvc_are_mounted_on_monitoring_pods(pods_list)

        # Change registry backend to OCS CEPHFS RWX PVC
        registry.change_registry_backend_to_ocs()

        # Verify health of ceph cluster
        # TODO: move destroy cluster logic to new CLI usage pattern?
        logger.info("Done creating rook resources, waiting for HEALTH_OK")
        assert ceph_health_check(
            namespace=self.namespace
        )
        # patch gp2/thin storage class as 'non-default'
        self.patch_default_sc_to_non_default()
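The fixed time.sleep(wait_time) calls above are exactly what the "do the sampler function and check if resource exist" TODO wants to replace. A hypothetical polling helper (an assumption, not ocs-ci code) built from TimeoutSampler and CommandFailed as used earlier on this page:

def wait_for_resource_to_exist(ocp_obj, resource_name, timeout=60, sleep=3):
    """
    Poll until ocp_obj.get() reports the named resource instead of
    sleeping for a fixed interval. Returns True when the resource is
    found, False on timeout.
    """
    def _exists():
        try:
            return bool(ocp_obj.get(resource_name=resource_name))
        except CommandFailed:
            return False

    try:
        for found in TimeoutSampler(timeout, sleep, _exists):
            if found:
                return True
    except TimeoutExpiredError:
        return False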