def setup(self):
    """
    Creates the resources needed for the type of interface to be used.

    For CephBlockPool interface: Creates CephBlockPool, Secret and StorageClass
    For CephFilesystem interface: Creates Secret and StorageClass
    """
    logger.info(f"Creating resources for {self.interface_type} interface")

    self.interface_name = None
    if self.interface_type == constants.CEPHBLOCKPOOL:
        self.cbp_obj = helpers.create_ceph_block_pool()
        assert self.cbp_obj, f"Failed to create block pool"
        self.interface_name = self.cbp_obj.name

    elif self.interface_type == constants.CEPHFILESYSTEM:
        self.interface_name = helpers.get_cephfs_data_pool_name()

    self.secret_obj = helpers.create_secret(interface_type=self.interface_type)
    assert self.secret_obj, f"Failed to create secret"

    self.sc_obj = helpers.create_storage_class(
        interface_type=self.interface_type,
        interface_name=self.interface_name,
        secret_name=self.secret_obj.name
    )
    assert self.sc_obj, f"Failed to create storage class"
Example #2
def setup():
    """
    Setting up the environment - creating RBD and CephFS pools, secrets,
    storage classes and PVCs
    """
    global RBD_POOL, RBD_STORAGE_CLASS, RBD_SECRET, CEPHFS_OBJ, \
        CEPHFS_STORAGE_CLASS, CEPHFS_SECRET, RBD_PVC, CEPHFS_PVC
    log.info("Creating RBD Pool")
    RBD_POOL = helpers.create_ceph_block_pool()

    log.info("Creating RBD Secret")
    RBD_SECRET = helpers.create_secret(constants.CEPHBLOCKPOOL)

    log.info("Creating RBD StorageClass")
    RBD_STORAGE_CLASS = helpers.create_storage_class(constants.CEPHBLOCKPOOL,
                                                     RBD_POOL.name,
                                                     RBD_SECRET.name)

    log.info("Creating CephFilesystem")
    CEPHFS_OBJ = helpers.create_cephfilesystem()

    log.info("Creating FS Secret")
    CEPHFS_SECRET = helpers.create_secret(constants.CEPHFILESYSTEM)

    log.info("Creating FS StorageClass")
    CEPHFS_STORAGE_CLASS = helpers.create_storage_class(
        constants.CEPHFILESYSTEM, helpers.get_cephfs_data_pool_name(),
        CEPHFS_SECRET.name)

    log.info("Creating RBC PVC")
    RBD_PVC = helpers.create_pvc(sc_name=RBD_STORAGE_CLASS.name)

    log.info("Creating CephFs PVC")
    CEPHFS_PVC = helpers.create_pvc(sc_name=CEPHFS_STORAGE_CLASS.name)
def test_fixture(request):
    """
    Setup and teardown
    * The setup will deploy openshift-logging in the cluster
    * The teardown will uninstall cluster-logging from the cluster
    """

    def finalizer():
        teardown(cbp_obj, sc_obj)

    request.addfinalizer(finalizer)

    # Deploys elastic-search operator on the project openshift-operators-redhat
    ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
    assert ocp_logging_obj.create_elasticsearch_operator_group(
        yaml_file=constants.EO_OG_YAML,
        resource_name='openshift-operators-redhat'
    )
    assert ocp_logging_obj.set_rbac(
        yaml_file=constants.EO_RBAC_YAML, resource_name='prometheus-k8s'
    )
    assert ocp_logging_obj.create_elasticsearch_subscription(constants.EO_SUB_YAML)

    # Deploys cluster-logging operator on the project openshift-logging
    ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
    assert ocp_logging_obj.create_clusterlogging_operator_group(
        yaml_file=constants.CL_OG_YAML
    )
    assert ocp_logging_obj.create_clusterlogging_subscription(
        yaml_file=constants.CL_SUB_YAML
    )

    # Creates storage class
    cbp_obj = helpers.create_ceph_block_pool()
    sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=cbp_obj.name,
        secret_name=constants.DEFAULT_SECRET,
        reclaim_policy="Delete"
    )
    assert ocp_logging_obj.create_instance_in_clusterlogging(sc_name=sc_obj.name)

    # Check the health of the cluster-logging
    assert ocp_logging_obj.check_health_of_clusterlogging()

    csv_obj = CSV(
        kind=constants.CLUSTER_SERVICE_VERSION, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
    )

    get_version = csv_obj.get(out_yaml_format=True)
    for item in get_version['items']:
        if '4.2.0' in item['metadata']['name']:
            logger.info("The version of operators is 4.2.0")
            logger.info(item['metadata']['name'])
        else:
            logger.error("The version is not 4.2.0")
Example #4
def setup():
    """
    Setting up the environment - creating an RBD pool and secret
    """
    global SECRET, POOL
    log.info("Creating RBD Pool")
    POOL = helpers.create_ceph_block_pool()

    log.info("Creating RBD Secret")
    SECRET = helpers.create_secret(constants.CEPHBLOCKPOOL)
Example #5
    def factory(interface=constants.CEPHBLOCKPOOL):
        if interface == constants.CEPHBLOCKPOOL:
            ceph_pool_obj = helpers.create_ceph_block_pool()
        elif interface == constants.CEPHFILESYSTEM:
            cfs = ocp.OCP(kind=constants.CEPHFILESYSTEM,
                          namespace=defaults.ROOK_CLUSTER_NAMESPACE).get(
                              defaults.CEPHFILESYSTEM_NAME)
            ceph_pool_obj = OCS(**cfs)
        assert ceph_pool_obj, f"Failed to create {interface} pool"
        instances.append(ceph_pool_obj)
        return ceph_pool_obj
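
# The factory above is only a fragment: it references an `instances` list and
# is normally returned from an enclosing pytest fixture that registers a
# cleanup finalizer. Below is a hedged sketch of what such a wrapper might
# look like, modeled on the finalizer patterns in the other examples on this
# page; the fixture name ceph_pool_factory and the cleanup details are
# assumptions, not the project's actual code.
@pytest.fixture()
def ceph_pool_factory(request):
    instances = []

    def factory(interface=constants.CEPHBLOCKPOOL):
        ...  # body as shown above

    def finalizer():
        # Clean up whatever the factory appended to `instances`. A real
        # implementation would likely skip the pre-existing CephFileSystem
        # object, since it was fetched rather than created.
        for instance in instances:
            instance.delete()

    request.addfinalizer(finalizer)
    return factory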
Example #6
def test_verify_new_cbp_creation_not_blocked_by_invalid_cbp(teardown_factory):
    """
    Test to verify that a new Ceph block pool can be created without first
    deleting a Ceph block pool that has invalid parameters
    Verifies BZ 1711814
    """
    log.info("Trying creating ceph block pool with invalid failure domain.")
    cbp_invalid = helpers.create_ceph_block_pool(
        failure_domain='no-failure-domain', verify=False)
    teardown_factory(cbp_invalid)
    assert not helpers.verify_block_pool_exists(cbp_invalid.name), (
        f"Unexpected: Ceph Block Pool {cbp_invalid.name} created with "
        f"invalid failure domain.")
    log.info(
        f"Expected: {cbp_invalid.name} with invalid failure domain is not "
        f"present in pools list")

    log.info("Create valid ceph block pool")
    cbp_valid = helpers.create_ceph_block_pool(verify=False)
    teardown_factory(cbp_valid)
    assert helpers.verify_block_pool_exists(
        cbp_valid.name), (f"Ceph Block Pool {cbp_valid.name} is not created.")
    log.info(f"Verified: {cbp_valid.name} is created")
def setup_rbd():
    """
    Setting up the environment
    Creating replicated pool, secret and storage class for RBD
    """
    log.info("Creating CephBlockPool")
    global RBD_POOL
    RBD_POOL = helpers.create_ceph_block_pool()
    global RBD_SECRET_OBJ
    RBD_SECRET_OBJ = helpers.create_secret(constants.CEPHBLOCKPOOL)
    global RBD_SC_OBJ
    log.info("Creating RBD Storage class ")
    RBD_SC_OBJ = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=RBD_POOL.name,
        secret_name=RBD_SECRET_OBJ.name)
Example #8
def create_ceph_block_pool(request):
    """
    Create a Ceph block pool
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete the Ceph block pool
        """
        if class_instance.cbp_obj.get():
            class_instance.cbp_obj.delete()

    request.addfinalizer(finalizer)

    class_instance.cbp_obj = helpers.create_ceph_block_pool()
    assert class_instance.cbp_obj, "Failed to create block pool"
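
# Since this fixture stores the pool on request.node.cls, it is meant to be
# applied to a test class rather than called directly. The usage below is a
# hedged sketch; the class and test names are illustrative and not taken from
# the original project.
@pytest.mark.usefixtures("create_ceph_block_pool")
class TestWithBlockPool:
    def test_block_pool_was_created(self):
        # self.cbp_obj was set on the test class by the fixture above
        assert helpers.verify_block_pool_exists(self.cbp_obj.name)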
Example #9
def create_multiple_rbd_storageclasses(count=1):
    """
    Function for creating multiple RBD storage classes.

    If count is not passed, only one storage class is created, since the
    default count is one.

    Args:
         count (int): number of storage classes to create (default: 1)
    """
    for sc_count in range(count):
        log.info("Creating CephBlockPool")
        pool_obj = helpers.create_ceph_block_pool()
        helpers.create_storage_class(constants.CEPHBLOCKPOOL,
                                     interface_name=pool_obj.name,
                                     secret_name=RBD_SECRET_OBJ.name)

    return True
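
# Note that this helper relies on a module-level RBD_SECRET_OBJ, so a secret
# has to be created first (for example by a setup step such as setup_rbd
# shown earlier). A minimal, hedged usage sketch:
RBD_SECRET_OBJ = helpers.create_secret(constants.CEPHBLOCKPOOL)
assert create_multiple_rbd_storageclasses(count=3)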
Example #10
    def test_remove_mon_pod_from_cluster(self):
        """
        Remove a mon pod from the cluster after I/O has been performed on
        the pool, and wait for the operator to create a new mon pod on
        its own

        """
        ceph_cluster = CephCluster()
        pods = ocp.OCP(kind=constants.POD,
                       namespace=config.ENV_DATA['cluster_namespace'])
        list_mons = ceph_cluster.get_mons_from_cluster()
        if len(list_mons) <= 1:
            pytest.skip("INVALID: Mon count should be more than one to delete.")
        self.pool_obj = create_ceph_block_pool()
        assert run_io_on_pool(self.pool_obj), 'Failed to run I/O on the pool'
        assert delete_cephblockpools([self.pool_obj]), 'Failed to delete pool'
        ceph_cluster.cluster_health_check(timeout=0)
        ceph_cluster.remove_mon_from_cluster()
        assert verify_mon_pod_up(pods), "Mon pods are not up and in running state"
        ceph_cluster.cluster_health_check(timeout=60)
def test_fixture(request):
    """
    Setup and teardown
    * The setup will deploy openshift-logging in the cluster
    * The teardown will uninstall cluster-logging from the cluster
    """
    def finalizer():
        teardown(cbp_obj, sc_obj)

    request.addfinalizer(finalizer)

    # Deploys elastic-search operator on the project openshift-operators-redhat
    ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
    assert ocp_logging_obj.create_elasticsearch_operator_group(
        yaml_file=constants.EO_OG_YAML,
        resource_name='openshift-operators-redhat')
    assert ocp_logging_obj.set_rbac(yaml_file=constants.EO_RBAC_YAML,
                                    resource_name='prometheus-k8s')
    assert ocp_logging_obj.create_elasticsearch_subscription(
        constants.EO_SUB_YAML)

    # Deploys cluster-logging operator on the project openshift-logging
    ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
    assert ocp_logging_obj.create_clusterlogging_operator_group(
        yaml_file=constants.CL_OG_YAML)
    assert ocp_logging_obj.create_clusterlogging_subscription(
        yaml_file=constants.CL_SUB_YAML)

    # Creates storage class
    cbp_obj = helpers.create_ceph_block_pool()
    sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=cbp_obj.name,
        secret_name=constants.DEFAULT_SECRET,
        reclaim_policy="Delete")
    assert ocp_logging_obj.create_instance_in_clusterlogging(
        sc_name=sc_obj.name)

    # Check the health of the cluster-logging
    assert ocp_logging_obj.check_health_of_clusterlogging()
Example #12
def ripsaw(request):
    # Create Secret and Pool
    secret = helpers.create_secret(constants.CEPHBLOCKPOOL)
    pool = helpers.create_ceph_block_pool()

    # Create storage class
    log.info("Creating a Storage Class")
    sc = helpers.create_storage_class(sc_name='pgsql-workload',
                                      interface_type=constants.CEPHBLOCKPOOL,
                                      secret_name=secret.name,
                                      interface_name=pool.name)
    # Create RipSaw Operator
    ripsaw = RipSaw()

    def teardown():
        ripsaw.cleanup()
        sc.delete()
        secret.delete()
        pool.delete()

    request.addfinalizer(teardown)
    return ripsaw
Example #13
def run_io_in_background(request):
    """
    Run IO during the test execution
    """
    if config.RUN['cli_params'].get('io_in_bg'):
        log.info(f"Tests will be running while IO is in the background")

        g_sheet = None
        if config.RUN['google_api_secret']:
            g_sheet = GoogleSpreadSheetAPI("IO BG results", 0)
        else:
            log.warning(
                "Google API secret was not found. IO won't be reported to "
                "a Google spreadsheet")
        results = list()
        temp_file = tempfile.NamedTemporaryFile(mode='w+',
                                                prefix='test_status',
                                                delete=False)

        def get_test_status():
            with open(temp_file.name, 'r') as t_file:
                return t_file.readline()

        def set_test_status(status):
            with open(temp_file.name, 'w') as t_file:
                t_file.writelines(status)

        set_test_status('running')

        def finalizer():
            """
            Delete the resources created during setup, used for
            running IO in the test background
            """
            set_test_status('finished')
            try:
                for status in TimeoutSampler(90, 3, get_test_status):
                    if status == 'terminated':
                        break
            except TimeoutExpiredError:
                log.warning("Background IO was still in progress before IO "
                            "thread termination")
            if thread:
                thread.join()

            log.info(f"Background IO has stopped")
            for result in results:
                log.info(f"IOPs after FIO for pod {pod_obj.name}:")
                log.info(f"Read: {result[0]}")
                log.info(f"Write: {result[1]}")

            if pod_obj:
                pod_obj.delete()
                pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
            if pvc_obj:
                pvc_obj.delete()
                pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
            if sc_obj:
                sc_obj.delete()
            if cbp_obj:
                cbp_obj.delete()
            if secret_obj:
                secret_obj.delete()

        request.addfinalizer(finalizer)

        secret_obj = helpers.create_secret(
            interface_type=constants.CEPHBLOCKPOOL)
        cbp_obj = helpers.create_ceph_block_pool()
        sc_obj = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=cbp_obj.name,
            secret_name=secret_obj.name)
        pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, size='2Gi')
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
        pod_obj = helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                                     pvc_name=pvc_obj.name)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()

        def run_io_in_bg():
            """
            Run IO by executing FIO and deleting the file created for FIO on
            the pod, in a while true loop. Will be running as long as
            the test is running.
            """
            while get_test_status() == 'running':
                pod_obj.run_io('fs', '1G')
                result = pod_obj.get_fio_results()
                reads = result.get('jobs')[0].get('read').get('iops')
                writes = result.get('jobs')[0].get('write').get('iops')
                if g_sheet:
                    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    g_sheet.insert_row([now, reads, writes])

                results.append((reads, writes))

                file_path = os.path.join(
                    pod_obj.get_storage_path(storage_type='fs'),
                    pod_obj.io_params['filename'])
                pod_obj.exec_cmd_on_pod(f'rm -rf {file_path}')
            set_test_status('terminated')

        log.info(f"Start running IO in the test background")

        thread = threading.Thread(target=run_io_in_bg)
        thread.start()
    def validate_cluster(self, resources, instances):
        """
        Perform cluster validation - nodes readiness, Ceph cluster health
        check and functional resources tests
        """
        instances_names = list(instances.values())
        assert ocp.wait_for_nodes_ready(instances_names), (
            "Not all nodes reached status Ready"
        )

        ceph_cluster = CephCluster()
        assert ceph_health_check(
            namespace=config.ENV_DATA['cluster_namespace']
        )
        ceph_cluster.cluster_health_check(timeout=60)

        # Create resources and run IO for both FS and RBD
        # Unpack resources
        projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]

        # Project
        projects.append(helpers.create_project())

        # Secrets
        secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))
        secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))

        # Pools
        pools.append(helpers.create_ceph_block_pool())
        pools.append(helpers.get_cephfs_data_pool_name())

        # Storageclasses
        storageclasses.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=pools[0].name,
                secret_name=secrets[0].name
            )
        )
        storageclasses.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHFILESYSTEM,
                interface_name=pools[1],
                secret_name=secrets[1].name
            )
        )

        # PVCs
        pvcs.append(helpers.create_pvc(
            sc_name=storageclasses[0].name, namespace=projects[0].namespace)
        )
        pvcs.append(helpers.create_pvc(
            sc_name=storageclasses[1].name, namespace=projects[0].namespace)
        )

        # Pods
        pods.append(
            helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvcs[0].name,
                namespace=projects[0].namespace
            )
        )
        pods.append(
            helpers.create_pod(
                interface_type=constants.CEPHFILESYSTEM, pvc_name=pvcs[1].name,
                namespace=projects[0].namespace
            )
        )

        # Run IO
        for pod in pods:
            pod.run_io('fs', '1G')
        for pod in pods:
            fio_result = pod.get_fio_results()
            logger.info(f"IOPs after FIO for pod {pod.name}:")
            logger.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
            )
            logger.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
            )
Example #15
    def deploy_ocs(self):
        """
        Handle OCS deployment. Since the OCS deployment steps are common to
        all platforms, the deployment is implemented here in the base class.
        """
        _templating = templating.Templating()

        ceph_cluster = ocp.OCP(
            kind='CephCluster', namespace=self.namespace
        )
        try:
            ceph_cluster.get().get('items')[0]
            logger.warning("OCS cluster already exists")
            return
        except (IndexError, CommandFailed):
            logger.info("Running OCS basic installation")

        if not self.ocs_operator_deployment:
            create_oc_resource(
                'common.yaml', self.cluster_path, _templating, config.ENV_DATA
            )
            run_cmd(
                f'oc label namespace {config.ENV_DATA["cluster_namespace"]} '
                f'"openshift.io/cluster-monitoring=true"'
            )
            run_cmd(
                f"oc policy add-role-to-user view "
                f"system:serviceaccount:openshift-monitoring:prometheus-k8s "
                f"-n {self.namespace}"
            )
            # HACK: If you would like to drop this hack, make sure that you
            # also update the docs and write appropriate unit/integration
            # tests for config processing.
            if config.ENV_DATA.get('monitoring_enabled') in (
                "true", "True", True
            ):
                # RBAC rules for monitoring, based on documentation change in
                # rook:
                # https://github.com/rook/rook/commit/1b6fe840f6ae7372a9675ba727ecc65326708aa8
                # HACK: This should be dropped when OCS is managed by OLM
                apply_oc_resource(
                    'rbac.yaml',
                    self.cluster_path,
                    _templating,
                    config.ENV_DATA,
                    template_dir="monitoring"
                )
            # Increased to 15 seconds as 10 is not enough
            # TODO: do the sampler function and check if resource exist
            wait_time = 15
            logger.info(f"Waiting {wait_time} seconds...")
            time.sleep(wait_time)
            create_oc_resource(
                'operator-openshift.yaml', self.cluster_path,
                _templating, config.ENV_DATA
            )
            logger.info(f"Waiting {wait_time} seconds...")
            time.sleep(wait_time)
            run_cmd(
                f"oc wait --for condition=ready pod "
                f"-l app=rook-ceph-operator "
                f"-n {self.namespace} "
                f"--timeout=120s"
            )
            run_cmd(
                f"oc wait --for condition=ready pod "
                f"-l app=rook-discover "
                f"-n {self.namespace} "
                f"--timeout=120s"
            )
            create_oc_resource(
                'cluster.yaml', self.cluster_path, _templating, config.ENV_DATA
            )
        else:
            self.deploy_ocs_via_operator()

        pod = ocp.OCP(
            kind=constants.POD, namespace=self.namespace
        )
        cfs = ocp.OCP(
            kind=constants.CEPHFILESYSTEM,
            namespace=self.namespace
        )
        # Check for Ceph pods
        assert pod.wait_for_resource(
            condition='Running', selector='app=rook-ceph-mon',
            resource_count=3, timeout=600
        )
        assert pod.wait_for_resource(
            condition='Running', selector='app=rook-ceph-mgr',
            timeout=600
        )
        assert pod.wait_for_resource(
            condition='Running', selector='app=rook-ceph-osd',
            resource_count=3, timeout=600
        )

        # validate ceph mon/osd volumes are backed by pvc
        validate_cluster_on_pvc()

        # Creating toolbox pod
        setup_ceph_toolbox()

        assert pod.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            selector='app=rook-ceph-tools', resource_count=1, timeout=600
        )

        if not self.ocs_operator_deployment:
            logger.info(f"Waiting {wait_time} seconds...")
            time.sleep(wait_time)
            # HACK: This should be dropped (including service-monitor.yaml and
            # prometheus-rules.yaml files) when OCS is managed by OLM
            if config.ENV_DATA.get('monitoring_enabled') not in (
                "true", "True", True
            ):
                # HACK: skip creation of rook-ceph-mgr service monitor when
                # monitoring is enabled (if this were not skipped, the step
                # would fail because rook would create the service monitor at
                # this point already)
                create_oc_resource(
                    "service-monitor.yaml", self.cluster_path, _templating,
                    config.ENV_DATA
                )
                # HACK: skip creation of prometheus-rules, rook-ceph is
                # concerned with its setup now, based on clarification from
                # Umanga Chapagain
                create_oc_resource(
                    "prometheus-rules.yaml", self.cluster_path, _templating,
                    config.ENV_DATA
                )
            logger.info(f"Waiting {wait_time} seconds...")
            time.sleep(wait_time)

            # Create MDS pods for CephFileSystem
            fs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
            fs_data['metadata']['namespace'] = self.namespace

            ceph_obj = OCS(**fs_data)
            ceph_obj.create()
            assert pod.wait_for_resource(
                condition=constants.STATUS_RUNNING, selector='app=rook-ceph-mds',
                resource_count=2, timeout=600
            )

        # Check for CephFilesystem creation in ocp
        cfs_data = cfs.get()
        cfs_name = cfs_data['items'][0]['metadata']['name']

        if helpers.validate_cephfilesystem(cfs_name):
            logger.info(f"MDS deployment is successful!")
            defaults.CEPHFILESYSTEM_NAME = cfs_name
        else:
            logger.error(
                f"MDS deployment Failed! Please check logs!"
            )

        if config.ENV_DATA.get('monitoring_enabled') and config.ENV_DATA.get('persistent-monitoring'):
            # Create a pool, secrets and sc
            secret_obj = helpers.create_secret(interface_type=constants.CEPHBLOCKPOOL)
            cbp_obj = helpers.create_ceph_block_pool()
            sc_obj = helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=cbp_obj.name,
                secret_name=secret_obj.name
            )

            # Get the list of monitoring pods
            pods_list = get_all_pods(
                namespace=defaults.OCS_MONITORING_NAMESPACE,
                selector=['prometheus', 'alertmanager']
            )

            # Create configmap cluster-monitoring-config
            create_configmap_cluster_monitoring_pod(sc_obj.name)

            # Take some time to respin the pod
            waiting_time = 30
            logger.info(f"Waiting {waiting_time} seconds...")
            time.sleep(waiting_time)

            # Validate the pods are respinned and in running state
            validate_pods_are_respinned_and_running_state(
                pods_list
            )

            # Validate the pvc is created on monitoring pods
            validate_pvc_created_and_bound_on_monitoring_pods()

            # Validate the pvc are mounted on pods
            validate_pvc_are_mounted_on_monitoring_pods(pods_list)

        # Change registry backend to OCS CEPHFS RWX PVC
        registry.change_registry_backend_to_ocs()

        # Verify health of ceph cluster
        # TODO: move destroy cluster logic to new CLI usage pattern?
        logger.info("Done creating rook resources, waiting for HEALTH_OK")
        assert ceph_health_check(
            namespace=self.namespace
        )
        # patch gp2/thin storage class as 'non-default'
        self.patch_default_sc_to_non_default()
Example #16
    def test_create_multiple_sc_with_different_pool_name(
        self, teardown_factory
    ):
        """
        This test function does the following:
        * Creates multiple Storage Classes with different pool names
        * Creates PVCs using each Storage Class
        * Mounts each PVC to an app pod
        * Runs IO on each app pod
        """

        # Create 3 storageclasses, each with different pool name
        cbp_list = []
        sc_list = []
        for i in range(3):
            log.info(f"Creating cephblockpool")
            cbp_obj = helpers.create_ceph_block_pool()
            log.info(
                f"{cbp_obj.name} created successfully"
            )
            log.info(
                f"Creating a RBD storage class using {cbp_obj.name}"
            )
            cbp_list.append(cbp_obj)
            sc_obj = helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=cbp_obj.name,
                secret_name=self.rbd_secret_obj.name
            )

            log.info(
                f"StorageClass: {sc_obj.name} "
                f"created successfully using {cbp_obj.name}"
            )
            sc_list.append(sc_obj)
            teardown_factory(cbp_obj)
            teardown_factory(sc_obj)

        # Create PVCs using each SC
        pvc_list = []
        for i in range(3):
            log.info(f"Creating a PVC using {sc_list[i].name}")
            pvc_obj = helpers.create_pvc(sc_list[i].name)
            log.info(
                f"PVC: {pvc_obj.name} created successfully using "
                f"{sc_list[i].name}"
            )
            pvc_list.append(pvc_obj)
            teardown_factory(pvc_obj)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

        # Create app pod and mount each PVC
        pod_list = []
        for i in range(3):
            log.info(f"Creating an app pod and mount {pvc_list[i].name}")
            pod_obj = helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL,
                pvc_name=pvc_list[i].name,
            )
            log.info(
                f"{pod_obj.name} created successfully and "
                f"mounted {pvc_list[i].name}"
            )
            pod_list.append(pod_obj)
            teardown_factory(pod_obj)
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()

        # Run IO on each app pod for some time
        for pod in pod_list:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io('fs', size='2G')

        for pod in pod_list:
            get_fio_rw_iops(pod)
def create_resources(resources, run_io=True):
    """
    Sanity validation - Create resources (FS and RBD) and run IO

    Args:
        resources (tuple): Lists of projects, secrets, pools,
            storageclasses, pvcs and pods
        run_io (bool): True for run IO, False otherwise

    """
    # Create resources and run IO for both FS and RBD
    # Unpack resources
    projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]

    # Project
    projects.append(helpers.create_project())

    # Secrets
    secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))
    secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))

    # Pools
    pools.append(helpers.create_ceph_block_pool())
    pools.append(helpers.get_cephfs_data_pool_name())

    # Storageclasses
    storageclasses.append(
        helpers.create_storage_class(interface_type=constants.CEPHBLOCKPOOL,
                                     interface_name=pools[0].name,
                                     secret_name=secrets[0].name))
    storageclasses.append(
        helpers.create_storage_class(interface_type=constants.CEPHFILESYSTEM,
                                     interface_name=pools[1],
                                     secret_name=secrets[1].name))

    # PVCs
    pvcs.append(
        helpers.create_pvc(sc_name=storageclasses[0].name,
                           namespace=projects[0].namespace))
    pvcs.append(
        helpers.create_pvc(sc_name=storageclasses[1].name,
                           namespace=projects[0].namespace))
    for pvc in pvcs:
        helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
        pvc.reload()

    # Pods
    pods.append(
        helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                           pvc_name=pvcs[0].name,
                           namespace=projects[0].namespace))
    pods.append(
        helpers.create_pod(interface_type=constants.CEPHFILESYSTEM,
                           pvc_name=pvcs[1].name,
                           namespace=projects[0].namespace))
    for pod in pods:
        helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
        pod.reload()

    if run_io:
        # Run IO
        for pod in pods:
            pod.run_io('fs', '1G')
        for pod in pods:
            fio_result = pod.get_fio_results()
            logger.info(f"IOPs after FIO for pod {pod.name}:")
            logger.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
            logger.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")