Example #1
File: ocs_288.py Project: 697127/ocs-ci
def create_storageclass_cephfs():
    """
    Function for creating a CephFS storage class
    """
    helpers.create_storage_class(constants.CEPHFILESYSTEM,
                                 helpers.get_cephfs_data_pool_name(),
                                 CEPHFS_SECRET_OBJ.name)

    return True
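A minimal usage sketch (not from the source): capturing the returned object instead of a bare True lets later steps consume the storage class, e.g. to create a PVC; the calls match the helpers used throughout these examples.

# Sketch: capture the created storage class so it can be used later.
sc_obj = helpers.create_storage_class(
    constants.CEPHFILESYSTEM,
    helpers.get_cephfs_data_pool_name(),
    CEPHFS_SECRET_OBJ.name,
)
pvc_obj = helpers.create_pvc(sc_name=sc_obj.name)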
Example #2
def setup(self):
    """
    Setting up storage class
    """

    self.sc_obj_retain = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=self.cbp_obj.name,
        secret_name=self.rbd_secret_obj.name,
        reclaim_policy=constants.RECLAIM_POLICY_RETAIN)
    self.sc_obj_delete = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=self.cbp_obj.name,
        secret_name=self.rbd_secret_obj.name,
        reclaim_policy=constants.RECLAIM_POLICY_DELETE)
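A hedged sketch of how a test might consume the two classes created above: PVCs inherit the reclaimPolicy of their storage class. The test method name is hypothetical.

def test_reclaim_policies(self):
    # Sketch: each PVC inherits its storage class's reclaimPolicy.
    pvc_retain = helpers.create_pvc(sc_name=self.sc_obj_retain.name)
    pvc_delete = helpers.create_pvc(sc_name=self.sc_obj_delete.name)
    for pvc_obj in (pvc_retain, pvc_delete):
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)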
Example #3
def setup(self):
    """
    Creates the resources needed for the type of interface to be used.

    For CephBlockPool interface: Creates CephBlockPool, Secret and StorageClass
    For CephFilesystem interface: Creates Secret and StorageClass
    """
    logger.info(f"Creating resources for {self.interface_type} interface")

    self.interface_name = None
    if self.interface_type == constants.CEPHBLOCKPOOL:
        self.cbp_obj = helpers.create_ceph_block_pool()
        assert self.cbp_obj, "Failed to create block pool"
        self.interface_name = self.cbp_obj.name

    elif self.interface_type == constants.CEPHFILESYSTEM:
        self.interface_name = helpers.get_cephfs_data_pool_name()

    self.secret_obj = helpers.create_secret(interface_type=self.interface_type)
    assert self.secret_obj, "Failed to create secret"

    self.sc_obj = helpers.create_storage_class(
        interface_type=self.interface_type,
        interface_name=self.interface_name,
        secret_name=self.secret_obj.name
    )
    assert self.sc_obj, "Failed to create storage class"
Example #4
    def factory(interface=constants.CEPHBLOCKPOOL,
                secret=None,
                custom_data=None):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether an RBD-based or CephFS resource is created.
                RBD is the default.
            secret (object): An OCS instance for the secret.
            custom_data (dict): If provided, the storageclass object is
                created using this data. Parameters `block_pool` and
                `secret` are not used, but references are set if provided.

        Returns:
            object: helpers.create_storage_class instance with links to
                block_pool and secret.
        """
        if custom_data:
            sc_obj = helpers.create_resource(**custom_data, wait=False)
        else:
            secret = secret or secret_factory(interface=interface)
            ceph_pool = ceph_pool_factory(interface)
            interface_name = ceph_pool.name

            sc_obj = helpers.create_storage_class(
                interface_type=interface,
                interface_name=interface_name,
                secret_name=secret.name)
            assert sc_obj, f"Failed to create {interface} storage class"
            sc_obj.ceph_pool = ceph_pool
            sc_obj.secret = secret

        instances.append(sc_obj)
        return sc_obj
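A hedged sketch of calling this factory from a test, assuming the secret_factory and ceph_pool_factory fixtures it closes over are in scope. Note that this version always uses the pool name as interface_name; Example #14 below adds the CephFS-specific branch.

# Sketch: RBD storage class via the factory, then a PVC from it.
sc_obj = factory(interface=constants.CEPHBLOCKPOOL)
pvc_obj = helpers.create_pvc(sc_name=sc_obj.name)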
Example #5
    def test_ocs_347(self, resources):
        pod, pvc, storageclass = resources

        log.info("Creating RBD StorageClass")
        storageclass.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=self.cbp_obj.name,
                secret_name=self.rbd_secret_obj.name,
            )
        )
        log.info("Creating a PVC")
        pvc.append(helpers.create_pvc(sc_name=storageclass[0].name))
        for pvc_obj in pvc:
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
        log.info(f"Creating a pod with pvc {pvc[0].name}")
        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvc[0].name,
            pod_dict_path=constants.NGINX_POD_YAML
        )
        pod.append(pod_obj)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
Example #6
def create_rbd_storageclass(request):
    """
    Create an RBD storage class
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete the RBD storage class
        """
        if class_instance.sc_obj.get():
            class_instance.sc_obj.delete()
            class_instance.sc_obj.ocp.wait_for_delete(
                class_instance.sc_obj.name)

    request.addfinalizer(finalizer)

    if not hasattr(class_instance, 'reclaim_policy'):
        class_instance.reclaim_policy = constants.RECLAIM_POLICY_DELETE

    class_instance.sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=class_instance.cbp_obj.name,
        secret_name=class_instance.rbd_secret_obj.name,
        reclaim_policy=class_instance.reclaim_policy)
    assert class_instance.sc_obj, "Failed to create storage class"
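A hedged sketch of consuming this fixture: it stores sc_obj on the test class via request.node.cls, so tests reach it through self. The class and test names are hypothetical, and cbp_obj/rbd_secret_obj are assumed to come from companion fixtures.

import pytest

@pytest.mark.usefixtures(create_rbd_storageclass.__name__)
class TestRbdStorageClass:
    def test_sc_created(self):
        # Sketch: the fixture stored the storage class on the class.
        assert self.sc_obj.get()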
Example #7
    def dynamic_pvc_base(self, interface_type, reclaim_policy):
        """
        Base function for Dynamic PVC creation tests.
        Fetches the worker node name list, creates a StorageClass and PVC.
        """
        self.interface_type = interface_type
        self.reclaim_policy = reclaim_policy
        self.worker_nodes_list = helpers.get_worker_nodes()

        if self.interface_type == constants.CEPHBLOCKPOOL:
            self.interface_name = self.cbp_obj.name
            self.secret_name = self.rbd_secret_obj.name

        elif self.interface_type == constants.CEPHFILESYSTEM:
            self.interface_name = helpers.get_cephfs_data_pool_name()
            self.secret_name = self.cephfs_secret_obj.name

        logger.info(
            f"Creating Storage Class with reclaimPolicy: {self.reclaim_policy}"
        )
        self.sc_obj = helpers.create_storage_class(
            interface_type=self.interface_type,
            interface_name=self.interface_name,
            secret_name=self.secret_name,
            reclaim_policy=self.reclaim_policy)

        logger.info(f"Creating PVC with accessModes: {self.access_mode}")
        self.pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name,
                                          namespace=self.namespace,
                                          size=self.pvc_size,
                                          wait=True,
                                          access_mode=self.access_mode)
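A hedged sketch of a test driving this base function; the interface and reclaim policy values are illustrative, and the fixture-provided attributes (cbp_obj, rbd_secret_obj, access_mode, pvc_size, namespace) are assumed to exist.

    def test_dynamic_pvc(self):
        # Sketch: exercise the base flow with an RBD class and Retain policy.
        self.dynamic_pvc_base(
            constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_RETAIN
        )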
Example #8
    def test_ocs_347(self):
        global PVC, STORAGE_CLASS
        log.info("Creating RBD StorageClass")
        STORAGE_CLASS = helpers.create_storage_class(constants.CEPHBLOCKPOOL,
                                                     'rbd', SECRET.name)

        log.info("Creating a PVC")
        PVC = helpers.create_pvc(STORAGE_CLASS.name)
Example #9
File: ocs_288.py Project: 697127/ocs-ci
def create_multiple_rbd_storageclasses(count=1):
    """
    Function for creating multiple RBD storage classes.
    If no count is passed, only one storage class is created,
    since count defaults to one.

    Args:
         count (int): number of storage classes to create
            (defaults to one)
    """
    for sc_count in range(count):
        log.info("Creating CephBlockPool")
        pool_obj = helpers.create_ceph_block_pool()
        helpers.create_storage_class(constants.CEPHBLOCKPOOL,
                                     interface_name=pool_obj.name,
                                     secret_name=RBD_SECRET_OBJ.name)

    return True
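Usage is direct; a sketch assuming RBD_SECRET_OBJ is already set up as in the surrounding module:

# Sketch: three storage classes, each backed by its own new block pool.
create_multiple_rbd_storageclasses(count=3)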
Example #10
    def test_create_multiple_sc_with_same_pool_name(self, interface_type,
                                                    resources):
        """
        This test function does the following:
        * Creates multiple Storage Classes with the same pool name
        * Creates PVCs using each Storage Class
        * Mounts each PVC to an app pod
        * Runs IO on each app pod
        """
        # Unpack resources
        pods, pvcs, storageclasses = resources

        # Create 3 Storage Classes with same pool name
        if interface_type == constants.CEPHBLOCKPOOL:
            secret = self.rbd_secret_obj.name
            interface_name = self.cbp_obj.name
        else:
            interface_type = constants.CEPHFILESYSTEM
            secret = self.cephfs_secret_obj.name
            interface_name = helpers.get_cephfs_data_pool_name()
        for i in range(3):
            log.info(f"Creating a {interface_type} storage class")
            storageclasses.append(
                helpers.create_storage_class(interface_type=interface_type,
                                             interface_name=interface_name,
                                             secret_name=secret))
            log.info(f"{interface_type} StorageClass: {storageclasses[i].name} "
                     f"created successfully")

        # Create PVCs using each SC
        for i in range(3):
            log.info(f"Creating a PVC using {storageclasses[i].name}")
            pvcs.append(helpers.create_pvc(storageclasses[i].name))
        for pvc in pvcs:
            helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
            pvc.reload()

        # Create app pod and mount each PVC
        for i in range(3):
            log.info(f"Creating an app pod and mount {pvcs[i].name}")
            pods.append(
                helpers.create_pod(interface_type=interface_type,
                                   pvc_name=pvcs[i].name,
                                   namespace=defaults.ROOK_CLUSTER_NAMESPACE))
            helpers.wait_for_resource_state(pods[i], constants.STATUS_RUNNING)
            pods[i].reload()
            log.info(f"{pods[i].name} created successfully and "
                     f"mounted {pvcs[i].name}")

        # Run IO on each app pod for sometime
        for pod in pods:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io('fs', size='2G')

        for pod in pods:
            get_fio_rw_iops(pod)
Example #11
def test_fixture(request):
    """
    Setup and teardown
    * The setup will deploy openshift-logging in the cluster
    * The teardown will uninstall cluster-logging from the cluster
    """

    def finalizer():
        teardown(cbp_obj, sc_obj)

    request.addfinalizer(finalizer)

    # Deploys elastic-search operator on the project openshift-operators-redhat
    ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
    assert ocp_logging_obj.create_elasticsearch_operator_group(
        yaml_file=constants.EO_OG_YAML,
        resource_name='openshift-operators-redhat'
    )
    assert ocp_logging_obj.set_rbac(
        yaml_file=constants.EO_RBAC_YAML, resource_name='prometheus-k8s'
    )
    assert ocp_logging_obj.create_elasticsearch_subscription(constants.EO_SUB_YAML)

    # Deploys cluster-logging operator on the project openshift-logging
    ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
    assert ocp_logging_obj.create_clusterlogging_operator_group(
        yaml_file=constants.CL_OG_YAML
    )
    assert ocp_logging_obj.create_clusterlogging_subscription(
        yaml_file=constants.CL_SUB_YAML
    )

    # Creates storage class
    cbp_obj = helpers.create_ceph_block_pool()
    sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=cbp_obj.name,
        secret_name=constants.DEFAULT_SECRET,
        reclaim_policy="Delete"
    )
    assert ocp_logging_obj.create_instance_in_clusterlogging(sc_name=sc_obj.name)

    # Check the health of the cluster-logging
    assert ocp_logging_obj.check_health_of_clusterlogging()

    csv_obj = CSV(
        kind=constants.CLUSTER_SERVICE_VERSION, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
    )

    get_version = csv_obj.get(out_yaml_format=True)
    for i in range(len(get_version['items'])):
        if '4.2.0' in get_version['items'][i]['metadata']['name']:
            logger.info("The version of operators is 4.2.0")
            logger.info(get_version['items'][i]['metadata']['name'])
        else:
            logger.error("The version is not 4.2.0")
Example #12
    def test_create_storage_class_with_wrong_provisioner(self, interface):
        """
        Test function which creates Storage Class with
        wrong provisioner and verifies PVC status
        """
        log.info(f"Creating a {interface} storage class")
        if interface == "RBD":
            interface_type = constants.CEPHBLOCKPOOL
            secret = self.rbd_secret_obj.name
            interface_name = self.cbp_obj.name
        else:
            interface_type = constants.CEPHFILESYSTEM
            secret = self.cephfs_secret_obj.name
            interface_name = helpers.get_cephfs_data_pool_name()
        sc_obj = helpers.create_storage_class(
            interface_type=interface_type,
            interface_name=interface_name,
            secret_name=secret,
            provisioner=constants.AWS_EFS_PROVISIONER)
        log.info(
            f"{interface} Storage class: {sc_obj.name} created successfully")

        # Create PVC
        pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, do_reload=False)

        # Check PVC status
        pvc_output = pvc_obj.get()
        pvc_status = pvc_output['status']['phase']
        log.info(f"Status of PVC {pvc_obj.name} after creation: {pvc_status}")
        log.info(f"Waiting for status '{constants.STATUS_PENDING}' "
                 f"for 20 seconds (it shouldn't change)")

        pvc_obj.ocp.wait_for_resource(resource_name=pvc_obj.name,
                                      condition=constants.STATUS_PENDING,
                                      timeout=20,
                                      sleep=5)
        # Check PVC status again after 20 seconds
        pvc_output = pvc_obj.get()
        pvc_status = pvc_output['status']['phase']
        assert_msg = (
            f"PVC {pvc_obj.name} is not in {constants.STATUS_PENDING} "
            f"status")
        assert pvc_status == constants.STATUS_PENDING, assert_msg
        log.info(f"Status of {pvc_obj.name} after 20 seconds: {pvc_status}")

        # Delete PVC
        log.info(f"Deleting PVC: {pvc_obj.name}")
        assert pvc_obj.delete()
        log.info(f"PVC {pvc_obj.name} deleted successfully")

        # Delete Storage Class
        log.info(f"Deleting Storageclass: {sc_obj.name}")
        assert sc_obj.delete()
        log.info(f"Storage Class: {sc_obj.name} deleted successfully")
Example #13
    def dynamic_pvc_base(self, interface_type, reclaim_policy):
        """
        Base function for Dynamic PVC creation tests.
        Fetches the worker node name list, creates a StorageClass and PVC.
        """
        self.interface_type = interface_type
        self.reclaim_policy = reclaim_policy
        self.worker_nodes_list = helpers.get_worker_nodes()

        if self.interface_type == constants.CEPHBLOCKPOOL:
            self.interface_name = self.cbp_obj.name
            self.secret_name = self.rbd_secret_obj.name

        elif self.interface_type == constants.CEPHFILESYSTEM:
            self.interface_name = helpers.get_cephfs_data_pool_name()
            self.secret_name = self.cephfs_secret_obj.name

        logger.info(
            f"Creating Storage Class with reclaimPolicy: {self.reclaim_policy}"
        )
        self.sc_obj = helpers.create_storage_class(
            interface_type=self.interface_type,
            interface_name=self.interface_name,
            secret_name=self.secret_name,
            reclaim_policy=self.reclaim_policy
        )

        logger.info(f"Creating PVC with accessModes: {self.access_mode}")
        self.pvc_obj = helpers.create_pvc(
            sc_name=self.sc_obj.name, namespace=self.namespace,
            size=self.pvc_size, access_mode=self.access_mode
        )
        helpers.wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
        self.pvc_obj.reload()

        logger.info(
            f"Creating first pod on node: {self.worker_nodes_list[0]}"
            f" with pvc {self.pvc_obj.name}"
        )
        self.pod_obj1 = helpers.create_pod(
            interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
            namespace=self.namespace, node_name=self.worker_nodes_list[0],
            pod_dict_path=constants.NGINX_POD_YAML
        )
        helpers.wait_for_resource_state(self.pod_obj1, constants.STATUS_RUNNING)
        self.pod_obj1.reload()
Example #14
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        secret=None,
        custom_data=None,
        sc_name=None,
        reclaim_policy=constants.RECLAIM_POLICY_DELETE
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether an RBD-based or CephFS resource is created.
                RBD is the default.
            secret (object): An OCS instance for the secret.
            custom_data (dict): If provided, the storageclass object is
                created using this data. Parameters `block_pool` and
                `secret` are not used, but references are set if provided.
            sc_name (str): Name of the storage class

        Returns:
            object: helpers.create_storage_class instance with links to
                block_pool and secret.
        """
        if custom_data:
            sc_obj = helpers.create_resource(**custom_data)
        else:
            secret = secret or secret_factory(interface=interface)
            ceph_pool = ceph_pool_factory(interface)
            if interface == constants.CEPHBLOCKPOOL:
                interface_name = ceph_pool.name
            elif interface == constants.CEPHFILESYSTEM:
                interface_name = helpers.get_cephfs_data_pool_name()

            sc_obj = helpers.create_storage_class(
                interface_type=interface,
                interface_name=interface_name,
                secret_name=secret.name,
                sc_name=sc_name,
                reclaim_policy=reclaim_policy
            )
            assert sc_obj, f"Failed to create {interface} storage class"
            sc_obj.ceph_pool = ceph_pool
            sc_obj.secret = secret

        instances.append(sc_obj)
        return sc_obj
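A hedged sketch of the custom_data path above: helpers.create_resource receives the dict as keyword arguments, so the keys mirror a StorageClass manifest. All values here are illustrative, not taken from the source.

# Sketch: build a storage class from a raw manifest-style dict.
custom = {
    'apiVersion': 'storage.k8s.io/v1',
    'kind': 'StorageClass',
    'metadata': {'name': 'my-custom-sc'},  # hypothetical name
    'provisioner': constants.AWS_EFS_PROVISIONER,
}
sc_obj = factory(custom_data=custom)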
Example #15
def test_fixture(request):
    """
    Setup and teardown
    * The setup will deploy openshift-logging in the cluster
    * The teardown will uninstall cluster-logging from the cluster
    """
    def finalizer():
        teardown(cbp_obj, sc_obj)

    request.addfinalizer(finalizer)

    # Deploys elastic-search operator on the project openshift-operators-redhat
    ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
    assert ocp_logging_obj.create_elasticsearch_operator_group(
        yaml_file=constants.EO_OG_YAML,
        resource_name='openshift-operators-redhat')
    assert ocp_logging_obj.set_rbac(yaml_file=constants.EO_RBAC_YAML,
                                    resource_name='prometheus-k8s')
    assert ocp_logging_obj.create_elasticsearch_subscription(
        constants.EO_SUB_YAML)

    # Deploys cluster-logging operator on the project openshift-logging
    ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
    assert ocp_logging_obj.create_clusterlogging_operator_group(
        yaml_file=constants.CL_OG_YAML)
    assert ocp_logging_obj.create_clusterlogging_subscription(
        yaml_file=constants.CL_SUB_YAML)

    # Creates storage class
    cbp_obj = helpers.create_ceph_block_pool()
    sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=cbp_obj.name,
        secret_name=constants.DEFAULT_SECRET,
        reclaim_policy="Delete")
    assert ocp_logging_obj.create_instance_in_clusterlogging(
        sc_name=sc_obj.name)

    # Check the health of the cluster-logging
    assert ocp_logging_obj.check_health_of_clusterlogging()
Example #16
def ripsaw(request):
    # Create Secret and Pool
    secret = helpers.create_secret(constants.CEPHBLOCKPOOL)
    pool = helpers.create_ceph_block_pool()

    # Create storage class
    log.info("Creating a Storage Class")
    sc = helpers.create_storage_class(sc_name='pgsql-workload',
                                      interface_type=constants.CEPHBLOCKPOOL,
                                      secret_name=secret.name,
                                      interface_name=pool.name)
    # Create RipSaw Operator
    ripsaw = RipSaw()

    def teardown():
        ripsaw.cleanup()
        sc.delete()
        secret.delete()
        pool.delete()

    request.addfinalizer(teardown)
    return ripsaw
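A hedged sketch of a test consuming the fixture above; apply_crd is part of the RipSaw helper in ocs-ci, though the CRD path shown here is an assumption.

def test_pgsql_workload(ripsaw):
    # Sketch: deploy the benchmark CRD before creating workload CRs.
    ripsaw.apply_crd('resources/crds/ripsaw_v1alpha1_ripsaw_crd.yaml')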
Example #17
def create_cephfs_storageclass(request):
    """
    Create a CephFS storage class
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete the CephFS storage class
        """
        if class_instance.sc_obj.get():
            class_instance.sc_obj.delete()
            class_instance.sc_obj.ocp.wait_for_delete(
                class_instance.sc_obj.name)

    request.addfinalizer(finalizer)

    class_instance.sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHFILESYSTEM,
        interface_name=helpers.get_cephfs_data_pool_name(),
        secret_name=class_instance.cephfs_secret_obj.name)
    assert class_instance.sc_obj, "Failed to create storage class"
Example #18
    def test_ocs_347(self, resources):
        pod, pvc, storageclass = resources

        log.info("Creating RBD StorageClass")
        storageclass.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=self.cbp_obj.name,
                secret_name=self.rbd_secret_obj.name,
            ))
        log.info("Creating a PVC")
        pvc.append(
            helpers.create_pvc(
                sc_name=storageclass[0].name,
                wait=True,
            ))
        log.info(f"Creating a pod with pvc {pvc[0].name}")
        pod.append(
            helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                               pvc_name=pvc[0].name,
                               desired_status=constants.STATUS_RUNNING,
                               wait=True,
                               pod_dict_path=constants.NGINX_POD_YAML))
Example #19
    def test_create_multiple_sc_with_different_pool_name(
        self, teardown_factory
    ):
        """
        This test function does the following:
        * Creates multiple Storage Classes with different pool names
        * Creates PVCs using each Storage Class
        * Mounts each PVC to an app pod
        * Runs IO on each app pod
        """

        # Create 3 storageclasses, each with different pool name
        cbp_list = []
        sc_list = []
        for i in range(3):
            log.info("Creating CephBlockPool")
            cbp_obj = helpers.create_ceph_block_pool()
            log.info(
                f"{cbp_obj.name} created successfully"
            )
            log.info(
                f"Creating a RBD storage class using {cbp_obj.name}"
            )
            cbp_list.append(cbp_obj)
            sc_obj = helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=cbp_obj.name,
                secret_name=self.rbd_secret_obj.name
            )

            log.info(
                f"StorageClass: {sc_obj.name} "
                f"created successfully using {cbp_obj.name}"
            )
            sc_list.append(sc_obj)
            teardown_factory(cbp_obj)
            teardown_factory(sc_obj)

        # Create PVCs using each SC
        pvc_list = []
        for i in range(3):
            log.info(f"Creating a PVC using {sc_list[i].name}")
            pvc_obj = helpers.create_pvc(sc_list[i].name)
            log.info(
                f"PVC: {pvc_obj.name} created successfully using "
                f"{sc_list[i].name}"
            )
            pvc_list.append(pvc_obj)
            teardown_factory(pvc_obj)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

        # Create app pod and mount each PVC
        pod_list = []
        for i in range(3):
            log.info(f"Creating an app pod and mount {pvc_list[i].name}")
            pod_obj = helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL,
                pvc_name=pvc_list[i].name,
            )
            log.info(
                f"{pod_obj.name} created successfully and "
                f"mounted {pvc_list[i].name}"
            )
            pod_list.append(pod_obj)
            teardown_factory(pod_obj)
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()

        # Run IO on each app pod for sometime
        for pod in pod_list:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io('fs', size='2G')

        for pod in pod_list:
            get_fio_rw_iops(pod)
Example #20
    def test_rbd_based_rwo_pvc(self, reclaim_policy):
        """
        Verifies RBD Based RWO Dynamic PVC creation with Reclaim policy set to
        Delete/Retain

        Steps:
        1. Create Storage Class with reclaimPolicy: Delete/Retain
        2. Create PVC with 'accessModes' 'ReadWriteOnce'
        3. Create two pods using same PVC
        4. Run IO on first pod
        5. Verify second pod is not getting into Running state
        6. Delete first pod
        7. Verify second pod is in Running state
        8. Verify usage of volume in second pod is matching with usage in
           first pod
        9. Run IO on second pod
        10. Delete second pod
        11. Delete PVC
        12. Verify PV associated with deleted PVC is also deleted/released
        """
        # Create Storage Class with reclaimPolicy: Delete
        sc_obj = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=self.cbp_obj.name,
            secret_name=self.rbd_secret_obj.name,
            reclaim_policy=reclaim_policy
        )

        # Create PVC with 'accessModes' 'ReadWriteOnce'
        pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
        pvc_data['metadata']['name'] = helpers.create_unique_resource_name(
            'test', 'pvc'
        )
        pvc_data['metadata']['namespace'] = self.namespace
        pvc_data['spec']['storageClassName'] = sc_obj.name
        pvc_data['spec']['accessModes'] = ['ReadWriteOnce']
        pvc_obj = PVC(**pvc_data)
        pvc_obj.create()

        # Create first pod
        log.info(f"Creating two pods which use PVC {pvc_obj.name}")
        pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
        pod_data['metadata']['name'] = helpers.create_unique_resource_name(
            'test', 'pod'
        )
        pod_data['metadata']['namespace'] = self.namespace
        pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_obj.name

        pod_obj = Pod(**pod_data)
        pod_obj.create()
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)

        node_pod1 = pod_obj.get()['spec']['nodeName']

        # Create second pod
        # Try creating pod until it is on a different node than first pod
        for retry in range(1, 6):
            pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
            pod_data['metadata']['name'] = helpers.create_unique_resource_name(
                'test', 'pod'
            )
            pod_data['metadata']['namespace'] = self.namespace
            pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_obj.name
            pod_obj2 = Pod(**pod_data)
            pod_obj2.create()
            helpers.wait_for_resource_state(pod_obj2, constants.STATUS_PENDING)

            node_pod2 = pod_obj2.get()['spec']['nodeName']
            if node_pod1 != node_pod2:
                break
            log.info(
                f"Both pods are on the same node. Deleting the second pod "
                f"and creating another. Retry count: {retry}"
            )
            pod_obj2.delete()
            if retry == 5:
                raise UnexpectedBehaviour(
                    "Second pod was scheduled on the same node as the first "
                    "pod even after 5 retries."
                )

        # Run IO on first pod
        log.info(f"Running IO on first pod {pod_obj.name}")
        pod_obj.run_io('fs', '1G')
        log.info(f"Waiting for IO results from pod {pod_obj.name}")
        fio_result = pod_obj.get_fio_results()
        log.info("IOPs after FIO:")
        log.info(
            f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
        )
        log.info(
            f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
        )

        # Fetch usage details
        mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
        mount_point = mount_point.split()
        usage = mount_point[mount_point.index('/var/lib/www/html') - 1]

        # Verify that second pod is not getting into Running state. Check it
        # for some period of time.
        try:
            assert not pod_obj2.ocp.wait_for_resource(
                condition='Running', resource_name=pod_obj2.name,
            ), "Unexpected: Second pod is in Running state"
        except TimeoutExpiredError:
            log.info(
                f"Verified: Second pod {pod_obj2.name} is not in "
                f"Running state"
            )

        # Delete first pod
        pod_obj.delete(wait=True)

        # Verify pod is deleted
        try:
            pod_obj.get()
            raise UnexpectedBehaviour(
                f"First pod {pod_obj.name} is not deleted."
            )
        except CommandFailed as exp:
            assert "not found" in str(exp), (
                "Failed to fetch pod details"
            )
            log.info(f"First pod {pod_obj.name} is deleted.")

        # Wait for second pod to be in Running state
        try:
            pod_obj2.ocp.wait_for_resource(
                condition='Running', resource_name=pod_obj2.name, timeout=180
            )
        except TimeoutExpiredError as exp:
            raise TimeoutExpiredError(
                f"Second pod {pod_obj2.name} is not in Running state "
                f"after deleting first pod."
            ) from exp
        log.info(
            f"Second pod {pod_obj2.name} is in Running state after "
            f"deleting the first pod."
        )

        # Verify that volume usage in second pod is matching with the usage in
        # first pod
        mount_point = pod_obj2.exec_cmd_on_pod(command="df -kh")
        mount_point = mount_point.split()
        usage_re = mount_point[mount_point.index('/var/lib/www/html') - 1]
        assert usage_re == usage, (
            "Usage percentage in the second pod does not match the first pod"
        )

        # Run IO on second pod
        log.info(f"Running IO on second pod {pod_obj2.name}")
        pod_obj2.run_io('fs', '1G')
        log.info(f"Waiting for IO results from pod {pod_obj2.name}")
        fio_result = pod_obj2.get_fio_results()
        log.info("IOPs after FIO:")
        log.info(
            f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
        )
        log.info(
            f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
        )

        # Delete second pod
        pod_obj2.delete()

        # Verify pod is deleted
        try:
            pod_obj2.get()
            raise UnexpectedBehaviour(
                f"Second pod {pod_obj2.name} is not deleted."
            )
        except CommandFailed as exp:
            assert "not found" in str(exp), (
                "Failed to fetch pod details"
            )
            log.info(f"Second pod {pod_obj2.name} is deleted.")

        # Get PV name
        pvc_obj.reload()
        pv_name = pvc_obj.backed_pv

        # Delete PVC
        pvc_obj.delete()

        # Verify PVC is deleted
        try:
            pvc_obj.get()
            raise UnexpectedBehaviour(
                f"PVC {pvc_obj.name} is not deleted."
            )
        except CommandFailed as exp:
            assert "not found" in str(exp), (
                "Failed to verify PVC deletion."
            )
            log.info(f"PVC {pvc_obj.name} is deleted.")

        pv_obj = OCP(
            kind=constants.PV, namespace=self.namespace
        )

        if reclaim_policy == "Delete":
            # Verify PV is deleted
            for pv_info in TimeoutSampler(
                    30, 2, pv_obj.get, out_yaml_format=False
            ):
                if pv_name not in pv_info:
                    break
                log.warning(
                    f"PV {pv_name} exists after deleting PVC {pvc_obj.name}. "
                    f"Checking again."
                )

            # TODO: Verify PV using ceph toolbox. PV should be deleted.
            # Blocked by bz 1723656

        elif reclaim_policy == "Retain":
            # Wait for PV to be in Released state
            assert pv_obj.wait_for_resource(
                condition='Released', resource_name=pv_name
            )
            log.info(f"PV {pv_name} is in Released state")

            # TODO: Delete PV from backend and verify
            # Blocked by bz 1723656
            pv_obj.delete(resource_name=pv_name)

        # Delete Storage Class
        sc_obj.delete()
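The reclaim_policy argument above is typically supplied via pytest parametrization; a hedged sketch of the wiring, with marker values assumed rather than taken from the source:

    # Sketch: assumes pytest is imported; values mirror the two policies tested.
    @pytest.mark.parametrize('reclaim_policy', ['Delete', 'Retain'])
    def test_rbd_based_rwo_pvc(self, reclaim_policy):
        ...  # body as in the example above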
Example #21
    def validate_cluster(self, resources, instances):
        """
        Perform cluster validation - node readiness, Ceph cluster health
        check and functional resource tests
        """
        instances_names = list(instances.values())
        assert ocp.wait_for_nodes_ready(instances_names), (
            "Not all nodes reached status Ready"
        )

        ceph_cluster = CephCluster()
        assert ceph_health_check(
            namespace=config.ENV_DATA['cluster_namespace']
        )
        ceph_cluster.cluster_health_check(timeout=60)

        # Create resources and run IO for both FS and RBD
        # Unpack resources
        projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]

        # Project
        projects.append(helpers.create_project())

        # Secrets
        secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))
        secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))

        # Pools
        pools.append(helpers.create_ceph_block_pool())
        pools.append(helpers.get_cephfs_data_pool_name())

        # Storageclasses
        storageclasses.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=pools[0].name,
                secret_name=secrets[0].name
            )
        )
        storageclasses.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHFILESYSTEM,
                interface_name=pools[1],
                secret_name=secrets[1].name
            )
        )

        # PVCs
        pvcs.append(helpers.create_pvc(
            sc_name=storageclasses[0].name, namespace=projects[0].namespace)
        )
        pvcs.append(helpers.create_pvc(
            sc_name=storageclasses[1].name, namespace=projects[0].namespace)
        )

        # Pods
        pods.append(
            helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvcs[0].name,
                namespace=projects[0].namespace
            )
        )
        pods.append(
            helpers.create_pod(
                interface_type=constants.CEPHFILESYSTEM, pvc_name=pvcs[1].name,
                namespace=projects[0].namespace
            )
        )

        # Run IO
        for pod in pods:
            pod.run_io('fs', '1G')
        for pod in pods:
            fio_result = pod.get_fio_results()
            logger.info(f"IOPs after FIO for pod {pod.name}:")
            logger.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
            )
            logger.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
            )
Example #22
def create_resources(resources, run_io=True):
    """
    Sanity validation - Create resources (FS and RBD) and run IO

    Args:
        resources (tuple): Lists of projects, secrets, pools,
            storageclasses, pvcs and pods
        run_io (bool): True for run IO, False otherwise

    """
    # Create resources and run IO for both FS and RBD
    # Unpack resources
    projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]

    # Project
    projects.append(helpers.create_project())

    # Secrets
    secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))
    secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))

    # Pools
    pools.append(helpers.create_ceph_block_pool())
    pools.append(helpers.get_cephfs_data_pool_name())

    # Storageclasses
    storageclasses.append(
        helpers.create_storage_class(interface_type=constants.CEPHBLOCKPOOL,
                                     interface_name=pools[0].name,
                                     secret_name=secrets[0].name))
    storageclasses.append(
        helpers.create_storage_class(interface_type=constants.CEPHFILESYSTEM,
                                     interface_name=pools[1],
                                     secret_name=secrets[1].name))

    # PVCs
    pvcs.append(
        helpers.create_pvc(sc_name=storageclasses[0].name,
                           namespace=projects[0].namespace))
    pvcs.append(
        helpers.create_pvc(sc_name=storageclasses[1].name,
                           namespace=projects[0].namespace))
    for pvc in pvcs:
        helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
        pvc.reload()

    # Pods
    pods.append(
        helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                           pvc_name=pvcs[0].name,
                           namespace=projects[0].namespace))
    pods.append(
        helpers.create_pod(interface_type=constants.CEPHFILESYSTEM,
                           pvc_name=pvcs[1].name,
                           namespace=projects[0].namespace))
    for pod in pods:
        helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
        pod.reload()

    if run_io:
        # Run IO
        for pod in pods:
            pod.run_io('fs', '1G')
        for pod in pods:
            fio_result = pod.get_fio_results()
            logger.info(f"IOPs after FIO for pod {pod.name}:")
            logger.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
            logger.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
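A hedged sketch of calling create_resources with pre-initialized lists, so a caller can later tear down everything that was appended:

# Sketch: the tuple of lists doubles as a registry for teardown.
resources = ([], [], [], [], [], [])
create_resources(resources, run_io=False)
projects, secrets, pools, storageclasses, pvcs, pods = resources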