def health_check(self):
    """
    Run the Ceph health probe against the configured cluster namespace
    and then validate overall cluster health.
    """
    cluster = CephCluster()
    target_namespace = config.ENV_DATA['cluster_namespace']
    assert ceph_health_check(namespace=target_namespace)
    cluster.cluster_health_check(timeout=60)
class TestAddCapacity(ManageTest):
    """
    Automates adding variable capacity to the cluster while IOs running
    """

    def test_add_capacity(self):
        """
        Add capacity (sized to the current OSD size) to the OSD cluster
        while IOs are running, wait for the new OSD pods to reach the
        Running state, then validate cluster health.
        """
        self.ceph_cluster = CephCluster()
        size_per_osd = storage_cluster.get_osd_size()
        added_count = storage_cluster.add_capacity(size_per_osd)
        osd_pods = OCP(
            kind=constants.POD,
            namespace=config.ENV_DATA['cluster_namespace'],
        )
        # NOTE(review): expected pod count is `added_count * 3` — presumably
        # 3 OSDs per added capacity unit; confirm against add_capacity().
        osd_pods.wait_for_resource(
            timeout=300,
            condition=constants.STATUS_RUNNING,
            selector='app=rook-ceph-osd',
            resource_count=added_count * 3,
        )
        self.ceph_cluster.cluster_health_check(timeout=1200)
def test_remove_mon_pod_from_cluster(self):
    """
    Remove a mon pod from the cluster after I/O is performed on a pool,
    and wait for the operator to create a new mon pod on its own.

    Skips when the cluster has only a single mon (nothing safe to delete).

    Raises:
        AssertionError: if running I/O, deleting the pool, or verifying
            the recreated mon pod fails.
    """
    ceph_cluster = CephCluster()
    pods = ocp.OCP(
        kind=constants.POD,
        namespace=config.ENV_DATA['cluster_namespace'],
    )
    list_mons = ceph_cluster.get_mons_from_cluster()
    # FIX: the original used `assert cond, pytest.skip(...)`, which only
    # skipped via the side effect of evaluating the assert *message*.
    # An explicit skip is the intended, unambiguous form.
    if len(list_mons) <= 1:
        pytest.skip("INVALID: Mon count should be more than one to delete.")
    self.pool_obj = create_ceph_block_pool()
    assert run_io_on_pool(self.pool_obj), 'Failed to run I/O on the pool'
    assert delete_cephblockpools([self.pool_obj]), 'Failed to delete pool'
    # timeout=0: a single immediate health probe before removing the mon
    ceph_cluster.cluster_health_check(timeout=0)
    ceph_cluster.remove_mon_from_cluster()
    assert verify_mon_pod_up(pods), "Mon pods are not up and running state"
    ceph_cluster.cluster_health_check(timeout=60)
class Sanity:
    """
    Class for cluster health and functional validations
    """

    def __init__(self):
        """
        Initializer for Sanity class - Init CephCluster() in order to set
        the cluster status before starting the tests
        """
        # Resource trackers populated by create_resources(); consumed by
        # delete_resources()
        self.pvc_objs = list()
        self.pod_objs = list()
        self.obc_objs = list()
        self.obj_data = ""
        self.ceph_cluster = CephCluster()

    def health_check(self, cluster_check=True, tries=20):
        """
        Perform Ceph and cluster health checks

        Args:
            cluster_check (bool): when True, also run the CephCluster
                internal health check
            tries (int): retry count passed to ceph_health_check
        """
        wait_for_cluster_connectivity(tries=400)
        logger.info("Checking cluster and Ceph health")
        node.wait_for_nodes_status(timeout=300)
        ceph_health_check(
            namespace=config.ENV_DATA["cluster_namespace"], tries=tries
        )
        if cluster_check:
            self.ceph_cluster.cluster_health_check(timeout=60)

    def create_resources(
        self, pvc_factory, pod_factory, bucket_factory,
        rgw_bucket_factory, run_io=True
    ):
        """
        Sanity validation: Create resources - pods, OBCs (RGW and MCG),
        PVCs (FS and RBD) and run IO

        Args:
            pvc_factory (function): A call to pvc_factory function
            pod_factory (function): A call to pod_factory function
            bucket_factory (function): A call to bucket_factory function
            rgw_bucket_factory (function): A call to rgw_bucket_factory function
            run_io (bool): True for run IO, False otherwise
        """
        logger.info(
            "Creating resources and running IO as a sanity functional validation"
        )
        for interface in [constants.CEPHBLOCKPOOL, constants.CEPHFILESYSTEM]:
            pvc_obj = pvc_factory(interface)
            self.pvc_objs.append(pvc_obj)
            self.pod_objs.append(pod_factory(pvc=pvc_obj, interface=interface))
        if run_io:
            for pod in self.pod_objs:
                pod.run_io("fs", "1G", runtime=30)
            for pod in self.pod_objs:
                get_fio_rw_iops(pod)
        if rgw_bucket_factory:
            self.obc_objs.extend(rgw_bucket_factory(1, "rgw-oc"))
        if bucket_factory:
            self.obc_objs.extend(bucket_factory(amount=1, interface="OC"))
        self.ceph_cluster.wait_for_noobaa_health_ok()

    def delete_resources(self):
        """
        Sanity validation - Delete resources (pods, PVCs and OBCs)
        """
        logger.info("Deleting resources as a sanity functional validation")
        for pod_obj in self.pod_objs:
            pod_obj.delete()
        for pod_obj in self.pod_objs:
            pod_obj.ocp.wait_for_delete(pod_obj.name)
        for pvc_obj in self.pvc_objs:
            pvc_obj.delete()
        for pvc_obj in self.pvc_objs:
            pvc_obj.ocp.wait_for_delete(pvc_obj.name)
        for obc_obj in self.obc_objs:
            # FIX: the original line read
            #   obc_obj.delete(), f"OBC {obc_obj.name} still exists"
            # which built a throwaway tuple; the f-string was dead code
            # (an assert message without the assert). Only the delete()
            # call had any effect, so keep exactly that.
            obc_obj.delete()

    @ignore_leftovers
    def create_pvc_delete(self, multi_pvc_factory, project=None):
        """
        Creates and deletes all types of PVCs
        """
        # Create rbd pvcs
        pvc_objs_rbd = create_pvcs(
            multi_pvc_factory=multi_pvc_factory,
            interface="CephBlockPool",
            project=project,
            status="",
            storageclass=None,
        )
        # Create cephfs pvcs
        pvc_objs_cephfs = create_pvcs(
            multi_pvc_factory=multi_pvc_factory,
            interface="CephFileSystem",
            project=project,
            status="",
            storageclass=None,
        )
        all_pvc_to_delete = pvc_objs_rbd + pvc_objs_cephfs

        # Check pvc status
        for pvc_obj in all_pvc_to_delete:
            helpers.wait_for_resource_state(
                resource=pvc_obj, state=constants.STATUS_BOUND, timeout=300
            )

        # Start deleting PVC
        delete_pvcs(all_pvc_to_delete)

        # Check PVCs are deleted
        for pvc_obj in all_pvc_to_delete:
            pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)

        logger.info("All PVCs are deleted as expected")

    def obc_put_obj_create_delete(self, mcg_obj, bucket_factory):
        """
        Creates bucket then writes, reads and deletes objects
        """
        bucket_name = bucket_factory(amount=1, interface="OC")[0].name
        self.obj_data = "A string data"
        for i in range(0, 30):
            key = "Object-key-" + f"{i}"
            logger.info(f"Write, read and delete object with key: {key}")
            assert s3_put_object(
                mcg_obj, bucket_name, key, self.obj_data
            ), f"Failed: Put object, {key}"
            assert s3_get_object(
                mcg_obj, bucket_name, key
            ), f"Failed: Get object, {key}"
            assert s3_delete_object(
                mcg_obj, bucket_name, key
            ), f"Failed: Delete object, {key}"
class Sanity:
    """
    Cluster health and functional sanity validations.
    """

    def __init__(self):
        """
        Set up the resource trackers and instantiate CephCluster() so the
        cluster status is captured before any test starts.
        """
        self.pvc_objs = list()
        self.pod_objs = list()
        self.obj_data = ""
        self.ceph_cluster = CephCluster()

    def health_check(self, cluster_check=True, tries=20):
        """
        Run Ceph and cluster-wide health checks.

        Args:
            cluster_check (bool): when True, also run the CephCluster
                internal health check
            tries (int): retry count handed to ceph_health_check
        """
        wait_for_cluster_connectivity(tries=400)
        logger.info("Checking cluster and Ceph health")
        node.wait_for_nodes_status(timeout=300)
        ceph_health_check(
            namespace=config.ENV_DATA["cluster_namespace"], tries=tries
        )
        if cluster_check:
            self.ceph_cluster.cluster_health_check(timeout=60)

    def create_resources(self, pvc_factory, pod_factory, run_io=True):
        """
        Sanity validation - create FS and RBD resources and optionally
        run IO against them, then create and verify OBCs.

        Args:
            pvc_factory (function): A call to pvc_factory function
            pod_factory (function): A call to pod_factory function
            run_io (bool): True for run IO, False otherwise
        """
        logger.info(
            "Creating resources and running IO as a sanity functional validation"
        )
        for interface in (constants.CEPHBLOCKPOOL, constants.CEPHFILESYSTEM):
            pvc_obj = pvc_factory(interface)
            self.pvc_objs.append(pvc_obj)
            self.pod_objs.append(pod_factory(pvc=pvc_obj, interface=interface))
        if run_io:
            for pod in self.pod_objs:
                pod.run_io("fs", "1G", runtime=30)
            for pod in self.pod_objs:
                get_fio_rw_iops(pod)
        self.create_obc()
        self.verify_obc()

    def create_obc(self):
        """
        OBC creation for RGW and Nooba
        """
        # RGW OBC only applies on on-prem platforms
        if config.ENV_DATA["platform"] in constants.ON_PREM_PLATFORMS:
            obc_rgw = templating.load_yaml(constants.RGW_OBC_YAML)
            obc_rgw_data_yaml = tempfile.NamedTemporaryFile(
                mode="w+", prefix="obc_rgw_data", delete=False
            )
            templating.dump_data_to_temp_yaml(obc_rgw, obc_rgw_data_yaml.name)
            logger.info("Creating OBC for rgw")
            run_cmd(f"oc create -f {obc_rgw_data_yaml.name}", timeout=2400)
            self.obc_rgw = obc_rgw["metadata"]["name"]

        obc_nooba = templating.load_yaml(constants.MCG_OBC_YAML)
        obc_mcg_data_yaml = tempfile.NamedTemporaryFile(
            mode="w+", prefix="obc_mcg_data", delete=False
        )
        templating.dump_data_to_temp_yaml(obc_nooba, obc_mcg_data_yaml.name)
        logger.info("create OBC for mcg")
        run_cmd(f"oc create -f {obc_mcg_data_yaml.name}", timeout=2400)
        self.obc_mcg = obc_nooba["metadata"]["name"]

    def delete_obc(self):
        """
        Cleanup of the OBC resources created by create_obc()
        """
        if config.ENV_DATA["platform"] in constants.ON_PREM_PLATFORMS:
            logger.info(f"Deleting rgw obc {self.obc_rgw}")
            obcrgw = OCP(
                kind="ObjectBucketClaim", resource_name=f"{self.obc_rgw}"
            )
            run_cmd(f"oc delete obc/{self.obc_rgw}")
            obcrgw.wait_for_delete(
                resource_name=f"{self.obc_rgw}", timeout=300
            )

        logger.info(f"Deleting mcg obc {self.obc_mcg}")
        obcmcg = OCP(kind="ObjectBucketClaim", resource_name=f"{self.obc_mcg}")
        run_cmd(
            f"oc delete obc/{self.obc_mcg} -n "
            f"{defaults.ROOK_CLUSTER_NAMESPACE}"
        )
        obcmcg.wait_for_delete(resource_name=f"{self.obc_mcg}", timeout=300)

    def verify_obc(self):
        """
        OBC verification from external cluster perspective,
        we will check 2 OBCs
        """
        self.ceph_cluster.wait_for_noobaa_health_ok()

    def delete_resources(self):
        """
        Sanity validation - delete the FS/RBD resources and the OBCs.
        """
        logger.info("Deleting resources as a sanity functional validation")
        self.delete_obc()
        for pod_obj in self.pod_objs:
            pod_obj.delete()
        for pod_obj in self.pod_objs:
            pod_obj.ocp.wait_for_delete(pod_obj.name)
        for pvc_obj in self.pvc_objs:
            pvc_obj.delete()
        for pvc_obj in self.pvc_objs:
            pvc_obj.ocp.wait_for_delete(pvc_obj.name)

    @ignore_leftovers
    def create_pvc_delete(self, multi_pvc_factory, project=None):
        """
        Create PVCs of every interface type, wait for Bound, then delete
        them all and confirm the deletion.
        """
        rbd_pvcs = create_pvcs(
            multi_pvc_factory=multi_pvc_factory,
            interface="CephBlockPool",
            project=project,
            status="",
            storageclass=None,
        )
        cephfs_pvcs = create_pvcs(
            multi_pvc_factory=multi_pvc_factory,
            interface="CephFileSystem",
            project=project,
            status="",
            storageclass=None,
        )
        pending_delete = rbd_pvcs + cephfs_pvcs

        # All PVCs must be Bound before we start deleting
        for pvc_obj in pending_delete:
            helpers.wait_for_resource_state(
                resource=pvc_obj, state=constants.STATUS_BOUND, timeout=300
            )

        delete_pvcs(pending_delete)

        # Confirm every PVC is actually gone
        for pvc_obj in pending_delete:
            pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)

        logger.info("All PVCs are deleted as expected")

    def obc_put_obj_create_delete(self, mcg_obj, bucket_factory):
        """
        Create a bucket, then write, read and delete a series of objects.
        """
        bucket_name = bucket_factory(amount=1, interface="OC")[0].name
        self.obj_data = "A string data"
        for index in range(30):
            key = f"Object-key-{index}"
            logger.info(f"Write, read and delete object with key: {key}")
            assert s3_put_object(
                mcg_obj, bucket_name, key, self.obj_data
            ), f"Failed: Put object, {key}"
            assert s3_get_object(
                mcg_obj, bucket_name, key
            ), f"Failed: Get object, {key}"
            assert s3_delete_object(
                mcg_obj, bucket_name, key
            ), f"Failed: Delete object, {key}"
def validate_cluster(self, resources, instances):
    """
    Perform cluster validation: node readiness, Ceph cluster health
    check, and a functional smoke test (create FS/RBD resources and
    run IO on them).
    """
    instance_names = list(instances.values())
    assert ocp.wait_for_nodes_ready(instance_names), (
        "Not all nodes reached status Ready"
    )

    cluster = CephCluster()
    assert ceph_health_check(namespace=config.ENV_DATA['cluster_namespace'])
    cluster.cluster_health_check(timeout=60)

    # Create resources and run IO for both FS and RBD.
    # Unpack the shared resource trackers.
    projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]

    # Project
    projects.append(helpers.create_project())

    # Secrets
    secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))
    secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))

    # Pools
    pools.append(helpers.create_ceph_block_pool())
    pools.append(helpers.get_cephfs_data_pool_name())

    # Storageclasses (pools[1] is a pool *name* string, not an object)
    storageclasses.append(
        helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=pools[0].name,
            secret_name=secrets[0].name,
        )
    )
    storageclasses.append(
        helpers.create_storage_class(
            interface_type=constants.CEPHFILESYSTEM,
            interface_name=pools[1],
            secret_name=secrets[1].name,
        )
    )

    # PVCs
    pvcs.append(
        helpers.create_pvc(
            sc_name=storageclasses[0].name,
            namespace=projects[0].namespace,
        )
    )
    pvcs.append(
        helpers.create_pvc(
            sc_name=storageclasses[1].name,
            namespace=projects[0].namespace,
        )
    )

    # Pods
    pods.append(
        helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL,
            pvc_name=pvcs[0].name,
            namespace=projects[0].namespace,
        )
    )
    pods.append(
        helpers.create_pod(
            interface_type=constants.CEPHFILESYSTEM,
            pvc_name=pvcs[1].name,
            namespace=projects[0].namespace,
        )
    )

    # Run IO on every pod, then report the measured IOPs
    for pod in pods:
        pod.run_io('fs', '1G')
    for pod in pods:
        fio_result = pod.get_fio_results()
        logger.info(f"IOPs after FIO for pod {pod.name}:")
        logger.info(
            f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
        )
        logger.info(
            f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
        )
class Sanity:
    """
    Cluster health and functional sanity validations.
    """

    def __init__(self):
        """
        Instantiate CephCluster() up front so the cluster status is set
        before the tests begin, and prepare the resource trackers.
        """
        self.pvc_objs = list()
        self.pod_objs = list()
        self.ceph_cluster = CephCluster()

    def health_check(self, cluster_check=True, tries=20):
        """
        Run Ceph and cluster-wide health checks.

        Args:
            cluster_check (bool): when True, also run the CephCluster
                internal health check
            tries (int): retry count handed to ceph_health_check
        """
        wait_for_cluster_connectivity(tries=400)
        logger.info("Checking cluster and Ceph health")
        node.wait_for_nodes_status(timeout=300)
        ceph_health_check(
            namespace=config.ENV_DATA['cluster_namespace'], tries=tries
        )
        if cluster_check:
            self.ceph_cluster.cluster_health_check(timeout=60)

    def create_resources(self, pvc_factory, pod_factory, run_io=True):
        """
        Sanity validation - create FS and RBD resources and optionally
        run IO against them.

        Args:
            pvc_factory (function): A call to pvc_factory function
            pod_factory (function): A call to pod_factory function
            run_io (bool): True for run IO, False otherwise
        """
        logger.info("Creating resources and running IO as a sanity functional validation")
        for interface in (constants.CEPHBLOCKPOOL, constants.CEPHFILESYSTEM):
            pvc_obj = pvc_factory(interface)
            self.pvc_objs.append(pvc_obj)
            self.pod_objs.append(pod_factory(pvc=pvc_obj, interface=interface))
        if run_io:
            for pod in self.pod_objs:
                pod.run_io('fs', '1G')
            for pod in self.pod_objs:
                get_fio_rw_iops(pod)

    def delete_resources(self):
        """
        Sanity validation - delete the FS and RBD resources created by
        create_resources().
        """
        logger.info("Deleting resources as a sanity functional validation")
        for pod_obj in self.pod_objs:
            pod_obj.delete()
        for pod_obj in self.pod_objs:
            pod_obj.ocp.wait_for_delete(pod_obj.name)
        for pvc_obj in self.pvc_objs:
            pvc_obj.delete()
        for pvc_obj in self.pvc_objs:
            pvc_obj.ocp.wait_for_delete(pvc_obj.name)