def test_new_sc_new_rbd_pool(
    self,
    replica,
    compression,
    volume_binding_mode,
    pvc_status,
    storageclass_factory,
    pvc_factory,
    pod_factory,
):
    """
    This test function does below,
    *. Creates Storage Class with creating new rbd pool
    *. Creates PVCs using new Storage Class
    *. Mount PVC to an app pod
    *. Run IO on an app pod
    *. Validate compression and replication on the new pool

    Raises:
        PoolNotCompressedAsExpected: if compression is enabled but the pool
            did not reach the expected compression ratio
        PoolNotReplicatedAsNeeded: if the pool data is not replicated to
            the requested size

    """
    interface_type = constants.CEPHBLOCKPOOL
    sc_obj = storageclass_factory(
        interface=interface_type,
        new_rbd_pool=True,
        replica=replica,
        compression=compression,
        volume_binding_mode=volume_binding_mode,
    )

    log.info(f"Creating a PVC using {sc_obj.name}")
    pvc_obj = pvc_factory(
        interface=interface_type, storageclass=sc_obj, size=10, status=pvc_status
    )
    log.info(f"PVC: {pvc_obj.name} created successfully using " f"{sc_obj.name}")

    # Create app pod and mount each PVC
    log.info(f"Creating an app pod and mount {pvc_obj.name}")
    pod_obj = pod_factory(interface=interface_type, pvc=pvc_obj)
    log.info(f"{pod_obj.name} created successfully and mounted {pvc_obj.name}")

    # Run IO on each app pod for sometime
    log.info(f"Running FIO on {pod_obj.name}")
    pod_obj.run_io(
        "fs",
        size="1G",
        rate="1500m",
        runtime=60,
        buffer_compress_percentage=60,
        buffer_pattern="0xdeadface",
        bs="8K",
        jobs=5,
        readwrite="readwrite",
    )
    cluster_used_space = get_percent_used_capacity()
    log.info(
        f"Cluster used space with replica size {replica}, "
        f"compression mode {compression}={cluster_used_space}"
    )
    cbp_name = sc_obj.get().get("parameters").get("pool")
    # The validate_* helpers return a boolean; the sibling tests in this
    # suite treat False as a test failure — do the same here instead of
    # silently discarding the result.
    if compression != "none":
        compression_result = validate_compression(cbp_name)
        if compression_result is False:
            raise PoolNotCompressedAsExpected(
                f"Pool {cbp_name} compression did not reach expected value"
            )
    replica_result = validate_replica_data(cbp_name, replica)
    if replica_result is False:
        raise PoolNotReplicatedAsNeeded(
            f"Pool {cbp_name} not replicated to size {replica}"
        )
def test_create_delete_pool(
    self,
    replica,
    compression,
    namespace,
    storage,
    pvc,
    pod,
):
    """
    test create delete pool have the following workflow
    .* Create new RBD pool
    .* Associate the pool with storageclass
    .* Create PVC based on the storageclass
    .* Create POD based on the PVC
    .* Run IO on the POD
    .* Check replication and compression

    """
    # Verify the pool was created with the requested compression and
    # replica values at the Ceph level before exercising it.
    ceph_level_ok = check_pool_compression_replica_ceph_level(
        self.pool_name, compression, replica
    )
    if not ceph_level_ok:
        raise PoolCephValueNotMatch(
            f"Pool {self.pool_name} values do not match configuration"
        )

    # Running IO on POD
    self.pod_obj.run_io(
        "fs",
        size="100m",
        rate="1500m",
        runtime=0,
        buffer_compress_percentage=60,
        buffer_pattern="0xdeadface",
        bs="8K",
        jobs=5,
        readwrite="readwrite",
    )

    # Getting IO results
    get_fio_rw_iops(self.pod_obj)

    # Checking Results for compression and replication.  The compression
    # check is only meaningful when compression was requested; replication
    # is always verified.
    if compression and validate_compression(self.pool_name) is False:
        raise PoolNotCompressedAsExpected(
            f"Pool {self.pool_name} compression did not reach expected value"
        )
    if validate_replica_data(self.pool_name, replica) is False:
        raise PoolNotReplicatedAsNeeded(
            f"Pool {self.pool_name} not replicated to size {replica}"
        )
def test_multiple_sc_one_pool_rep2_comp(
    self,
    ceph_pool_factory,
    storageclass_factory,
    pvc_factory,
    pod_factory,
):
    """
    This test function does below,
    *. Creates 2 Storage Class with creating one rbd pool for both
    *. Creates PVCs using new Storage Classes
    *. Mount PVC to an app pod
    *. Run IO on an app pod
    *. Verify compression and replication

    """
    log.info("Creating new pool with replica2 and compression")
    pool_obj = ceph_pool_factory(
        interface=CEPHBLOCKPOOL,
        replica=self.replica,
        compression="aggressive",
    )

    # Both storageclasses reuse the single pool created above.
    log.info(f"Creating first storageclass with pool {pool_obj.name}")
    first_sc = storageclass_factory(
        interface=CEPHBLOCKPOOL,
        new_rbd_pool=False,
        pool_name=pool_obj.name,
    )
    log.info(f"Creating second storageclass with pool {pool_obj.name}")
    second_sc = storageclass_factory(
        interface=CEPHBLOCKPOOL,
        new_rbd_pool=False,
        pool_name=pool_obj.name,
    )

    log.info("Creating PVCs and PODs")
    pods = [
        pod_factory(
            interface=CEPHBLOCKPOOL,
            pvc=pvc_factory(interface=CEPHBLOCKPOOL, storageclass=sc, size=10),
        )
        for sc in (first_sc, second_sc)
    ]

    log.info("Running IO on pods")
    for app_pod in pods:
        app_pod.run_io(
            "fs",
            size="1G",
            rate="1500m",
            runtime=60,
            buffer_compress_percentage=60,
            buffer_pattern="0xdeadface",
            bs="8K",
            jobs=5,
            readwrite="readwrite",
        )

    # Both checks return a boolean; False means the pool never reached the
    # expected replication / compression state.
    log.info(f"validating info on pool {pool_obj.name}")
    if validate_replica_data(pool_obj.name, self.replica) is False:
        raise PoolNotReplicatedAsNeeded(
            f"pool {pool_obj.name} not replicated as expected"
        )
    if validate_compression(pool_obj.name) is False:
        raise PoolNotCompressedAsExpected(
            f"pool {pool_obj.name} not compressed as expected"
        )
def test_sc_reclaim_policy_retain_rep2_comp(
    self,
    storageclass_factory,
    pvc_factory,
    pod_factory,
):
    """
    This test function does below,
    *. Create storageclass with reclaim policy retain and pool with rep2
       and compression
    *. Create pvc and pod
    *. Run IO on pod
    *. Verify compression and replication
    *. Delete Pod, Pvc, Pv, Rbd image

    """
    log.info(
        f"Creating storageclass with replica {self.replica}"
        f", compression {self.compression} and"
        f"reclaim policy {self.reclaim_policy}"
    )
    sc_obj = storageclass_factory(
        interface=CEPHBLOCKPOOL,
        new_rbd_pool=True,
        replica=self.replica,
        compression=self.compression,
        reclaim_policy=self.reclaim_policy,
    )
    # The factory records the backing rbd pool in the SC parameters.
    pool = sc_obj.get()["parameters"]["pool"]

    log.info("Creating PVCs and PODs")
    pvc_obj = pvc_factory(interface=CEPHBLOCKPOOL, storageclass=sc_obj, size=10)
    pod_obj = pod_factory(interface=CEPHBLOCKPOOL, pvc=pvc_obj)

    log.info("Running IO on pod")
    pod_obj.run_io(
        "fs",
        size="1G",
        rate="1500m",
        runtime=60,
        buffer_compress_percentage=60,
        buffer_pattern="0xdeadface",
        bs="8K",
        jobs=5,
        readwrite="readwrite",
    )

    # Validation helpers return False when the pool never reached the
    # expected replication / compression state.
    log.info(f"validating info on pool {pool}")
    if validate_replica_data(pool, self.replica) is False:
        raise PoolNotReplicatedAsNeeded(f"pool {pool} not replicated as expected")
    if validate_compression(pool) is False:
        raise PoolNotCompressedAsExpected(f"pool {pool} not compressed as expected")

    log.info("Deleting pod")
    delete_pods([pod_obj], wait=True)

    # With reclaim policy Retain the PV and rbd image outlive the PVC, so
    # each layer is removed explicitly and the image deletion is verified.
    log.info("Deleting pvc, pv and rbd image")
    pvc_obj.reload()
    image_uuid = pvc_obj.image_uuid
    pv_obj = pvc_obj.backed_pv_obj
    pvc_obj.delete()
    pv_obj.delete()
    if not delete_volume_in_backend(img_uuid=image_uuid, pool_name=pool):
        raise ImageIsNotDeletedOrNotFound(
            f"Could not delete or find image csi-vol-{image_uuid}"
        )
def test_new_sc_rep2_rep3_at_once(self, storageclass_factory, pvc_factory, pod_factory):
    """
    This test function does below,
    *. Creates 2 Storage Class with creating new rbd pool replica 2 and 3
       with compression
    *. Creates PVCs using new Storage Classes
    *. Mount PVC to an app pod
    *. Run IO on an app pod
    *. Validate compression and replication

    """
    log.info("Creating storageclasses")
    interface_type = constants.CEPHBLOCKPOOL
    sc_obj1 = storageclass_factory(
        interface=interface_type,
        new_rbd_pool=True,
        replica=2,
        compression="aggressive",
    )
    sc_obj2 = storageclass_factory(
        interface=interface_type,
        new_rbd_pool=True,
        replica=3,
        compression="aggressive",
    )
    # Map each storageclass to the replica size it was created with, so
    # the per-pool validation below knows what to expect.
    replicas = {sc_obj1.name: 2, sc_obj2.name: 3}
    sc_obj_list = [sc_obj1, sc_obj2]

    # Four pvc/pod pairs per storageclass.
    log.info("Creating pvc and pods")
    pod_obj_list = []
    for sc_obj in sc_obj_list:
        for _ in range(4):
            pvc_obj = pvc_factory(
                interface=interface_type, storageclass=sc_obj, size=10
            )
            pod_obj_list.append(pod_factory(interface=interface_type, pvc=pvc_obj))

    log.info("Running io on pods")
    for pod_obj in pod_obj_list:
        pod_obj.run_io(
            "fs",
            size="2G",
            rate="1500m",
            runtime=60,
            buffer_compress_percentage=60,
            buffer_pattern="0xdeadface",
            bs="8K",
            jobs=5,
            readwrite="readwrite",
        )

    # Validate each backing pool against its own expected replica size.
    for sc_obj in sc_obj_list:
        cbp_name = sc_obj.get()["parameters"]["pool"]
        cbp_size = replicas[sc_obj.name]
        if validate_compression(cbp_name) is False:
            raise PoolNotCompressedAsExpected(
                f"Pool {cbp_name} compression did not reach expected value"
            )
        if validate_replica_data(cbp_name, cbp_size) is False:
            raise PoolNotReplicatedAsNeeded(
                f"Pool {cbp_name} not replicated to size {cbp_size}"
            )