def test_storageclass_invalid(self, invalid_storageclass):
    """
    Test that a Persistent Volume Claim cannot be created from a
    misconfigured CephFS Storage Class.
    """
    pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
    pvc_name = helpers.create_unique_resource_name("test", "pvc")
    pvc_data["metadata"]["name"] = pvc_name
    pvc_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
    pvc_data["spec"]["storageClassName"] = invalid_storageclass[
        "metadata"
    ]["name"]
    logger.info(
        f"Create PVC {pvc_name} with storageClassName "
        f"{invalid_storageclass['metadata']['name']}"
    )
    pvc = PVC(**pvc_data)
    pvc.create()

    pvc_status = pvc.status
    logger.debug(f"Status of PVC {pvc_name} after creation: {pvc_status}")
    assert pvc_status == constants.STATUS_PENDING

    logger.info(
        f"Waiting for status '{constants.STATUS_BOUND}' "
        f"for 60 seconds (it shouldn't change)"
    )
    with pytest.raises(TimeoutExpiredError):
        # Raising TimeoutExpiredError is the expected behavior here: the
        # PVC must never become Bound with a misconfigured Storage Class
        pvc_status_changed = pvc.ocp.wait_for_resource(
            resource_name=pvc_name,
            condition=constants.STATUS_BOUND,
            timeout=60,
            sleep=20,
        )
        logger.debug("Check that PVC status did not change")
        assert not pvc_status_changed

    pvc_status = pvc.status
    logger.info(f"Status of PVC {pvc_name} after 60 seconds: {pvc_status}")
    assert_msg = (
        f"PVC {pvc_name} did not stay in status "
        f"{constants.STATUS_PENDING}"
    )
    assert pvc_status == constants.STATUS_PENDING, assert_msg

    logger.info(f"Deleting PVC {pvc_name}")
    pvc.delete()
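
# The `invalid_storageclass` fixture consumed above is defined elsewhere
# (typically in conftest). A minimal sketch of what such a fixture could
# provide, assuming the module-level imports already used in this file
# (pytest, templating, constants, defaults, helpers); the YAML constant,
# the OCS wrapper import and the `fsName` value are illustrative
# assumptions, not the real implementation, and the sketch is named
# differently so it does not shadow the actual fixture:
from ocs_ci.ocs.resources.ocs import OCS  # assumed import path


@pytest.fixture
def invalid_storageclass_sketch(request):
    sc_data = templating.load_yaml(constants.CSI_CEPHFS_STORAGECLASS_YAML)
    sc_data["metadata"]["name"] = helpers.create_unique_resource_name(
        "test", "storageclass"
    )
    sc_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
    # Misconfiguration: point the class at a CephFS filesystem that does
    # not exist, so any PVC using this class stays Pending (hypothetical
    # parameter value)
    sc_data["parameters"]["fsName"] = "nonexistent-cephfs"
    sc_obj = OCS(**sc_data)
    sc_obj.create()
    request.addfinalizer(sc_obj.delete)
    return sc_data
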
def test_rbd_based_rwo_pvc(self, reclaim_policy):
    """
    Verifies RBD based RWO dynamic PVC creation with reclaim policy set
    to Delete/Retain.

    Steps:
    1. Create Storage Class with reclaimPolicy: Delete/Retain
    2. Create PVC with 'accessModes' 'ReadWriteOnce'
    3. Create two pods using the same PVC
    4. Run IO on the first pod
    5. Verify that the second pod does not get into Running state
    6. Delete the first pod
    7. Verify that the second pod is in Running state
    8. Verify that volume usage in the second pod matches the usage in
       the first pod
    9. Run IO on the second pod
    10. Delete the second pod
    11. Delete the PVC
    12. Verify that the PV associated with the deleted PVC is also
        deleted/released
    """
    # Create Storage Class with reclaimPolicy: Delete/Retain
    sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=self.cbp_obj.name,
        secret_name=self.rbd_secret_obj.name,
        reclaim_policy=reclaim_policy
    )

    # Create PVC with 'accessModes' 'ReadWriteOnce'
    pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'pvc'
    )
    pvc_data['metadata']['namespace'] = self.namespace
    pvc_data['spec']['storageClassName'] = sc_obj.name
    pvc_data['spec']['accessModes'] = ['ReadWriteOnce']
    pvc_obj = PVC(**pvc_data)
    pvc_obj.create()

    # Create first pod
    log.info(f"Creating two pods which use PVC {pvc_obj.name}")
    pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
    pod_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'pod'
    )
    pod_data['metadata']['namespace'] = self.namespace
    pod_data['spec']['volumes'][0]['persistentVolumeClaim'][
        'claimName'
    ] = pvc_obj.name
    pod_obj = Pod(**pod_data)
    pod_obj.create()
    assert helpers.wait_for_resource_state(
        pod_obj, constants.STATUS_RUNNING
    )
    node_pod1 = pod_obj.get()['spec']['nodeName']

    # Create second pod.
    # Retry pod creation until it lands on a different node than the
    # first pod.
    for retry in range(1, 6):
        pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
        pod_data['metadata']['name'] = helpers.create_unique_resource_name(
            'test', 'pod'
        )
        pod_data['metadata']['namespace'] = self.namespace
        pod_data['spec']['volumes'][0]['persistentVolumeClaim'][
            'claimName'
        ] = pvc_obj.name
        pod_obj2 = Pod(**pod_data)
        pod_obj2.create()
        assert helpers.wait_for_resource_state(
            pod_obj2, constants.STATUS_PENDING
        )
        node_pod2 = pod_obj2.get()['spec']['nodeName']

        if node_pod1 != node_pod2:
            break

        log.info(
            f"Both pods are on the same node. Deleting second pod and "
            f"creating another pod. Retry count: {retry}"
        )
        pod_obj2.delete()
        if retry == 5:
            raise UnexpectedBehaviour(
                "Second pod is always created on the same node as the "
                "first pod, even after trying 5 times."
            )

    # Run IO on first pod
    log.info(f"Running IO on first pod {pod_obj.name}")
    pod_obj.run_io('fs', '1G')
    log.info(f"Waiting for IO results from pod {pod_obj.name}")
    fio_result = pod_obj.get_fio_results()
    log.info("IOPs after FIO:")
    log.info(
        f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
    )
    log.info(
        f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
    )

    # Fetch usage details
    mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
    mount_point = mount_point.split()
    usage = mount_point[mount_point.index('/var/lib/www/html') - 1]

    # Verify that the second pod does not get into Running state. Check
    # it for some period of time.
    try:
        assert not pod_obj2.ocp.wait_for_resource(
            condition='Running', resource_name=pod_obj2.name,
        ), "Unexpected: Second pod is in Running state"
    except TimeoutExpiredError:
        log.info(
            f"Verified: Second pod {pod_obj2.name} is not in "
            f"Running state"
        )

    # Delete first pod
    pod_obj.delete(wait=True)

    # Verify first pod is deleted. Note that UnexpectedBehaviour is not
    # caught by the CommandFailed handler, so it propagates.
    try:
        pod_obj.get()
        raise UnexpectedBehaviour(
            f"First pod {pod_obj.name} is not deleted."
        )
    except CommandFailed as exp:
        assert "not found" in str(exp), "Failed to fetch pod details"
    log.info(f"First pod {pod_obj.name} is deleted.")

    # Wait for second pod to be in Running state
    try:
        pod_obj2.ocp.wait_for_resource(
            condition='Running', resource_name=pod_obj2.name, timeout=180
        )
    except TimeoutExpiredError as exp:
        raise TimeoutExpiredError(
            f"Second pod {pod_obj2.name} is not in Running state "
            f"after deleting first pod."
        ) from exp
    log.info(
        f"Second pod {pod_obj2.name} is in Running state after "
        f"deleting the first pod."
    )

    # Verify that volume usage in the second pod matches the usage in
    # the first pod
    mount_point = pod_obj2.exec_cmd_on_pod(command="df -kh")
    mount_point = mount_point.split()
    usage_re = mount_point[mount_point.index('/var/lib/www/html') - 1]
    assert usage_re == usage, (
        "Use percentage in the second pod does not match the first pod"
    )

    # Run IO on second pod
    log.info(f"Running IO on second pod {pod_obj2.name}")
    pod_obj2.run_io('fs', '1G')
    log.info(f"Waiting for IO results from pod {pod_obj2.name}")
    fio_result = pod_obj2.get_fio_results()
    log.info("IOPs after FIO:")
    log.info(
        f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
    )
    log.info(
        f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
    )

    # Delete second pod
    pod_obj2.delete()

    # Verify second pod is deleted
    try:
        pod_obj2.get()
        raise UnexpectedBehaviour(
            f"Second pod {pod_obj2.name} is not deleted."
        )
    except CommandFailed as exp:
        assert "not found" in str(exp), "Failed to fetch pod details"
    log.info(f"Second pod {pod_obj2.name} is deleted.")

    # Get PV name before deleting the PVC
    pvc_obj.reload()
    pv_name = pvc_obj.backed_pv

    # Delete PVC
    pvc_obj.delete()

    # Verify PVC is deleted
    try:
        pvc_obj.get()
        raise UnexpectedBehaviour(
            f"PVC {pvc_obj.name} is not deleted."
        )
    except CommandFailed as exp:
        assert "not found" in str(exp), "Failed to verify PVC deletion."
    log.info(f"PVC {pvc_obj.name} is deleted.")

    pv_obj = OCP(
        kind=constants.PV, namespace=self.namespace
    )

    if reclaim_policy == "Delete":
        # Verify PV is deleted
        for pv_info in TimeoutSampler(
            30, 2, pv_obj.get, out_yaml_format=False
        ):
            if pv_name not in pv_info:
                break
            log.warning(
                f"PV {pv_name} exists after deleting PVC {pvc_obj.name}. "
                f"Checking again."
            )

        # TODO: Verify PV using ceph toolbox. PV should be deleted.
        # Blocked by bz 1723656

    elif reclaim_policy == "Retain":
        # Wait for PV to be in Released state
        assert pv_obj.wait_for_resource(
            condition='Released', resource_name=pv_name
        )
        log.info(f"PV {pv_name} is in Released state")

        # TODO: Delete PV from backend and verify
        # Blocked by bz 1723656
        pv_obj.delete(resource_name=pv_name)

    # Delete Storage Class
    sc_obj.delete()
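
# Side note on the `df -kh` parsing used for both pods above: the whole
# command output is whitespace-split, and the token immediately before
# the mount path is its Use% column. A self-contained sketch of that
# indexing trick (the helper name and sample output are illustrative):
def _usage_from_df(df_output, mount_path='/var/lib/www/html'):
    """Return the Use% token for `mount_path` from `df -kh` output."""
    tokens = df_output.split()
    # `df` prints Use% right before the mount point, so the token at
    # index(mount_path) - 1 is that mount's usage percentage
    return tokens[tokens.index(mount_path) - 1]


# Example with made-up sample output:
# _usage_from_df(
#     "Filesystem Size Used Avail Use% Mounted on\n"
#     "/dev/rbd0 976M 2.6M 958M 1% /var/lib/www/html"
# ) returns '1%'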
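
# Side note on the TimeoutSampler loop above: TimeoutSampler(30, 2,
# pv_obj.get, out_yaml_format=False) calls pv_obj.get(out_yaml_format=False)
# every 2 seconds for up to 30 seconds, yielding each result, and raises
# TimeoutExpiredError once the timeout is exhausted. The same
# "poll until the resource disappears" pattern as a standalone helper
# (the helper name is hypothetical):
def _wait_for_pv_deletion(pv_ocp_obj, pv_name, timeout=30, sleep=2):
    # Iterate over sampled `oc get pv` text output until pv_name is gone;
    # TimeoutSampler raises TimeoutExpiredError if that never happens
    # within `timeout` seconds
    for pv_info in TimeoutSampler(
        timeout, sleep, pv_ocp_obj.get, out_yaml_format=False
    ):
        if pv_name not in pv_info:
            return True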