def test_basics_rbd(self, test_fixture):
    """
    Testing basics: secret creation,
    storage class creation and pvc with cephfs
    """
    # NOTE(review): despite the `rbd` in the test name, this body
    # exercises the CephFS CSI resources (secret, storage class, PVC)
    # — confirm the intended naming with the test author.
    self.cephfs_secret = templating.load_yaml_to_dict(
        constants.CSI_CEPHFS_SECRET_YAML)
    # The admin identity is used instead of the templated user
    # credentials, so drop the user fields from the secret data.
    del self.cephfs_secret['data']['userID']
    del self.cephfs_secret['data']['userKey']
    self.cephfs_secret['data']['adminKey'] = (
        get_admin_key_from_ceph_tools())
    self.cephfs_secret['data']['adminID'] = constants.ADMIN_BASE64
    # Consistency fix: use the module-level `log` logger (as the rest
    # of this function already does) instead of the root `logging`
    # module.
    log.info(self.cephfs_secret)
    secret = OCS(**self.cephfs_secret)
    secret.create()

    # Storage class pointing at this cluster's monitors and the data
    # pool derived from the filesystem name.
    self.cephfs_sc = templating.load_yaml_to_dict(
        constants.CSI_CEPHFS_STORAGECLASS_YAML)
    self.cephfs_sc['parameters']['monitors'] = self.mons
    self.cephfs_sc['parameters']['pool'] = (
        f"{self.fs_data['metadata']['name']}-data0")
    storage_class = OCS(**self.cephfs_sc)
    storage_class.create()

    # PVC against the new storage class must reach Bound.
    self.cephfs_pvc = templating.load_yaml_to_dict(
        constants.CSI_CEPHFS_PVC_YAML)
    pvc = PVC(**self.cephfs_pvc)
    pvc.create()
    log.info(pvc.status)
    assert 'Bound' in pvc.status

    # Teardown in reverse creation order.
    pvc.delete()
    storage_class.delete()
    secret.delete()
def test_basics_cephfs(self):
    """
    Testing basics: secret creation,
    storage class creation and pvc with rbd
    """
    # Build the RBD secret from the template, replacing the templated
    # key material with the live admin key from the ceph tools pod.
    self.rbd_secret = templating.load_yaml_to_dict(
        constants.CSI_RBD_SECRET_YAML)
    del self.rbd_secret['data']['kubernetes']
    self.rbd_secret['data']['admin'] = get_admin_key_from_ceph_tools()
    logging.info(self.rbd_secret)
    rbd_secret_obj = OCS(**self.rbd_secret)
    rbd_secret_obj.create()

    # Storage class: point at this cluster's monitors and drop the
    # templated userid so the admin credentials above are used.
    self.rbd_sc = templating.load_yaml_to_dict(
        constants.CSI_RBD_STORAGECLASS_YAML)
    self.rbd_sc['parameters']['monitors'] = self.mons
    del self.rbd_sc['parameters']['userid']
    rbd_sc_obj = OCS(**self.rbd_sc)
    rbd_sc_obj.create()

    # A PVC against the new storage class must reach Bound.
    self.rbd_pvc = templating.load_yaml_to_dict(constants.CSI_RBD_PVC_YAML)
    rbd_pvc_obj = PVC(**self.rbd_pvc)
    rbd_pvc_obj.create()
    assert 'Bound' in rbd_pvc_obj.status

    # Clean up in reverse creation order.
    rbd_pvc_obj.delete()
    rbd_sc_obj.delete()
    rbd_secret_obj.delete()
def create_pvc_invalid_name(pvcname):
    """
    Creates a PVC with an invalid name and verifies that the creation
    fails with the expected name-validation error.

    Args:
        pvcname (str): Name of the pvc to be created

    Returns:
        None

    Raises:
        AssertionError: If the PVC creation succeeds, or fails with an
            error other than the expected name-validation message.
    """
    pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
    pvc_data['metadata']['name'] = pvcname
    pvc_data['spec']['storageClassName'] = SC_OBJ.name
    pvc_obj = PVC(**pvc_data)
    log.info(f"Creating a pvc with name {pvcname}")
    try:
        pvc_obj.create()
    except CommandFailed as ex:
        # Expected kubernetes validation message for invalid names.
        error = ("subdomain must consist of lower case alphanumeric "
                 "characters, '-' or '.', and must start and end with "
                 "an alphanumeric character")
        if error in str(ex):
            log.info(f"PVC creation failed with error \n {ex} \n as "
                     "invalid pvc name is provided. EXPECTED")
        else:
            # Bug fix: the original `assert ("...")` asserted a
            # non-empty string literal, which is always truthy and
            # therefore could never fail. Raise explicitly instead.
            raise AssertionError(
                f"PVC creation failed with an unexpected error:\n{ex}"
            )
    else:
        # Bug fix: the original silently passed when the creation
        # unexpectedly succeeded; fail loudly in that case.
        raise AssertionError(
            "PVC creation with invalid name succeeded : NOT expected"
        )
def create_pvc_invalid_size(pvcsize):
    """
    Creates a PVC with an invalid size and verifies that the creation
    fails with the expected quantity-validation error.

    Args:
        pvcsize (str): Size of the pvc to be created

    Returns:
        None

    Raises:
        AssertionError: If the PVC creation succeeds, or fails with an
            error other than the expected size-validation message.
    """
    pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
    pvc_data['metadata']['name'] = "auto"
    pvc_data['spec']['resources']['requests']['storage'] = pvcsize
    pvc_data['spec']['storageClassName'] = SC_OBJ.name
    pvc_obj = PVC(**pvc_data)
    log.info(f"Creating a PVC with size {pvcsize}")
    try:
        pvc_obj.create()
    except CommandFailed as ex:
        # Expected kubernetes validation message for invalid quantities.
        error = ("quantities must match the regular expression '^([+-]?[0-9.]"
                 "+)([eEinumkKMGTP]*[-+]?[0-9]*)$'")
        if error in str(ex):
            log.info(f"PVC creation failed with error \n {ex} \n as "
                     "invalid pvc size is provided. EXPECTED")
        else:
            # Bug fix: the original `assert ("...")` asserted a
            # non-empty string literal, which is always truthy and
            # therefore could never fail. Raise explicitly instead.
            raise AssertionError(
                f"PVC creation failed with an unexpected error:\n{ex}"
            )
    else:
        # Bug fix: the original silently passed when the creation
        # unexpectedly succeeded; fail loudly in that case.
        raise AssertionError(
            "PVC creation with invalid size succeeded : NOT expected"
        )
def test_storageclass_invalid(self, invalid_storageclass):
    """
    Test that Persistent Volume Claim can not be created from
    misconfigured CephFS Storage Class.
    """
    pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_name = helpers.create_unique_resource_name('test', 'pvc')
    pvc_data['metadata']['name'] = pvc_name
    pvc_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    pvc_data['spec']['storageClassName'] = invalid_storageclass[
        'metadata']['name']
    logger.info(
        f"Create PVC {pvc_name} "
        f"with storageClassName "
        f"{invalid_storageclass['metadata']['name']}"
    )
    pvc = PVC(**pvc_data)
    pvc.create()

    # A PVC backed by a misconfigured storage class must stay Pending.
    pvc_status = pvc.status
    logger.debug(f"Status of PVC {pvc_name} after creation: {pvc_status}")
    assert pvc_status == constants.STATUS_PENDING

    logger.info(
        f"Waiting for status '{constants.STATUS_BOUND}' "
        f"for 60 seconds (it shouldn't change)"
    )
    with pytest.raises(TimeoutExpiredError):
        # raising TimeoutExpiredError is expected behavior
        # Bug fix: the original additionally asserted on the return
        # value after this call, but that code was unreachable — the
        # expected TimeoutExpiredError aborts the `with` block before
        # the assignment completes. The dead assertion is removed.
        pvc.ocp.wait_for_resource(
            resource_name=pvc_name,
            condition=constants.STATUS_BOUND,
            timeout=60,
            sleep=20
        )

    # Re-check: the PVC must still be Pending after the wait window.
    pvc_status = pvc.status
    logger.info(f"Status of PVC {pvc_name} after 60 seconds: {pvc_status}")
    assert_msg = (
        f"PVC {pvc_name} hasn't reached status "
        f"{constants.STATUS_PENDING}"
    )
    assert pvc_status == constants.STATUS_PENDING, assert_msg

    logger.info(f"Deleting PVC {pvc_name}")
    pvc.delete()
def factory(
    interface=constants.CEPHBLOCKPOOL,
    project=None,
    storageclass=None,
    size=None,
    custom_data=None,
    status=constants.STATUS_BOUND,
):
    """
    Create a PVC, optionally from caller-supplied data.

    Args:
        interface (str): CephBlockPool or CephFileSystem. This decides
            whether a RBD based or CephFS resource is created.
            RBD is default.
        project (object): ocs_ci.ocs.resources.ocs.OCS instance
            of 'Project' kind.
        storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
            of 'StorageClass' kind.
        size (int): The requested size for the PVC
        custom_data (dict): If provided then PVC object is created
            by using these data. Parameters `project` and `storageclass`
            are not used but reference is set if provided.
        status (str): If provided then factory waits for object to reach
            desired state.

    Returns:
        object: helpers.create_pvc instance.
    """
    if custom_data:
        # A complete PVC definition was supplied — create it verbatim.
        pvc_obj = PVC(**custom_data)
        pvc_obj.create(do_reload=False)
    else:
        # Fall back to freshly created project/storageclass fixtures
        # when the caller did not supply them.
        if not project:
            project = project_factory()
        if not storageclass:
            storageclass = storageclass_factory(interface)
        pvc_size = f"{size}Gi" if size else None
        pvc_obj = helpers.create_pvc(
            sc_name=storageclass.name,
            namespace=project.namespace,
            size=pvc_size,
            wait=False,
        )
        assert pvc_obj, "Failed to create PVC"
    if status:
        helpers.wait_for_resource_state(pvc_obj, status)
    # Keep back-references for callers and for fixture teardown.
    pvc_obj.storageclass = storageclass
    pvc_obj.project = project
    instances.append(pvc_obj)
    return pvc_obj
def factory(interface=constants.CEPHBLOCKPOOL, project=None, storageclass=None, size=None, access_mode=constants.ACCESS_MODE_RWO, custom_data=None, status=constants.STATUS_BOUND, volume_mode=None): """ Args: interface (str): CephBlockPool or CephFileSystem. This decides whether a RBD based or CephFS resource is created. RBD is default. project (object): ocs_ci.ocs.resources.ocs.OCS instance of 'Project' kind. storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance of 'StorageClass' kind. size (int): The requested size for the PVC access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany. This decides the access mode to be used for the PVC. ReadWriteOnce is default. custom_data (dict): If provided then PVC object is created by using these data. Parameters `project` and `storageclass` are not used but reference is set if provided. status (str): If provided then factory waits for object to reach desired state. volume_mode (str): Volume mode for PVC. eg: volume_mode='Block' to create rbd `block` type volume Returns: object: helpers.create_pvc instance. 
""" if custom_data: pvc_obj = PVC(**custom_data) pvc_obj.create(do_reload=False) else: nonlocal active_project nonlocal active_rbd_storageclass nonlocal active_cephfs_storageclass project = project or active_project or project_factory() active_project = project if interface == constants.CEPHBLOCKPOOL: storageclass = (storageclass or active_rbd_storageclass or storageclass_factory(interface)) active_rbd_storageclass = storageclass elif interface == constants.CEPHFILESYSTEM: storageclass = (storageclass or active_cephfs_storageclass or storageclass_factory(interface)) active_cephfs_storageclass = storageclass pvc_size = f"{size}Gi" if size else None pvc_obj = helpers.create_pvc(sc_name=storageclass.name, namespace=project.namespace, size=pvc_size, do_reload=False, access_mode=access_mode, volume_mode=volume_mode) assert pvc_obj, "Failed to create PVC" if status: helpers.wait_for_resource_state(pvc_obj, status) pvc_obj.storageclass = storageclass pvc_obj.project = project pvc_obj.access_mode = access_mode instances.append(pvc_obj) return pvc_obj
def test_rbd_based_rwo_pvc(self, reclaim_policy):
    """
    Verifies RBD Based RWO Dynamic PVC creation with Reclaim policy set
    to Delete/Retain

    Steps:
    1. Create Storage Class with reclaimPolicy: Delete/Retain
    2. Create PVC with 'accessModes' 'ReadWriteOnce'
    3. Create two pods using same PVC
    4. Run IO on first pod
    5. Verify second pod is not getting into Running state
    6. Delete first pod
    7. Verify second pod is in Running state
    8. Verify usage of volume in second pod is matching with usage in
       first pod
    9. Run IO on second pod
    10. Delete second pod
    11. Delete PVC
    12. Verify PV associated with deleted PVC is also deleted/released
    """
    # Create Storage Class with reclaimPolicy set from the parametrized
    # `reclaim_policy` (Delete or Retain)
    sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=self.cbp_obj.name,
        secret_name=self.rbd_secret_obj.name,
        reclaim_policy=reclaim_policy
    )

    # Create PVC with 'accessModes' 'ReadWriteOnce'
    pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'pvc'
    )
    pvc_data['metadata']['namespace'] = self.namespace
    pvc_data['spec']['storageClassName'] = sc_obj.name
    pvc_data['spec']['accessModes'] = ['ReadWriteOnce']
    pvc_obj = PVC(**pvc_data)
    pvc_obj.create()

    # Create first pod
    log.info(f"Creating two pods which use PVC {pvc_obj.name}")
    pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
    pod_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'pod'
    )
    pod_data['metadata']['namespace'] = self.namespace
    pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_obj.name
    pod_obj = Pod(**pod_data)
    pod_obj.create()
    assert helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
    node_pod1 = pod_obj.get()['spec']['nodeName']

    # Create second pod
    # Try creating pod until it is on a different node than first pod.
    # NOTE(review): an RWO volume can still be shared by pods scheduled
    # on the SAME node, so a second pod on a different node is required
    # for the exclusivity check below — confirm against RWO semantics.
    for retry in range(1, 6):
        pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
        pod_data['metadata']['name'] = helpers.create_unique_resource_name(
            'test', 'pod'
        )
        pod_data['metadata']['namespace'] = self.namespace
        pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_obj.name
        pod_obj2 = Pod(**pod_data)
        pod_obj2.create()
        # Second pod is expected to stay Pending while the first pod
        # holds the RWO volume
        assert helpers.wait_for_resource_state(pod_obj2, constants.STATUS_PENDING)
        node_pod2 = pod_obj2.get()['spec']['nodeName']

        if node_pod1 != node_pod2:
            break
        log.info(
            f"Both pods are on same node. Deleting second pod and "
            f"creating another pod. Retry count:{retry}"
        )
        pod_obj2.delete()
        if retry == 5:
            raise UnexpectedBehaviour(
                "Second pod is always created on same node as of first "
                "pod even after trying 5 times."
            )

    # Run IO on first pod
    log.info(f"Running IO on first pod {pod_obj.name}")
    pod_obj.run_io('fs', '1G')
    logging.info(f"Waiting for IO results from pod {pod_obj.name}")
    fio_result = pod_obj.get_fio_results()
    logging.info("IOPs after FIO:")
    logging.info(
        f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
    )
    logging.info(
        f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
    )

    # Fetch usage details. The token preceding the mount point in the
    # `df -kh` output is taken as the usage value for that volume.
    mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
    mount_point = mount_point.split()
    usage = mount_point[mount_point.index('/var/lib/www/html') - 1]

    # Verify that second pod is not getting into Running state. Check it
    # for some period of time.
    try:
        assert not pod_obj2.ocp.wait_for_resource(
            condition='Running', resource_name=pod_obj2.name,
        ), "Unexpected: Second pod is in Running state"
    except TimeoutExpiredError:
        # Timing out while waiting for Running is the expected outcome
        log.info(
            f"Verified: Second pod {pod_obj2.name} is not in "
            f"Running state"
        )

    # Delete first pod
    pod_obj.delete(wait=True)

    # Verify pod is deleted
    try:
        pod_obj.get()
        raise UnexpectedBehaviour(
            f"First pod {pod_obj.name} is not deleted."
        )
    except CommandFailed as exp:
        assert "not found" in str(exp), (
            "Failed to fetch pod details"
        )
        log.info(f"First pod {pod_obj.name} is deleted.")

    # Wait for second pod to be in Running state
    try:
        pod_obj2.ocp.wait_for_resource(
            condition='Running', resource_name=pod_obj2.name, timeout=180
        )
    except TimeoutExpiredError as exp:
        raise TimeoutExpiredError(
            f"Second pod {pod_obj2.name} is not in Running state "
            f"after deleting first pod."
        ) from exp
    log.info(
        f"Second pod {pod_obj2.name} is in Running state after "
        f"deleting the first pod."
    )

    # Verify that volume usage in second pod is matching with the usage in
    # first pod
    mount_point = pod_obj2.exec_cmd_on_pod(command="df -kh")
    mount_point = mount_point.split()
    usage_re = mount_point[mount_point.index('/var/lib/www/html') - 1]
    assert usage_re == usage, (
        "Use percentage in new pod is not matching with old pod"
    )

    # Run IO on second pod
    log.info(f"Running IO on second pod {pod_obj2.name}")
    pod_obj2.run_io('fs', '1G')
    logging.info(f"Waiting for IO results from pod {pod_obj2.name}")
    fio_result = pod_obj2.get_fio_results()
    logging.info("IOPs after FIO:")
    logging.info(
        f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
    )
    logging.info(
        f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
    )

    # Delete second pod
    pod_obj2.delete()

    # Verify pod is deleted
    try:
        pod_obj2.get()
        raise UnexpectedBehaviour(
            f"Second pod {pod_obj2.name} is not deleted."
        )
    except CommandFailed as exp:
        assert "not found" in str(exp), (
            "Failed to fetch pod details"
        )
        log.info(f"Second pod {pod_obj2.name} is deleted.")

    # Get PV name before deleting the PVC so the PV can be verified
    pvc_obj.reload()
    pv_name = pvc_obj.backed_pv

    # Delete PVC
    pvc_obj.delete()

    # Verify PVC is deleted
    try:
        pvc_obj.get()
        raise UnexpectedBehaviour(
            f"PVC {pvc_obj.name} is not deleted."
        )
    except CommandFailed as exp:
        assert "not found" in str(exp), (
            "Failed to verify PVC deletion."
        )
        log.info(f"PVC {pvc_obj.name} is deleted.")

    pv_obj = OCP(
        kind=constants.PV, namespace=self.namespace
    )

    if reclaim_policy == "Delete":
        # Verify PV is deleted
        for pv_info in TimeoutSampler(
                30, 2, pv_obj.get, out_yaml_format=False
        ):
            if pv_name not in pv_info:
                break
            log.warning(
                f"PV {pv_name} exists after deleting PVC {pvc_obj.name}. "
                f"Checking again."
            )

        # TODO: Verify PV using ceph toolbox. PV should be deleted.
        # Blocked by bz 1723656

    elif reclaim_policy == "Retain":
        # Wait for PV to be in Released state
        assert pv_obj.wait_for_resource(
            condition='Released', resource_name=pv_name
        )
        log.info(f"PV {pv_name} is in Released state")

        # TODO: Delete PV from backend and verify
        # Blocked by bz 1723656
        pv_obj.delete(resource_name=pv_name)

    # Delete Storage Class
    sc_obj.delete()