def test_pvc_delete_create_same_name(self, test_fixture):
    """
    TC OCS 324: Delete a bound PVC and re-create a PVC with the same
    name, verifying that the backing PV reaches Bound state both times.
    """
    global PVC_OBJ
    PVC_OBJ = helpers.create_pvc(sc_name=self.sc_obj.name)
    pv_obj = ocp.OCP(
        kind=constants.PV,
        namespace=config.ENV_DATA['cluster_namespace']
    )
    backed_pv = PVC_OBJ.get().get('spec').get('volumeName')
    pv_status = pv_obj.get(backed_pv).get('status').get('phase')
    assert constants.STATUS_BOUND in pv_status, (
        f"{pv_obj.kind} {backed_pv} failed to reach {constants.STATUS_BOUND}"
    )
    logger.info(f"Deleting {PVC_OBJ.kind} {PVC_OBJ.name}")
    # Message has no placeholders, so no 'f' prefix needed
    assert PVC_OBJ.delete(), "Failed to delete PVC"
    # Wait for the deletion to complete before reusing the name; creating
    # a PVC with the same name can otherwise race with the terminating one
    PVC_OBJ.ocp.wait_for_delete(PVC_OBJ.name)
    logger.info(f"Creating {PVC_OBJ.kind} with same name {PVC_OBJ.name}")
    PVC_OBJ = helpers.create_pvc(
        sc_name=self.sc_obj.name, pvc_name=PVC_OBJ.name
    )
    backed_pv = PVC_OBJ.get().get('spec').get('volumeName')
    pv_status = pv_obj.get(backed_pv).get('status').get('phase')
    assert constants.STATUS_BOUND in pv_status, (
        f"{pv_obj.kind} {backed_pv} failed to reach {constants.STATUS_BOUND}"
    )
def setup():
    """
    Setting up the environment - Creating Secret

    Creates (in order): an RBD pool, RBD secret, RBD storage class, a
    CephFilesystem, CephFS secret, CephFS storage class, and then one PVC
    from each storage class. All resources are stored in module globals
    so the matching teardown can clean them up.
    """
    global RBD_POOL, RBD_STORAGE_CLASS, RBD_SECRET, CEPHFS_OBJ, \
        CEPHFS_STORAGE_CLASS, CEPHFS_SECRET, RBD_PVC, CEPHFS_PVC
    log.info("Creating RBD Pool")
    RBD_POOL = helpers.create_ceph_block_pool()
    log.info("Creating RBD Secret")
    RBD_SECRET = helpers.create_secret(constants.CEPHBLOCKPOOL)
    log.info("Creating RBD StorageClass")
    RBD_STORAGE_CLASS = helpers.create_storage_class(
        constants.CEPHBLOCKPOOL, RBD_POOL.name, RBD_SECRET.name
    )
    log.info("Creating CephFilesystem")
    CEPHFS_OBJ = helpers.create_cephfilesystem()
    log.info("Creating FS Secret")
    CEPHFS_SECRET = helpers.create_secret(constants.CEPHFILESYSTEM)
    log.info("Creating FS StorageClass")
    CEPHFS_STORAGE_CLASS = helpers.create_storage_class(
        constants.CEPHFILESYSTEM, helpers.get_cephfs_data_pool_name(),
        CEPHFS_SECRET.name
    )
    # Fixed log message typo: "RBC" -> "RBD"
    log.info("Creating RBD PVC")
    RBD_PVC = helpers.create_pvc(sc_name=RBD_STORAGE_CLASS.name)
    log.info("Creating CephFs PVC")
    CEPHFS_PVC = helpers.create_pvc(sc_name=CEPHFS_STORAGE_CLASS.name)
def test_pvc_delete_create_same_name(self, test_fixture):
    """
    TC OCS 324: Delete a PVC and re-create a PVC with the same name.
    """
    global PVC_OBJ
    PVC_OBJ = helpers.create_pvc(sc_name=self.sc_obj.name)
    logger.info(f"Deleting PersistentVolumeClaim with name {PVC_OBJ.name}")
    # Message has no placeholders, so no 'f' prefix needed
    assert PVC_OBJ.delete(), "Failed to delete PVC"
    # Wait until the old PVC is fully gone before reusing its name;
    # otherwise the create below can race with the terminating object
    PVC_OBJ.ocp.wait_for_delete(PVC_OBJ.name)
    PVC_OBJ = helpers.create_pvc(
        sc_name=self.sc_obj.name, pvc_name=PVC_OBJ.name
    )
    logger.info(
        f"PersistentVolumeClaim created with same name {PVC_OBJ.name}"
    )
def create_mutiple_pvcs_statistics(self, num_of_samples, teardown_factory, pvc_size):
    """
    Creates number (samples_num) of PVCs, measures creation time for each
    PVC and returns list of creation times.

    Args:
        num_of_samples: Number of the sampled created PVCs.
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests.
        pvc_size: Size of the created PVCs.

    Returns:
        List of the creation times of all the created PVCs.
    """
    time_measures = []
    for i in range(num_of_samples):
        log.info(f'Start creation of PVC number {i + 1}.')
        pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
        teardown_factory(pvc_obj)
        create_time = helpers.measure_pvc_creation_time(
            self.interface, pvc_obj.name)
        # Use the module-level 'log' logger consistently; the original
        # called logging.info here, which goes through the root logger
        # and bypasses this module's logger configuration
        log.info(f"PVC created in {create_time} seconds")
        time_measures.append(create_time)
    return time_measures
def change_registry_backend_to_ocs():
    """
    Function to deploy registry with OCS backend.

    Raises:
        AssertionError: When failure in change of registry backend to OCS
    """
    # This object is a PVC (a claim), so name it accordingly
    pvc_obj = helpers.create_pvc(
        sc_name=constants.DEFAULT_SC_CEPHFS,
        pvc_name='registry-cephfs-rwx-pvc',
        namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE,
        size='100Gi',
        access_mode=constants.ACCESS_MODE_RWX
    )
    helpers.wait_for_resource_state(pvc_obj, 'Bound')
    ocp_obj = ocp.OCP(
        kind=constants.CONFIG,
        namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE
    )
    # JSON patch that points the registry's storage at the new PVC
    param_cmd = f'[{{"op": "add", "path": "/spec/storage", "value": {{"pvc": {{"claim": "{pvc_obj.name}"}}}}}}]'
    # Message fixed: was a placeholder-free f-string with broken grammar
    assert ocp_obj.patch(
        resource_name=constants.IMAGE_REGISTRY_RESOURCE_NAME,
        params=param_cmd
    ), "Registry pod storage backend change to OCS did not succeed"

    # Validate registry pod status
    validate_registry_pod_status()

    # Validate pvc mount in the registry pod
    validate_pvc_mount_on_registry_pod()
def change_registry_backend_to_ocs():
    """
    Function to deploy registry with OCS backend.

    Raises:
        AssertionError: When failure in change of registry backend to OCS
    """
    # Use the constant directly; wrapping it in an f-string was a no-op
    sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
    # This object is a PVC (a claim), so name it accordingly
    pvc_obj = helpers.create_pvc(
        sc_name=sc_name,
        pvc_name='registry-cephfs-rwx-pvc',
        namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE,
        size='100Gi',
        access_mode=constants.ACCESS_MODE_RWX
    )
    helpers.wait_for_resource_state(pvc_obj, 'Bound')
    # JSON patch that points the registry's storage at the new PVC
    param_cmd = f'[{{"op": "add", "path": "/spec/storage", "value": {{"pvc": {{"claim": "{pvc_obj.name}"}}}}}}]'
    run_cmd(f"oc patch {constants.IMAGE_REGISTRY_CONFIG} -p "
            f"'{param_cmd}' --type json")

    # Validate registry pod status; retried because the rollout of the
    # new backend takes a while to settle
    retry((CommandFailed, UnexpectedBehaviour), tries=3,
          delay=15)(validate_registry_pod_status)()

    # Validate pvc mount in the registry pod
    retry((CommandFailed, UnexpectedBehaviour, AssertionError), tries=3,
          delay=15)(validate_pvc_mount_on_registry_pod)()
def test_pvc_deletion_measurement_performance(self, teardown_factory, pvc_size):
    """
    Measuring PVC deletion time is within supported limits
    """
    logging.info('Start creating new PVC')
    pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
    helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
    pvc_obj.reload()
    pv_name = pvc_obj.backed_pv
    pvc_reclaim_policy = pvc_obj.reclaim_policy
    teardown_factory(pvc_obj)
    # Log BEFORE triggering the deletion; originally this was logged
    # after delete() had already been called
    logging.info('Start deletion of PVC')
    pvc_obj.delete()
    pvc_obj.ocp.wait_for_delete(pvc_obj.name)
    if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
        helpers.validate_pv_delete(pvc_obj.backed_pv)
    delete_time = helpers.measure_pvc_deletion_time(
        self.interface, pv_name)
    # Deletion time for CephFS PVC is a little over 3 seconds
    accepted_deletion_time = 4 if self.interface == constants.CEPHFILESYSTEM else 3
    logging.info(f"PVC deleted in {delete_time} seconds")
    if delete_time > accepted_deletion_time:
        raise ex.PerformanceException(
            f"PVC deletion time is {delete_time} and greater than "
            f"{accepted_deletion_time} second"
        )
    push_to_pvc_time_dashboard(self.interface, "deletion", delete_time)
def test_delete_create_pvc_same_name(self, interface, pvc_factory, teardown_factory): """ Delete PVC and create a new PVC with same name """ # Create a PVC pvc_obj1 = pvc_factory(interface=interface, access_mode=constants.ACCESS_MODE_RWO, status=constants.STATUS_BOUND) # Delete the PVC logger.info(f"Deleting PVC {pvc_obj1.name}") pvc_obj1.delete() pvc_obj1.ocp.wait_for_delete(pvc_obj1.name) logger.info(f"Deleted PVC {pvc_obj1.name}") # Create a new PVC with same name logger.info(f"Creating new PVC with same name {pvc_obj1.name}") pvc_obj2 = helpers.create_pvc(sc_name=pvc_obj1.storageclass.name, pvc_name=pvc_obj1.name, namespace=pvc_obj1.project.namespace, do_reload=False) teardown_factory(pvc_obj2) # Check the new PVC and PV are Bound helpers.wait_for_resource_state(resource=pvc_obj2, state=constants.STATUS_BOUND) pv_obj2 = pvc_obj2.backed_pv_obj helpers.wait_for_resource_state(resource=pv_obj2, state=constants.STATUS_BOUND)
def test_ocs_347(self, resources):
    """
    OCS-347: Create an RBD storage class, a PVC from it, and an nginx pod
    mounting that PVC; verify the PVC binds and the pod reaches Running.
    """
    pod, pvc, storageclass = resources
    log.info("Creating RBD StorageClass")
    storageclass.append(
        helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=self.cbp_obj.name,
            secret_name=self.rbd_secret_obj.name,
        )
    )
    log.info("Creating a PVC")
    pvc.append(helpers.create_pvc(sc_name=storageclass[0].name))
    for pvc_obj in pvc:
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
    # Fixed garbled log message ("a pod on with pvc" -> "a pod with pvc")
    log.info(
        f"Creating a pod with pvc {pvc[0].name}"
    )
    pod_obj = helpers.create_pod(
        interface_type=constants.CEPHBLOCKPOOL,
        pvc_name=pvc[0].name,
        pod_dict_path=constants.NGINX_POD_YAML
    )
    pod.append(pod_obj)
    helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
    pod_obj.reload()
def dynamic_pvc_base(self, interface_type, reclaim_policy):
    """
    Base function for Dynamic PVC creation tests
    Fetches the worker nodes name list, creates StorageClass and PVC

    Args:
        interface_type (str): CephBlockPool or CephFileSystem interface
        reclaim_policy (str): reclaimPolicy to set on the storage class

    Note:
        All created resources are stored on ``self`` for the tests to use.
    """
    self.interface_type = interface_type
    self.reclaim_policy = reclaim_policy
    self.worker_nodes_list = helpers.get_worker_nodes()
    # Pick the pool and secret that match the requested interface
    if self.interface_type == constants.CEPHBLOCKPOOL:
        self.interface_name = self.cbp_obj.name
        self.secret_name = self.rbd_secret_obj.name

    elif self.interface_type == constants.CEPHFILESYSTEM:
        self.interface_name = helpers.get_cephfs_data_pool_name()
        self.secret_name = self.cephfs_secret_obj.name

    logger.info(
        f"Creating Storage Class with reclaimPolicy: {self.reclaim_policy}"
    )
    self.sc_obj = helpers.create_storage_class(
        interface_type=self.interface_type,
        interface_name=self.interface_name,
        secret_name=self.secret_name,
        reclaim_policy=self.reclaim_policy)

    logger.info(f"Creating PVC with accessModes: {self.access_mode}")
    # wait=True presumably blocks until the PVC is created/bound —
    # TODO(review): confirm against the helpers.create_pvc signature
    self.pvc_obj = helpers.create_pvc(
        sc_name=self.sc_obj.name, namespace=self.namespace,
        size=self.pvc_size, wait=True, access_mode=self.access_mode)
def test_reclaim_policy_retain(self):
    """
    Verify the 'Retain' reclaim policy: after the PVC is deleted the
    backing PV stays in 'Released' state (and the Ceph image persists)
    until the PV is deleted explicitly.

    (Docstring corrected — the original described an unrelated
    invalid-name/size test.)
    """
    # Baseline image count so we can assert the image was retained
    pvc_count = len(list_ceph_images(pool_name=self.cbp_obj.name))
    pvc_obj = helpers.create_pvc(
        sc_name=self.sc_obj_retain.name,
        pvc_name=helpers.create_unique_resource_name('retain', 'pvc')
    )
    helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
    pvc_obj.reload()
    pv_name = pvc_obj.get()['spec']['volumeName']
    pv_namespace = pvc_obj.get()['metadata']['namespace']
    pv_obj = ocp.OCP(kind='PersistentVolume', namespace=pv_namespace)
    assert pvc_obj.delete()
    pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
    # With Retain, the PV must NOT be reclaimed after PVC deletion
    assert pv_obj.get(pv_name).get('status').get('phase') == 'Released', (
        f"Status of PV {pv_obj.get(pv_name)} is not 'Released'"
    )
    log.info("Status of PV is Released")
    # The Ceph image backing the deleted PVC should still exist
    assert pvc_count + 1 == len(list_ceph_images(pool_name=self.cbp_obj.name))
    # The PV has to be removed manually under the Retain policy
    assert pv_obj.delete(resource_name=pv_name)
    assert pv_obj.wait_for_delete(pv_name, 60), (
        f"PV {pv_name} is not deleted"
    )
def test_ocs_347(self):
    """
    OCS-347: Create an RBD storage class and a PVC from it.
    Created resources are stored in module globals for teardown.
    """
    global PVC, STORAGE_CLASS
    log.info("Creating RBD StorageClass")
    STORAGE_CLASS = helpers.create_storage_class(
        constants.CEPHBLOCKPOOL, 'rbd', SECRET.name
    )
    log.info("Creating a PVC")
    # Pass sc_name by keyword for consistency with other create_pvc calls
    PVC = helpers.create_pvc(sc_name=STORAGE_CLASS.name)
def create_pvc(request):
    """
    Create a persistent Volume Claim

    Fixture helper: creates a PVC from the requesting test class's
    pre-created storage class (``sc_obj``) and stores it on the class
    as ``pvc_obj``. Does not wait for the PVC to become Bound.
    """
    class_instance = request.node.cls
    class_instance.pvc_obj = helpers.create_pvc(
        sc_name=class_instance.sc_obj.name)
def test_create_multiple_sc_with_same_pool_name(self, interface_type, resources):
    """
    This test function does below,
    *. Creates multiple Storage Classes with same pool name
    *. Creates PVCs using each Storage Class
    *. Mount each PVC to an app pod
    *. Run IO on each app pod
    """
    # Unpack resources
    pods, pvcs, storageclasses = resources

    # Create 3 Storage Classes with same pool name
    if interface_type == constants.CEPHBLOCKPOOL:
        secret = self.rbd_secret_obj.name
        interface_name = self.cbp_obj.name
    else:
        interface_type = constants.CEPHFILESYSTEM
        secret = self.cephfs_secret_obj.name
        interface_name = helpers.get_cephfs_data_pool_name()
    for i in range(3):
        log.info(f"Creating a {interface_type} storage class")
        storageclasses.append(
            helpers.create_storage_class(interface_type=interface_type,
                                         interface_name=interface_name,
                                         secret_name=secret))
        log.info(f"{interface_type}StorageClass: {storageclasses[i].name} "
                 f"created successfully")

    # Create PVCs using each SC
    for i in range(3):
        log.info(f"Creating a PVC using {storageclasses[i].name}")
        # Pass sc_name by keyword for consistency with other call sites
        pvcs.append(helpers.create_pvc(sc_name=storageclasses[i].name))
    for pvc in pvcs:
        helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
        pvc.reload()

    # Create app pod and mount each PVC
    for i in range(3):
        log.info(f"Creating an app pod and mount {pvcs[i].name}")
        pods.append(
            helpers.create_pod(interface_type=interface_type,
                               pvc_name=pvcs[i].name,
                               namespace=defaults.ROOK_CLUSTER_NAMESPACE))
    # BUG FIX: the original logged pods[i].name/pvcs[i].name inside this
    # loop using the stale 'i' left over from the previous loop (always
    # index 2); enumerate gives the correct pairing
    for i, pod in enumerate(pods):
        helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
        pod.reload()
        log.info(f"{pod.name} created successfully and "
                 f"mounted {pvcs[i].name}")

    # Run IO on each app pod for sometime
    for pod in pods:
        log.info(f"Running FIO on {pod.name}")
        pod.run_io('fs', size='2G')
    for pod in pods:
        get_fio_rw_iops(pod)
def test_fixture(request, storageclass_factory):
    """
    Setup and teardown

    Creates a storage class, one project, a PVC per project and an app
    pod per PVC, then verifies that PVC metrics are collected on the
    prometheus pod. Teardown deletes pods/PVCs/projects and checks that
    all nodes are Ready.

    Returns:
        tuple: (namespace_list, pvc_objs, pod_objs, sc)
    """
    def teardown():
        # Delete created app pods and pvcs
        assert pod.delete_pods(pod_objs)
        assert pvc.delete_pvcs(pvc_objs)

        # Switch to default project
        ret = ocp.switch_to_default_rook_cluster_project()
        assert ret, 'Failed to switch to default rook cluster project'

        # Delete created projects
        for prj in namespace_list:
            prj.delete(resource_name=prj.namespace)

        # Validate all nodes are in READY state
        wait_for_nodes_status()

    request.addfinalizer(teardown)

    # Create a storage class
    sc = storageclass_factory()

    # Create projects
    namespace_list = helpers.create_multilpe_projects(number_of_project=1)

    # Create pvcs
    pvc_objs = [
        helpers.create_pvc(sc_name=sc.name,
                           namespace=each_namespace.namespace)
        for each_namespace in namespace_list
    ]
    for pvc_obj in pvc_objs:
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()

    # Create app pods
    pod_objs = [
        helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                           pvc_name=each_pvc.name,
                           namespace=each_pvc.namespace)
        for each_pvc in pvc_objs
    ]
    for pod_obj in pod_objs:
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()

    # Check for the created pvc metrics on prometheus pod
    for pvc_obj in pvc_objs:
        assert check_pvcdata_collected_on_prometheus(pvc_obj.name), (
            f"On prometheus pod for created pvc {pvc_obj.name} related data is not collected"
        )
    return namespace_list, pvc_objs, pod_objs, sc
def test_create_storage_class_with_wrong_provisioner(self, interface):
    """
    Test function which creates Storage Class with
    wrong provisioner and verifies PVC status

    A PVC created from a storage class whose provisioner does not exist
    must stay Pending indefinitely; the test samples the status over
    20 seconds to confirm it never leaves Pending.
    """
    log.info(f"Creating a {interface} storage class")
    # Map the human-readable interface name to framework constants
    if interface == "RBD":
        interface_type = constants.CEPHBLOCKPOOL
        secret = self.rbd_secret_obj.name
        interface_name = self.cbp_obj.name
    else:
        interface_type = constants.CEPHFILESYSTEM
        secret = self.cephfs_secret_obj.name
        interface_name = helpers.get_cephfs_data_pool_name()
    # Deliberately wrong provisioner (AWS EFS) for a Ceph-backed class
    sc_obj = helpers.create_storage_class(
        interface_type=interface_type,
        interface_name=interface_name,
        secret_name=secret,
        provisioner=constants.AWS_EFS_PROVISIONER
    )
    log.info(
        f"{interface}Storage class: {sc_obj.name} created successfully")

    # Create PVC
    pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, do_reload=False)

    # Check PVC status
    pvc_output = pvc_obj.get()
    pvc_status = pvc_output['status']['phase']
    log.info(f"Status of PVC {pvc_obj.name} after creation: {pvc_status}")
    log.info(f"Waiting for status '{constants.STATUS_PENDING}' "
             f"for 20 seconds (it shouldn't change)")

    pvc_obj.ocp.wait_for_resource(resource_name=pvc_obj.name,
                                  condition=constants.STATUS_PENDING,
                                  timeout=20,
                                  sleep=5)

    # Check PVC status again after 20 seconds
    pvc_output = pvc_obj.get()
    pvc_status = pvc_output['status']['phase']
    assert_msg = (
        f"PVC {pvc_obj.name} is not in {constants.STATUS_PENDING} "
        f"status")
    assert pvc_status == constants.STATUS_PENDING, assert_msg
    log.info(f"Status of {pvc_obj.name} after 20 seconds: {pvc_status}")

    # Delete PVC
    log.info(f"Deleting PVC: {pvc_obj.name}")
    assert pvc_obj.delete()
    log.info(f"PVC {pvc_obj.name} delete successfully")

    # Delete Storage Class
    log.info(f"Deleting Storageclass: {sc_obj.name}")
    assert sc_obj.delete()
    log.info(f"Storage Class: {sc_obj.name} deleted successfully")
def create_pvc_and_verify_pvc_exists(sc_name, cbp_name):
    """
    Create pvc, verify pvc is bound in state and
    pvc exists on ceph side

    Args:
        sc_name (str): Name of the storage class to create the PVC from
        cbp_name (str): Ceph block pool in which the PV should exist

    Returns:
        PVC object of the created (and Bound) PVC
    """
    pvc_obj = helpers.create_pvc(sc_name=sc_name, size='10Gi')
    # The docstring promises a Bound PVC, so actually wait for it
    # (matches the sibling implementation of this helper)
    helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
    pvc_obj.reload()

    # Validate pv is created on ceph
    # ('f' prefix removed: the message has no placeholders)
    logger.info("Verifying pv exists on backend")
    pvc_obj.verify_pv_exists_in_backend(cbp_name)
    return pvc_obj
def create_pvc(request):
    """
    Create a persistent Volume Claim

    Fixture helper: creates a PVC from the requesting test class's
    pre-created storage class (``sc_obj``) in the class's namespace,
    waits until it reaches Bound state, and stores the refreshed object
    on the class as ``pvc_obj``.
    """
    class_instance = request.node.cls
    class_instance.pvc_obj = helpers.create_pvc(
        sc_name=class_instance.sc_obj.name,
        namespace=class_instance.namespace)
    helpers.wait_for_resource_state(class_instance.pvc_obj,
                                    constants.STATUS_BOUND)
    class_instance.pvc_obj.reload()
def test_monitoring_after_respinning_ceph_pods(self, test_fixture):
    """
    Test case to validate respinning the ceph pods and
    its interaction with prometheus pod

    After deleting mgr/mon/osd pods one at a time, verifies that
    metrics for the pre-existing PVCs are still collected, and that
    PVCs/pods created afterwards also get their metrics collected.
    """
    namespace_list, pvc_objs, pod_objs, sc = test_fixture

    # Re-spin the ceph pods(i.e mgr, mon, osd, mds) one by one
    resource_to_delete = ['mgr', 'mon', 'osd']
    disruption = disruption_helpers.Disruptions()
    for res_to_del in resource_to_delete:
        disruption.set_resource(resource=res_to_del)
        disruption.delete_resource()

    # Check for the created pvc metrics after respinning ceph pods
    for pvc_obj in pvc_objs:
        assert check_pvcdata_collected_on_prometheus(pvc_obj.name), (
            f"On prometheus pod for created pvc {pvc_obj.name} related data is not collected"
        )

    # Create projects after the respinning ceph pods
    namespaces = helpers.create_multilpe_projects(number_of_project=2)
    # Extend the fixture's lists so its teardown cleans these up too
    namespace_list.extend(namespaces)

    # Create pvcs after the respinning ceph pods
    pvcs = [
        helpers.create_pvc(sc_name=sc.name,
                           namespace=each_namespace.namespace)
        for each_namespace in namespaces
    ]
    for pvc_obj in pvcs:
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
    pvc_objs.extend(pvcs)

    # Create app pods after the respinning ceph pods
    pods = [
        helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                           pvc_name=each_pvc.name,
                           namespace=each_pvc.namespace)
        for each_pvc in pvcs
    ]
    for pod_obj in pods:
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
    pod_objs.extend(pods)

    # Check for the created pvc metrics on prometheus pod
    for pvc_obj in pvcs:
        assert check_pvcdata_collected_on_prometheus(pvc_obj.name), (
            f"On prometheus pod for created pvc {pvc_obj.name} related data is not collected"
        )
def create_pvc_and_verify_pvc_exists(sc_name, cbp_name):
    """
    Create pvc, verify pvc is bound in state and
    pvc exists on ceph side

    Args:
        sc_name (str): Name of the storage class to create the PVC from
        cbp_name (str): Ceph block pool in which the PV should exist

    Returns:
        PVC object of the created (and Bound) PVC
    """
    pvc_obj = helpers.create_pvc(sc_name=sc_name, size='10Gi')
    helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
    pvc_obj.reload()

    # Validate pv is created on ceph
    # ('f' prefix removed: the message has no placeholders)
    logger.info("Verifying pv exists on backend")
    pvc_obj.verify_pv_exists_in_backend(cbp_name)
    return pvc_obj
def test_pvc_creation_measurement_performance(self):
    """
    Measuring PVC creation time

    Fails if an RBD PVC takes longer than 1 second to create.
    """
    log.info('Start creating new PVC')
    pvc_obj = create_pvc(sc_name=self.sc_obj.name, size=self.pvc_size)
    create_time = measure_pvc_creation_time('CephBlockPool', pvc_obj.name)
    # 1 second is the acceptance threshold for RBD PVC creation
    if create_time > 1:
        raise AssertionError(
            f"PVC creation time is {create_time} and greater than 1 second"
        )
    # Use the module's 'log' logger consistently; the original mixed it
    # with logging.info (root logger) for this final message
    log.info("PVC creation took less than a 1 second")
def create_pvc(storageclass_list, count=1):
    """
    Function for creating pvc and multiple pvc

    Args:
        storageclass_list (list): This will contain storageclass list
        count (int): count specify no of pvc want's to create

    Returns:
        bool: True once all creation requests have been submitted
    """
    # '_' instead of unused loop variable 'i'
    for _ in range(count):
        sc_name = random.choice(storageclass_list)
        # Pass sc_name by keyword for consistency with other call sites
        pvc_obj = helpers.create_pvc(sc_name=sc_name)
        # NOTE(review): nothing here waits for Bound state despite this
        # log message — confirm callers don't rely on the PVCs being bound
        log.info(f"{pvc_obj.name} got Created and got Bounded")
    return True
def test_basics_rbd(self, test_fixture_rbd):
    """
    Testing basics: secret creation,
    storage class creation,pvc and pod with rbd

    Creates a uniquely-named PVC from the RBD storage class set up by
    the fixture, then a pod that mounts it. Both objects are stored in
    module globals for teardown.
    """
    global RBD_PVC_OBJ, RBD_POD_OBJ
    log.info('creating pvc for RBD ')
    pvc_name = helpers.create_unique_resource_name('test-rbd', 'pvc')
    RBD_PVC_OBJ = helpers.create_pvc(sc_name=RBD_SC_OBJ.name,
                                     pvc_name=pvc_name)
    # The backing PV name may not be populated right after creation;
    # refresh the object from the API if it is missing
    if RBD_PVC_OBJ.backed_pv is None:
        RBD_PVC_OBJ.reload()
    RBD_POD_OBJ = helpers.create_pod(
        interface_type=constants.CEPHBLOCKPOOL, pvc_name=RBD_PVC_OBJ.name)
def test_basics_cephfs(self, test_fixture_cephfs):
    """
    Testing basics: secret creation,
    storage class creation, pvc and pod with cephfs

    Creates a uniquely-named PVC from the CephFS storage class set up by
    the fixture, then a pod that mounts it. Both objects are stored in
    module globals for teardown.
    """
    global CEPHFS_PVC_OBJ, CEPHFS_POD_OBJ
    log.info('creating pvc for CephFS ')
    pvc_name = helpers.create_unique_resource_name('test-cephfs', 'pvc')
    CEPHFS_PVC_OBJ = helpers.create_pvc(sc_name=CEPHFS_SC_OBJ.name,
                                        pvc_name=pvc_name)
    log.info('creating cephfs pod')
    CEPHFS_POD_OBJ = helpers.create_pod(
        interface_type=constants.CEPHFILESYSTEM,
        pvc_name=CEPHFS_PVC_OBJ.name)
def create_pvc(storageclass_list, count=1):
    """
    Create `count` PVCs, each from a randomly chosen storage class.

    The created PVC objects are stored in the module-level PVC_OBJS
    list for later use/cleanup.

    Args:
        storageclass_list (list): This will contain storageclass list
        count (int): count specify no of pvc want's to create

    Returns:
        bool: True once all PVCs were created
    """
    global PVC_OBJS
    PVC_OBJS = []
    for _ in range(count):
        chosen_sc = random.choice(storageclass_list)
        created = helpers.create_pvc(chosen_sc)
        log.info(f"{created.name} got created and got Bounded")
        PVC_OBJS.append(created)
    return True
def create_pvc_and_verify_pvc_exists(sc_name, cbp_name):
    """
    Create pvc, verify pvc is bound in state and
    pvc exists on ceph side

    Args:
        sc_name (str): Name of the storage class to create the PVC from
        cbp_name (str): Ceph block pool in which the volume should exist

    Returns:
        PVC object of the created (and Bound) PVC
    """
    pvc_obj = helpers.create_pvc(sc_name=sc_name, size='10Gi')
    helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
    pvc_obj.reload()

    # Validate pv is created on ceph
    # ('f' prefix removed: the message has no placeholders)
    logger.info("Verifying PV exists on backend")
    assert not helpers.verify_volume_deleted_in_backend(
        interface=constants.CEPHBLOCKPOOL, image_uuid=pvc_obj.image_uuid,
        pool_name=cbp_name)
    return pvc_obj
def create_jenkins_pvc(self):
    """
    create jenkins pvc

    Creates one 'dependencies' PVC per configured project, using the
    default RBD storage class.

    Returns:
        List: pvc_objs
    """
    created_pvcs = []
    for proj in self.projects:
        log.info(f'create jenkins pvc on project {proj}')
        created_pvcs.append(
            create_pvc(
                pvc_name='dependencies',
                size='10Gi',
                sc_name=constants.DEFAULT_STORAGECLASS_RBD,
                namespace=proj,
            )
        )
    return created_pvcs
def factory(
    interface=constants.CEPHBLOCKPOOL,
    project=None,
    storageclass=None,
    size=None,
    custom_data=None,
    status=constants.STATUS_BOUND,
):
    """
    Args:
        interface (str): CephBlockPool or CephFileSystem. This decides
            whether a RBD based or CephFS resource is created.
            RBD is default.
        project (object): ocs_ci.ocs.resources.ocs.OCS instance
            of 'Project' kind.
        storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
            of 'StorageClass' kind.
        size (int): The requested size for the PVC
        custom_data (dict): If provided then PVC object is created
            by using these data. Parameters `project` and `storageclass`
            are not used but reference is set if provided.
        status (str): If provided then factory waits for object to reach
            desired state.

    Returns:
        object: helpers.create_pvc instance.
    """
    if custom_data:
        # Caller supplied a full PVC definition — use it verbatim
        pvc_obj = PVC(**custom_data)
        pvc_obj.create(do_reload=False)
    else:
        # Fall back to (possibly newly created) default project/SC
        project = project or project_factory()
        storageclass = storageclass or storageclass_factory(interface)
        pvc_size = f"{size}Gi" if size else None
        pvc_obj = helpers.create_pvc(sc_name=storageclass.name,
                                     namespace=project.namespace,
                                     size=pvc_size,
                                     wait=False)
        assert pvc_obj, "Failed to create PVC"

    if status:
        helpers.wait_for_resource_state(pvc_obj, status)
    # Keep back-references so teardown can resolve ownership
    pvc_obj.storageclass = storageclass
    pvc_obj.project = project

    # Track the instance for the fixture's finalizer
    instances.append(pvc_obj)
    return pvc_obj
def create_pvc(storageclass_list, count=1):
    """
    Create `count` PVCs, each from a randomly chosen storage class, and
    wait for every one of them to reach Bound state.

    The created PVC objects are stored in the module-level PVC_OBJS
    list for later use/cleanup.

    Args:
        storageclass_list (list): This will contain storageclass list
        count (int): count specify no of pvc want's to create

    Returns:
        bool: True once all PVCs are created and Bound
    """
    global PVC_OBJS
    PVC_OBJS = []
    for _ in range(count):
        chosen_sc = random.choice(storageclass_list)
        PVC_OBJS.append(helpers.create_pvc(chosen_sc))
    for created_pvc in PVC_OBJS:
        helpers.wait_for_resource_state(created_pvc, constants.STATUS_BOUND)
        created_pvc.reload()
    return True
def dynamic_pvc_base(self, interface_type, reclaim_policy):
    """
    Base function for Dynamic PVC creation tests
    Fetches the worker nodes name list, creates StorageClass and PVC

    Args:
        interface_type (str): CephBlockPool or CephFileSystem interface
        reclaim_policy (str): reclaimPolicy to set on the storage class

    Note:
        Creates the SC, a Bound PVC, and a first pod pinned to the
        first worker node; all resources are stored on ``self``.
    """
    self.interface_type = interface_type
    self.reclaim_policy = reclaim_policy
    self.worker_nodes_list = helpers.get_worker_nodes()
    # Pick the pool and secret that match the requested interface
    if self.interface_type == constants.CEPHBLOCKPOOL:
        self.interface_name = self.cbp_obj.name
        self.secret_name = self.rbd_secret_obj.name

    elif self.interface_type == constants.CEPHFILESYSTEM:
        self.interface_name = helpers.get_cephfs_data_pool_name()
        self.secret_name = self.cephfs_secret_obj.name

    logger.info(
        f"Creating Storage Class with reclaimPolicy: {self.reclaim_policy}"
    )
    self.sc_obj = helpers.create_storage_class(
        interface_type=self.interface_type,
        interface_name=self.interface_name,
        secret_name=self.secret_name,
        reclaim_policy=self.reclaim_policy
    )

    logger.info(f"Creating PVC with accessModes: {self.access_mode}")
    self.pvc_obj = helpers.create_pvc(
        sc_name=self.sc_obj.name, namespace=self.namespace,
        size=self.pvc_size, access_mode=self.access_mode
    )
    helpers.wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
    self.pvc_obj.reload()

    # Pin the first pod to a specific worker so node-based assertions
    # in the tests are deterministic
    logger.info(
        f"Creating first pod on node: {self.worker_nodes_list[0]}"
        f" with pvc {self.pvc_obj.name}"
    )
    self.pod_obj1 = helpers.create_pod(
        interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
        namespace=self.namespace, node_name=self.worker_nodes_list[0],
        pod_dict_path=constants.NGINX_POD_YAML
    )
    helpers.wait_for_resource_state(self.pod_obj1,
                                    constants.STATUS_RUNNING)
    self.pod_obj1.reload()