def _create_consumer_pods(self, num_of_volumes):
    """Create one filesystem consumer pod per volume.

    Pod i is named 'consumer-i' and mounts the PVC named 'vol-i'.
    """
    for index in range(num_of_volumes):
        template = KubeUtils.get_fs_consumer_pod_template(
            'consumer-{}'.format(index),
            'vol-{}'.format(index))
        KubeUtils.create_pod(template)
def test_step_2_create_consumer_pods(self):
    """Create a consumer pod for each provisioned volume, then wait for
    every pod to reach the Running state.
    """
    pod_names = ['consumer-{}'.format(i)
                 for i in range(TestConfig.NumberOfVolumes)]

    # Create all pods first so they start up in parallel...
    for index, name in enumerate(pod_names):
        template = KubeUtils.get_fs_consumer_pod_template(
            name, 'vol-{}'.format(index))
        KubeUtils.create_pod(template)

    # ...then wait for each one to be running.
    for name in pod_names:
        KubeUtils.wait_for_pod_to_be_running(name)
def test_read_write_once(self):
    """Verify ReadWriteOnce semantics: pods on the node that first attached
    the volume may use it, while a pod on any other node is denied access.
    """
    pvc_name = 'pvc-rwo'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, TestAccessModes.StorageClass,
        access_modes=['ReadWriteOnce'], volumeMode='Filesystem')

    def launch_consumer(pod_name, node_index):
        # Schedule a consumer pod on the requested node, register cleanup,
        # and return the created pod's name.
        pod = KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name)
        self.set_pod_node(pod, node_index=node_index)
        self.create_pod_with_cleanup(pod)
        return pod['metadata']['name']

    # First pod on node 1 should succeed
    name = launch_consumer('pod-1-node-1', 1)
    KubeUtils.wait_for_pod_to_be_running(name)

    # Second pod on the same node should also be running
    name = launch_consumer('pod-2-node-1', 1)
    KubeUtils.wait_for_pod_to_be_running(name)

    # Third pod on a different node must be rejected
    name = launch_consumer('pod-3-node-2', 2)
    KubeUtils.wait_for_pod_event(name, keyword='Access Mode Denied', attempts=20)
def _test_extend_fs_volume(self, storage_class_name, fs_type):
    """Extend a filesystem PVC to 5Gi and verify the resize propagates
    through the Kubernetes object, the NVMesh backend volume, the block
    device in the container, and the mounted file system.
    """
    pvc_name = 'pvc-extend-fs'
    pod_name = 'extend-fs-consumer'

    # Provision the PVC and a consumer pod mounting it
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, storage_class_name, access_modes=['ReadWriteMany'])

    consumer = KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name)
    KubeUtils.create_pod(consumer)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
    KubeUtils.wait_for_pod_to_be_running(pod_name)

    # Request a larger capacity by patching the PVC spec
    new_size = '5Gi'
    logger.info("Extending Volume {}".format(pvc_name))
    KubeUtils.patch_pvc(pvc_name, {
        'spec': {
            'resources': {
                'requests': {
                    'storage': new_size
                }
            },
        }
    })

    # The Kubernetes PVC object should reflect the new size
    KubeUtils.wait_for_pvc_to_extend(pvc_name, new_size)

    # The backing NVMesh volume should grow to 5 GiB
    nvmesh_vol_name = KubeUtils.get_nvmesh_vol_name_from_pvc_name(pvc_name)
    size_5_gib_in_bytes = 5368709120
    NVMeshUtils.wait_for_nvmesh_vol_properties(
        nvmesh_vol_name, {'capacity': size_5_gib_in_bytes}, self, attempts=15)

    # The block device inside the container should resize (checked via lsblk)
    KubeUtils.wait_for_block_device_resize(self, pod_name, nvmesh_vol_name, '5G')

    # The mounted file system should resize too (checked via df -h);
    # ext4 reserves some space, so it reports slightly under 5G
    expected_size = '4.9G' if fs_type == FSType.EXT4 else '5.0G'
    self._wait_for_file_system_resize(pod_name, expected_size)
def _wait_for_deployment_pod(self, dep_name, attempts=10):
    """Poll until the deployment has at least one pod scheduled; return it.

    Fails the test if no pod appears within `attempts` one-second polls.
    """
    while attempts:
        logger.debug('Waiting for deployment pods to be scheduled')
        pod_list = KubeUtils.get_pods_for_deployment(dep_name)
        if pod_list:
            return pod_list[0]
        attempts = attempts - 1
        self.assertNotEqual(attempts, 0, 'Timed out waiting for deployment pod to be scheduled')
        time.sleep(1)

def test_migration(self):
    """Verify that a deployment pod using an NVMesh volume migrates to a
    different node after its original node is tainted and the pod deleted.
    """
    # Create the PVC
    pvc_name = 'pvc-migration-test'
    sc_name = 'nvmesh-raid10'
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # Create a deployment consuming the PVC
    dep_name = 'test-pod-migration'
    pod = KubeUtils.get_fs_consumer_pod_template(dep_name, pvc_name)
    deployment = KubeUtils.get_deployment_template(dep_name, pod['spec'])
    KubeUtils.create_deployment(deployment)
    self.addCleanup(lambda: KubeUtils.delete_deployment(dep_name))

    pod = self._wait_for_deployment_pod(dep_name)
    initial_pod_name = pod.metadata.name

    # Taint the pod's node so the replacement cannot land on it
    initial_node = pod.spec.node_name
    logger.debug("Tainting node %s with noSchedule" % initial_node)
    KubeUtils.node_prevent_schedule(initial_node)
    self.addCleanup(lambda: KubeUtils.node_allow_schedule(initial_node))

    # Delete the pod (it is expected to be re-created on a different node)
    KubeUtils.delete_pod(initial_pod_name)
    KubeUtils.wait_for_pod_to_delete(initial_pod_name, attempts=120)

    # Wait for the replacement pod. Polling here (instead of indexing the
    # pod list immediately) fixes a race where get_pods_for_deployment could
    # return an empty list right after the deletion, raising IndexError.
    pod = self._wait_for_deployment_pod(dep_name)
    second_pod_name = pod.metadata.name
    self.assertNotEqual(initial_pod_name, second_pod_name)
    self.assertNotEqual(pod.spec.node_name, initial_node)

    KubeUtils.wait_for_pod_to_be_running(second_pod_name)
def _test_fs_type(self, fs_type, **kwargs):
    """Provision a volume formatted with the given file system type and
    verify a consumer pod can mount it and reach Running.
    """
    # Storage class tailored to the requested fsType
    sc_name = self._create_storage_class_for_fs_type(fs_type, **kwargs)

    # PVC bound against that storage class
    pvc_name = 'test-{}'.format(fs_type)
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # Consumer pod mounting the PVC
    pod_name = 'consumer-{}'.format(fs_type)
    KubeUtils.create_pod(
        KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name))

    def cleanup_pod():
        # Remove the pod and block until it is fully gone
        KubeUtils.delete_pod(pod_name)
        KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(cleanup_pod)
    KubeUtils.wait_for_pod_to_be_running(pod_name)
def _test_fs_type(self, fs_type):
    """Provision a RAID-1 volume with the given file system type and verify
    a consumer pod can mount it and reach Running.
    """
    # Storage class for the requested fsType, backed by the RAID-1 VPG
    sc_name = 'raid1-{}'.format(fs_type)
    KubeUtils.create_storage_class(
        sc_name, {'fsType': fs_type, 'vpg': 'DEFAULT_RAID_1_VPG'})
    self.addCleanup(lambda: KubeUtils.delete_storage_class(sc_name))

    # PVC bound against that storage class
    pvc_name = 'test-{}'.format(fs_type)
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # Consumer pod mounting the PVC
    pod_name = 'consumer-{}'.format(fs_type)
    KubeUtils.create_pod(
        KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name))

    def cleanup_pod():
        # Remove the pod and block until it is fully gone
        KubeUtils.delete_pod(pod_name)
        KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(cleanup_pod)
    KubeUtils.wait_for_pod_to_be_running(pod_name)