def test_read_write_once(self):
    """RWO block volume: only the first consumer pod may attach; any
    later pod, on the same node or another, must fail."""
    pvc_name = 'pvc-rwo'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, TestAccessModes.StorageClass,
        access_modes=['ReadWriteOnce'], volumeMode='Block')

    # The first pod to claim the volume is expected to reach Running
    pod1_name = 'pod-1-node-1'
    self._create_pod_on_specific_node(pod1_name, pvc_name, node_index=1)
    KubeUtils.wait_for_pod_to_be_running(pod1_name)

    # A second pod on the same node must be rejected
    pod2_name = 'pod-2-node-1'
    self._create_pod_on_specific_node(pod2_name, pvc_name, node_index=1)
    KubeUtils.wait_for_pod_to_fail(pod2_name)

    # A third pod on a different node must also be rejected
    pod3_name = 'pod-3-node-2'
    self._create_pod_on_specific_node(pod3_name, pvc_name, node_index=2)
    KubeUtils.wait_for_pod_to_fail(pod3_name)

    for name in (pod1_name, pod2_name, pod3_name):
        KubeUtils.delete_pod(name)
def test_step_2_create_consumer_pods(self):
    """Create one filesystem consumer pod per pre-provisioned volume
    (vol-0..vol-N-1), then wait for every pod to reach Running."""
    pod_names = ['consumer-{}'.format(i)
                 for i in range(TestConfig.NumberOfVolumes)]

    # Fire off all pod creations first so they start in parallel
    for index, pod_name in enumerate(pod_names):
        pvc_name = 'vol-{}'.format(index)
        pod = KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name)
        KubeUtils.create_pod(pod)

    # Then wait for each of them to come up
    for pod_name in pod_names:
        KubeUtils.wait_for_pod_to_be_running(pod_name)
def _test_extend_fs_volume(self, storage_class_name, fs_type):
    """Provision a filesystem PVC, mount it in a pod, expand it to 5Gi
    and verify the new size at every layer: the PVC object, the backing
    NVMesh volume, the block device, and the mounted file system."""
    pvc_name = 'pvc-extend-fs'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, storage_class_name, access_modes=['ReadWriteMany'])

    # Consumer pod that keeps the volume mounted while we resize it
    pod_name = 'extend-fs-consumer'
    KubeUtils.create_pod(KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name))
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
    KubeUtils.wait_for_pod_to_be_running(pod_name)

    # Request more capacity by patching the PVC spec
    new_size = '5Gi'
    patch = {'spec': {'resources': {'requests': {'storage': new_size}}}}
    logger.info("Extending Volume {}".format(pvc_name))
    KubeUtils.patch_pvc(pvc_name, patch)

    # The Kubernetes object should reflect the new size
    KubeUtils.wait_for_pvc_to_extend(pvc_name, new_size)

    # The backing NVMesh volume should grow to match (5 GiB in bytes)
    nvmesh_vol_name = KubeUtils.get_nvmesh_vol_name_from_pvc_name(pvc_name)
    size_5_gib_in_bytes = 5 * 1024 * 1024 * 1024
    NVMeshUtils.wait_for_nvmesh_vol_properties(
        nvmesh_vol_name, {'capacity': size_5_gib_in_bytes}, self, attempts=15)

    # The block device inside the container should report 5G (lsblk)
    KubeUtils.wait_for_block_device_resize(self, pod_name, nvmesh_vol_name, '5G')

    # The mounted file system should report the new size (df -h);
    # ext4 reserves some space, so df shows 4.9G instead of 5.0G
    expected_size = '4.9G' if fs_type == FSType.EXT4 else '5.0G'
    self._wait_for_file_system_resize(pod_name, expected_size)
def test_block_volume(self):
    """A raw block-mode PVC can be bound and consumed by a pod."""
    # Bind a block-mode PVC on the raid10 storage class
    pvc_name = 'test-block-volume'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, 'nvmesh-raid10', volumeMode='Block')

    # Start a pod that attaches the raw device and wait for it to run
    pod_name = 'block-volume-consumer'
    KubeUtils.create_pod(KubeUtils.get_block_consumer_pod_template(pod_name, pvc_name))
    KubeUtils.wait_for_pod_to_be_running(pod_name)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
def test_block_volume_extend(self):
    """Expand a raw block PVC to 5Gi and verify the new size on the PVC
    object, the backing NVMesh volume, and the device inside the pod."""
    pvc_name = 'pvc-extend-block'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, 'nvmesh-raid10', volumeMode='Block',
        access_modes=['ReadWriteMany'])

    # Consumer pod that keeps the device attached during the resize
    pod_name = 'extend-block-consumer'
    KubeUtils.create_pod(KubeUtils.get_block_consumer_pod_template(pod_name, pvc_name))
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
    KubeUtils.wait_for_pod_to_be_running(pod_name)

    # Request more capacity by patching the PVC spec
    new_size = '5Gi'
    patch = {'spec': {'resources': {'requests': {'storage': new_size}}}}
    logger.info("Extending Volume {}".format(pvc_name))
    KubeUtils.patch_pvc(pvc_name, patch)

    # The Kubernetes object should reflect the new size
    KubeUtils.wait_for_pvc_to_extend(pvc_name, new_size)

    # The backing NVMesh volume should grow to match (5 GiB in bytes)
    nvmesh_vol_name = KubeUtils.get_nvmesh_vol_name_from_pvc_name(pvc_name)
    size_5_gib_in_bytes = 5 * 1024 * 1024 * 1024
    NVMeshUtils.wait_for_nvmesh_vol_properties(
        nvmesh_vol_name, {'capacity': size_5_gib_in_bytes}, self, attempts=15)

    # The block device inside the container should report 5G (lsblk)
    KubeUtils.wait_for_block_device_resize(self, pod_name, nvmesh_vol_name, '5G')
def test_migration(self):
    """Verify a deployment pod using an NVMesh PVC migrates to a different
    node when its original node is tainted NoSchedule and the pod is deleted.

    Fixes over the previous version:
    - the poll loop asserted `attempts != 0` *inside* the loop, so the loop
      could never exit through its own condition and the timeout surfaced as
      a mid-loop assertion; polling is now a helper with a clean timeout.
    - the replacement pod was fetched with `pods[0]` with no wait and no
      emptiness check (potential IndexError and a racy `node_name` read);
      the same helper now waits for it too.
    """
    # Create the PVC
    pvc_name = 'pvc-migration-test'
    sc_name = 'nvmesh-raid10'
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # Create a deployment whose pod consumes the PVC
    dep_name = 'test-pod-migration'
    pod_template = KubeUtils.get_fs_consumer_pod_template(dep_name, pvc_name)
    deployment = KubeUtils.get_deployment_template(dep_name, pod_template['spec'])
    KubeUtils.create_deployment(deployment)
    self.addCleanup(lambda: KubeUtils.delete_deployment(dep_name))

    pod = self._wait_for_deployment_pod(dep_name, attempts=10)
    initial_pod_name = pod.metadata.name

    # Taint the node so the replacement pod cannot be scheduled on it
    initial_node = pod.spec.node_name
    logger.debug("Tainting node %s with noSchedule" % initial_node)
    KubeUtils.node_prevent_schedule(initial_node)
    self.addCleanup(lambda: KubeUtils.node_allow_schedule(initial_node))

    # Delete the pod (it is expected to be re-created on a different node)
    KubeUtils.delete_pod(initial_pod_name)
    KubeUtils.wait_for_pod_to_delete(initial_pod_name, attempts=120)

    # The replacement must be a new pod placed on a different node
    pod = self._wait_for_deployment_pod(dep_name, attempts=10)
    second_pod_name = pod.metadata.name
    self.assertNotEqual(initial_pod_name, second_pod_name)
    self.assertNotEqual(pod.spec.node_name, initial_node)
    KubeUtils.wait_for_pod_to_be_running(second_pod_name)

def _wait_for_deployment_pod(self, dep_name, attempts=10):
    """Poll until the deployment has at least one pod and return the first.

    Fails the test with a clear timeout message (instead of raising
    IndexError) if no pod appears within `attempts` one-second polls.
    """
    for _ in range(attempts):
        logger.debug('Waiting for deployment pods to be scheduled')
        pod_list = KubeUtils.get_pods_for_deployment(dep_name)
        if pod_list:
            return pod_list[0]
        time.sleep(1)
    self.fail('Timed out waiting for deployment pod to be scheduled')
def _test_fs_type(self, fs_type, **kwargs):
    """Provision a volume with the given file system type and verify a
    consumer pod can mount it and reach Running."""
    # Storage class dedicated to this file system type
    sc_name = self._create_storage_class_for_fs_type(fs_type, **kwargs)

    # PVC on that storage class
    pvc_name = 'test-{}'.format(fs_type)
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # Consumer pod mounting the PVC
    pod_name = 'consumer-{}'.format(fs_type)
    KubeUtils.create_pod(KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name))

    # Cleanup deletes the pod and waits until it is fully gone
    def remove_consumer():
        KubeUtils.delete_pod(pod_name)
        KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(remove_consumer)
    KubeUtils.wait_for_pod_to_be_running(pod_name)
def _check_pods_using_same_pvc_are_on_same_zone(self, storage_class_name):
    """Create several pods sharing one block PVC and verify the scheduler
    placed all of them in a single topology zone.

    Fixes over the previous version:
    - `dict.iteritems()` is Python-2-only; `items()` works on both 2 and 3.
    - the comment claimed "at least 2 different zones", contradicting both
      the method name and the actual assertion (`len(all_zones) == 1`).
    """
    pod_names = set('pod-' + str(i) for i in range(5))

    # Single shared PVC that every pod will attach to
    pvc_name = 'topology-single-pvc-' + storage_class_name
    KubeUtils.create_pvc_with_cleanup(self, pvc_name, storage_class_name,
                                      volumeMode='Block')

    # Create the pods
    for pod_name in pod_names:
        pod = KubeUtils.get_block_consumer_pod_template(pod_name, pvc_name)
        KubeUtils.create_pod(pod)

    # Cleanup: delete all pods first, then wait for each to be gone
    def pods_cleanup():
        for pod_name in pod_names:
            KubeUtils.delete_pod(pod_name)
        for pod_name in pod_names:
            KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(pods_cleanup)

    for pod_name in pod_names:
        KubeUtils.wait_for_pod_to_be_running(pod_name, attempts=15)

    # Collect Pod, Node and Zone data
    info = self._collect_pods_info(pod_names)

    # All pods sharing the PVC must have landed in exactly one zone
    all_zones = set(pod_info['zone'] for pod_info in info.values())
    print('all_zones = %s' % all_zones)
    self.assertEqual(len(all_zones), 1)
def test_read_write_once(self):
    """RWO filesystem volume: pods on the node holding the volume may
    mount it, but a pod on a different node is denied access."""
    pvc_name = 'pvc-rwo'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, TestAccessModes.StorageClass,
        access_modes=['ReadWriteOnce'], volumeMode='Filesystem')

    def start_consumer(pod_name, node_index):
        # Build the pod, pin it to the requested node, create it with
        # automatic cleanup, and return its actual name
        pod = KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name)
        self.set_pod_node(pod, node_index=node_index)
        self.create_pod_with_cleanup(pod)
        return pod['metadata']['name']

    # First pod should run
    name = start_consumer('pod-1-node-1', 1)
    KubeUtils.wait_for_pod_to_be_running(name)

    # Second pod on the same node should also run
    name = start_consumer('pod-2-node-1', 1)
    KubeUtils.wait_for_pod_to_be_running(name)

    # Third pod, on a different node, must be denied access
    name = start_consumer('pod-3-node-2', 2)
    KubeUtils.wait_for_pod_event(name, keyword='Access Mode Denied', attempts=20)
def _create_multiple_pods_each_with_own_pvc(self, storage_class_name, num_of_pods=6):
    """Create num_of_pods pods, each bound to its own block-mode PVC,
    wait for all of them to reach Running, and return the pod names."""
    pairs = [('pod-' + str(i), 'pvc-' + str(i)) for i in range(num_of_pods)]
    pod_names = set(name for name, _ in pairs)
    pvc_names = set(claim for _, claim in pairs)

    # PVCs first — one per pod, all with automatic cleanup
    for pvc_name in pvc_names:
        KubeUtils.create_pvc_with_cleanup(self, pvc_name, storage_class_name,
                                          volumeMode='Block')

    # Then the consumer pods, each attached to its matching PVC
    for pod_name, pvc_name in pairs:
        KubeUtils.create_pod(
            KubeUtils.get_block_consumer_pod_template(pod_name, pvc_name))

    # Cleanup: delete every pod, then wait for each deletion to complete
    def cleanup():
        for pod_name in pod_names:
            KubeUtils.delete_pod(pod_name)
        for pod_name in pod_names:
            KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(cleanup)

    for pod_name in pod_names:
        KubeUtils.wait_for_pod_to_be_running(pod_name, attempts=30)

    return pod_names
def _test_fs_type(self, fs_type):
    """Create a RAID-1 storage class for fs_type and verify a consumer
    pod can use a volume formatted with that file system."""
    # Storage class bound to the RAID-1 VPG with the requested fsType
    sc_name = 'raid1-{}'.format(fs_type)
    KubeUtils.create_storage_class(
        sc_name, {'fsType': fs_type, 'vpg': 'DEFAULT_RAID_1_VPG'})
    self.addCleanup(lambda: KubeUtils.delete_storage_class(sc_name))

    # PVC on that storage class
    pvc_name = 'test-{}'.format(fs_type)
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # Consumer pod mounting the PVC
    pod_name = 'consumer-{}'.format(fs_type)
    KubeUtils.create_pod(KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name))

    # Cleanup deletes the pod and waits until it is fully gone
    def remove_consumer():
        KubeUtils.delete_pod(pod_name)
        KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(remove_consumer)
    KubeUtils.wait_for_pod_to_be_running(pod_name)
def test_read_only_many_can_read_from_different_pods_and_nodes(self):
    """ROX block volume: multiple pods, whether on the same node or on
    different nodes, can all attach the volume simultaneously."""
    pvc_name = 'pvc-rox'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, TestAccessModes.StorageClass,
        access_modes=['ReadOnlyMany'], volumeMode='Block')

    # Every consumer should reach Running regardless of node placement:
    # two pods on node 1, a third on node 2
    placements = (
        ('pod-1-node-1', 1),
        ('pod-2-node-1', 1),
        ('pod-3-node-2', 2),
    )
    for pod_name, node_index in placements:
        pod = KubeUtils.get_block_consumer_pod_template(pod_name, pvc_name)
        self.set_pod_node(pod, node_index=node_index)
        self.create_pod_with_cleanup(pod)
        KubeUtils.wait_for_pod_to_be_running(pod['metadata']['name'])
def test_static_provisioning(self):
    """End-to-end static provisioning: pre-create an NVMesh volume,
    expose it through a hand-made PV, bind a PVC to it, and consume it
    from a pod as a raw block device."""
    # Pre-create the NVMesh volume the PV will reference
    nvmesh_volume_name = "csi-testing-static-prov"
    volume = Volume(
        name=nvmesh_volume_name,
        RAIDLevel=RAIDLevels.STRIPED_AND_MIRRORED_RAID_10,
        VPG='DEFAULT_RAID_10_VPG',
        capacity=5 * GiB,
        description="Volume for CSI Driver Static Provisioning")
    err, out = NVMeshUtils.getVolumeAPI().save([volume])
    self.assertIsNone(err, 'Error Creating NVMesh Volume. %s' % err)
    create_res = out[0]
    self.assertTrue(create_res['success'],
                    'Error Creating NVMesh Volume. %s' % create_res['error'])
    self.addCleanup(lambda: NVMeshUtils.getVolumeAPI().delete([volume]))

    # Build a PV pointing at the pre-existing NVMesh volume
    pv_name = 'pv-name-in-k8s'
    accessModes = ['ReadWriteOnce']
    volume_size = '5Gi'
    sc_name = 'nvmesh-raid10'
    pv = KubeUtils.get_pv_for_static_provisioning(
        pv_name, nvmesh_volume_name, accessModes, sc_name, volume_size)
    core_api.create_persistent_volume(pv)
    self.addCleanup(lambda: core_api.delete_persistent_volume(pv_name))

    # Sanity check: the PV is listed under its name
    pv_list = core_api.list_persistent_volume(
        field_selector='metadata.name={}'.format(pv_name))
    self.assertIsNotNone(pv_list)
    self.assertTrue(len(pv_list.items))
    self.assertEqual(pv_list.items[0].metadata.name, pv_name)

    # Bind a PVC to the statically provisioned PV
    pvc_name = 'pvc-static-prov'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, sc_name, access_modes=accessModes,
        storage=volume_size, volumeMode='Block')
    self.addCleanup(lambda: KubeUtils.delete_pvc(pvc_name))

    # Consume the volume from a long-lived shell pod
    pod_name = 'pod-static-prov'
    cmd = 'echo hello ; while true ; do sleep 60; done'
    pod = KubeUtils.get_shell_pod_template(pod_name, pvc_name, cmd,
                                           volume_mode_block=True)
    KubeUtils.create_pod(pod)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
    KubeUtils.wait_for_pod_to_be_running(pod_name)
def _wait_for_pods(self, num_of_volumes):
    """Block until every consumer pod (consumer-0..consumer-N-1) is Running."""
    for index in range(num_of_volumes):
        KubeUtils.wait_for_pod_to_be_running('consumer-{}'.format(index))