def _create_consumer_pods(self, num_of_volumes):
    """Create one FS consumer pod per volume: consumer-<i> mounting vol-<i>."""
    for index in range(num_of_volumes):
        template = KubeUtils.get_fs_consumer_pod_template(
            'consumer-{}'.format(index), 'vol-{}'.format(index))
        KubeUtils.create_pod(template)
def _delete_pods(num_of_volumes):
    """Issue deletion for every consumer pod, then wait for each to be gone.

    Deleting first and waiting second lets the deletions proceed in parallel.
    """
    names = ['consumer-{}'.format(i) for i in range(num_of_volumes)]
    for name in names:
        KubeUtils.delete_pod(name)
    for name in names:
        KubeUtils.wait_for_pod_to_delete(name)
def _create_storage_class_for_fs_type(self, fs_type, vpg='DEFAULT_RAID_1_VPG'):
    """Create a storage class for the given file-system type and return its name.

    Cleanup is registered before creation so the class is removed even if
    creation partially succeeds.
    """
    name = 'file-system-volume-{}'.format(fs_type)
    parameters = {'fsType': fs_type, 'vpg': vpg}
    self.addCleanup(lambda: KubeUtils.delete_storage_class(name))
    KubeUtils.create_storage_class(name, parameters)
    return name
def test_step_3_delete_pods(self):
    """Delete all consumer pods created in step 2 and wait for their removal."""
    names = ['consumer-{}'.format(i)
             for i in range(TestConfig.NumberOfVolumes)]
    # fire all deletions first so they run in parallel, then wait
    for name in names:
        KubeUtils.delete_pod(name)
    for name in names:
        KubeUtils.wait_for_pod_to_delete(name)
def test_pods_sharing_pvc_on_same_zone_first_consumer(self):
    """Pods sharing a single PVC must all land in one zone (Immediate binding)."""
    storage_class = 'test-pods-sharing-zone'
    KubeUtils.create_storage_class_with_cleanup(
        self,
        storage_class,
        PARAMS_CONCATENATED_VPG,
        volumeBindingMode=VolumeBindingMode.Immediate)
    self._check_pods_using_same_pvc_are_on_same_zone(storage_class)
def test_step_2_create_consumer_pods(self):
    """Create a consumer pod per volume from step 1 and wait until all run."""
    names = []
    for index in range(TestConfig.NumberOfVolumes):
        pod_name = 'consumer-{}'.format(index)
        template = KubeUtils.get_fs_consumer_pod_template(
            pod_name, 'vol-{}'.format(index))
        KubeUtils.create_pod(template)
        names.append(pod_name)
    # creation was fired for all pods first; now wait for each to be running
    for pod_name in names:
        KubeUtils.wait_for_pod_to_be_running(pod_name)
def test_step_1_create_volumes(self):
    """Create NumberOfVolumes PVCs on the nvmesh-raid1 class and wait for Bound."""
    storage_class = 'nvmesh-raid1'
    pvc_names = ['vol-{}'.format(i)
                 for i in range(TestConfig.NumberOfVolumes)]
    # submit every PVC first so provisioning overlaps, then wait for binding
    for pvc_name in pvc_names:
        KubeUtils.create_pvc(
            KubeUtils.get_pvc_template(pvc_name, storage_class))
    for pvc_name in pvc_names:
        KubeUtils.wait_for_pvc_to_bound(pvc_name)
def _wait_for_file_system_resize(self, pod_name, new_size, attempts=30):
    """Poll `df -h /mnt/vol` inside the pod until the FS reports new_size.

    Polls once per second for up to `attempts` iterations, then fails the
    test with assertEqual if the reported size never matched.
    """
    size = None
    while attempts:
        attempts = attempts - 1
        stdout = KubeUtils.run_command_in_container(
            pod_name, 'df -h /mnt/vol')
        if stdout:
            lines = stdout.split('\n')
            # NOTE(review): index 2 implies the size is on the *third* output
            # line, i.e. the device line of `df -h` wraps (long device path)
            # and columns[0] of the continuation line is the Size column —
            # TODO confirm against the actual container output format.
            line = lines[2]
            columns = line.split()
            size = columns[0]
            if size == new_size:
                # success
                logger.info('File System on {} was extended to {}'.format(
                    pod_name, new_size))
                return
            else:
                logger.debug(
                    'Waiting for file system to extend to {} current size is {}'
                    .format(new_size, size))
        time.sleep(1)
    self.assertEqual(size, new_size,
                     'Timed out waiting for File System to resize')
def create_pv_for_static_prov(self, nvmesh_volume_name, pv_name, sc_name,
                              volume_size):
    """Create a PV bound to an existing NVMesh volume and verify it exists.

    Registers a best-effort PV deletion for cleanup and returns volume_size.
    """
    pv = KubeUtils.get_pv_for_static_provisioning(
        pv_name=pv_name,
        nvmesh_volume_name=nvmesh_volume_name,
        accessModes=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'],
        sc_name=sc_name,
        volume_size=volume_size,
        volumeMode='Filesystem')
    core_api.create_persistent_volume(pv)

    def pv_cleanup(name):
        # the PV may already have been deleted by the test body
        try:
            KubeUtils.delete_pv(name)
        except ApiException:
            pass

    self.addCleanup(lambda: pv_cleanup(pv_name))

    found = core_api.list_persistent_volume(
        field_selector='metadata.name={}'.format(pv_name))
    self.assertIsNotNone(found)
    self.assertTrue(len(found.items))
    self.assertEqual(found.items[0].metadata.name, pv_name)
    return volume_size
def test_file_system_persistency(self):
    """Data written by one pod must be readable by a later pod on the same PVC."""
    pvc_name = 'pvc-fs-persistency-test'
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, 'nvmesh-raid10')

    expected = "Persistency Test"

    # writer pod drops a file on the volume
    self._run_shell_pod('job-writer', pvc_name,
                        "echo '{}' > /vol/file1".format(expected))

    # reader pod prints the file; its log must match what was written
    reader = 'job-reader'
    self._run_shell_pod(reader, pvc_name, "cat /vol/file1")
    self.assertEqual(KubeUtils.get_pod_log(reader).strip(), expected)
def _collect_pods_info(self, pod_names):
    """Gather scheduling info for each pod.

    Fix: the original initialized ``info[pod_name] = {}`` and then
    immediately overwrote it — the dead assignment is removed.

    Args:
        pod_names: iterable of pod names to inspect.

    Returns:
        dict mapping pod_name -> {'pod', 'node_name', 'node', 'zone'}.
    """
    info = {}
    for pod_name in pod_names:
        pod = KubeUtils.get_pod_by_name(pod_name)
        node_name = pod.spec.node_name
        node = KubeUtils.get_node_by_name(node_name)
        info[pod_name] = {
            'pod': pod,
            'node_name': node_name,
            'node': node,
            'zone': KubeUtils.get_zone_from_node(node),
        }
    return info
def _test_single_allowed_topology(self, volumeBindingMode):
    """Restrict a storage class to one zone and verify pods land in it.

    Fix: ``dict.keys()[0]`` only works on Python 2 (keys() returns a view
    on Python 3); ``next(iter(...))`` picks the same first key on both.
    """
    storage_class_name = 'sc-test-allowed-topologies'
    # pick an arbitrary zone from the configured topology
    zone_name = next(iter(TestConfig.Topology['zones']))
    allowed_zones = [zone_name]
    allowedTopologies = self.build_allowed_topologies(allowed_zones)
    KubeUtils.create_storage_class_with_cleanup(
        self,
        storage_class_name,
        PARAMS_CONCATENATED_VPG,
        volumeBindingMode=volumeBindingMode,
        allowedTopologies=allowedTopologies)
    pod_names = self._create_multiple_pods_each_with_own_pvc(
        storage_class_name, num_of_pods=6)

    # collect Pod, Node and Zone data
    info = self._collect_pods_info(pod_names)
    self.make_sure_pods_in_correct_zone(allowed_zones, info)
def _test_raid_type(self, raid_type):
    """Provision a PVC on the nvmesh-<raid> class and verify the RAID level."""
    sc_name = 'nvmesh-{}'.format(raid_type).replace('_', '-')
    pvc_name = sc_name
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # wait for nvmesh volume to be created
    nvmesh_vol_name = KubeUtils.get_nvmesh_vol_name_from_pvc_name(pvc_name)
    NVMeshUtils.wait_for_nvmesh_volume(nvmesh_vol_name)

    # verify NVMesh Volumes Properties
    NVMeshUtils.wait_for_nvmesh_vol_properties(
        nvmesh_vol_name, {'raidLevel': raid_type}, self)

    def cleanup_volume():
        KubeUtils.delete_pvc(pvc_name)
        KubeUtils.wait_for_pvc_to_delete(pvc_name)

    self.addCleanup(cleanup_volume)
def setUpClass(cls):
    """Pick a zone that has more than one node for the access-mode tests.

    Fix: ``iteritems()`` is Python-2-only; ``items()`` iterates identically
    on both Python 2 and 3.

    Raises:
        ValueError: if no zone in the cluster topology has >1 node.
    """
    zones = KubeUtils.get_all_node_names_by_zone()
    TestAccessModes.node_to_zone_map = zones
    for zone, nodes in zones.items():
        if len(nodes) > 1:
            print('Picked zone {} zone with {} nodes: {}'.format(
                zone, len(nodes), nodes))
            TestAccessModes.used_zone = zone
            TestAccessModes.used_nodes = nodes
            break
    if not TestAccessModes.used_zone:
        raise ValueError(
            'Test requires at least one zone with more than one node. '
            'found topology: %s' % zones)
def test_block_volume(self):
    """A raw-block PVC can be consumed by a pod that reaches Running state."""
    # create the PVC in Block volume mode
    pvc_name = 'test-block-volume'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, 'nvmesh-raid10', volumeMode='Block')

    # consumer pod attaching the block device
    pod_name = 'block-volume-consumer'
    KubeUtils.create_pod(
        KubeUtils.get_block_consumer_pod_template(pod_name, pvc_name))
    KubeUtils.wait_for_pod_to_be_running(pod_name)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
def _test_fs_type(self, fs_type):
    """End-to-end check of a file-system type: SC -> PVC -> running consumer pod."""
    # Create Storage class for the specific File System Type
    sc_name = 'raid1-{}'.format(fs_type)
    KubeUtils.create_storage_class(
        sc_name, {'fsType': fs_type, 'vpg': 'DEFAULT_RAID_1_VPG'})
    self.addCleanup(lambda: KubeUtils.delete_storage_class(sc_name))

    # create the PVC
    pvc_name = 'test-{}'.format(fs_type)
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # Create Consumer pod
    pod_name = 'consumer-{}'.format(fs_type)
    KubeUtils.create_pod(
        KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name))

    def cleanup_pod():
        KubeUtils.delete_pod(pod_name)
        KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(cleanup_pod)
    KubeUtils.wait_for_pod_to_be_running(pod_name)
def _test_storage_class_case(self, sc_name, params):
    """Create SC + PVC and verify the NVMesh volume carries the SC parameters."""
    pvc_name = 'pvc-{}'.format(sc_name)
    KubeUtils.create_storage_class(sc_name, params)
    self.addCleanup(lambda: KubeUtils.delete_storage_class(sc_name))
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    nvmesh_vol_name = KubeUtils.get_nvmesh_vol_name_from_pvc_name(pvc_name)
    NVMeshUtils.wait_for_nvmesh_volume(nvmesh_vol_name)

    # Verify NVMesh Volume has the required properties
    NVMeshUtils.wait_for_nvmesh_vol_properties(nvmesh_vol_name, params, self)
def _test_fs_type(self, fs_type, **kwargs):
    """End-to-end fs_type check using the shared storage-class helper."""
    # Create Storage class for the specific File System Type
    sc_name = self._create_storage_class_for_fs_type(fs_type, **kwargs)

    # create the PVC
    pvc_name = 'test-{}'.format(fs_type)
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # Create Consumer pod
    pod_name = 'consumer-{}'.format(fs_type)
    KubeUtils.create_pod(
        KubeUtils.get_fs_consumer_pod_template(pod_name, pvc_name))

    def cleanup_pod():
        KubeUtils.delete_pod(pod_name)
        KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(cleanup_pod)
    KubeUtils.wait_for_pod_to_be_running(pod_name)
def _check_pods_using_same_pvc_are_on_same_zone(self, storage_class_name):
    """Create 5 pods sharing one PVC and assert they all run in ONE zone.

    Fixes: the original comment said "at least 2 different zones" which
    contradicted the assertion (``assertEqual(len(all_zones), 1)``); and
    ``iteritems()`` is Python-2-only — ``items()`` behaves identically here.
    """
    pod_names = set()
    for i in range(5):
        pod_names.add('pod-' + str(i))

    # create the single shared PVC
    pvc_name = 'topology-single-pvc-' + storage_class_name
    KubeUtils.create_pvc_with_cleanup(self, pvc_name, storage_class_name,
                                      volumeMode='Block')

    # create all pods attached to that PVC
    for pod_name in pod_names:
        pod = KubeUtils.get_block_consumer_pod_template(pod_name, pvc_name)
        KubeUtils.create_pod(pod)

    # cleanup: delete all first, then wait, so deletions run in parallel
    def pods_cleanup():
        for pod_name in pod_names:
            KubeUtils.delete_pod(pod_name)
        for pod_name in pod_names:
            KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(pods_cleanup)

    for pod_name in pod_names:
        KubeUtils.wait_for_pod_to_be_running(pod_name, attempts=15)

    # collect Pod, Node and Zone data
    info = self._collect_pods_info(pod_names)

    # all pods sharing the PVC must have been scheduled to a single zone
    all_zones = set()
    for pod_name, pod_info in info.items():
        all_zones.add(pod_info['zone'])

    print('all_zones = %s' % all_zones)
    self.assertEqual(len(all_zones), 1)
def _test_storage_class_case(self, sc_name, params):
    """Create SC + PVC (with a description) and verify the NVMesh volume props.

    Fix: the original bound the result of ``wait_for_nvmesh_volume`` to an
    unused local ``mgmt_address``; the dead variable is removed.
    """
    pvc_name = 'pvc-{}'.format(sc_name)
    KubeUtils.create_storage_class(sc_name, params)
    self.addCleanup(lambda: KubeUtils.delete_storage_class(sc_name))

    additional_fields = {
        'description': 'Storage Class Parameters test for ' + sc_name
    }
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name,
                                           **additional_fields)

    nvmesh_vol_name = KubeUtils.get_nvmesh_vol_name_from_pvc_name(pvc_name)
    NVMeshUtils.wait_for_nvmesh_volume(nvmesh_vol_name)

    # Verify NVMesh Volume has the required properties
    NVMeshUtils.wait_for_nvmesh_vol_properties(nvmesh_vol_name, params, self)
def _create_multiple_pods_each_with_own_pvc(self, storage_class_name,
                                            num_of_pods=6):
    """Create num_of_pods (pod-i, pvc-i) pairs and wait for all pods to run.

    Returns the set of created pod names; pod/PVC cleanup is registered.
    """
    pod_names = {'pod-' + str(i) for i in range(num_of_pods)}
    pvc_names = {'pvc-' + str(i) for i in range(num_of_pods)}

    # create PVCs (each with its own cleanup)
    for pvc_name in pvc_names:
        KubeUtils.create_pvc_with_cleanup(self, pvc_name, storage_class_name,
                                          volumeMode='Block')

    # create one pod per PVC
    for i in range(num_of_pods):
        template = KubeUtils.get_block_consumer_pod_template(
            'pod-' + str(i), 'pvc-' + str(i))
        KubeUtils.create_pod(template)

    # pods cleanup: delete all first, then wait for each
    def cleanup():
        for pod_name in pod_names:
            KubeUtils.delete_pod(pod_name)
        for pod_name in pod_names:
            KubeUtils.wait_for_pod_to_delete(pod_name)

    self.addCleanup(cleanup)

    for pod_name in pod_names:
        KubeUtils.wait_for_pod_to_be_running(pod_name, attempts=30)

    return pod_names
def test_static_provisioning(self):
    """Statically provision a pre-existing NVMesh volume through a PV/PVC pair.

    Flow: create the NVMesh volume directly via the management API, wrap it
    in a PV, bind a PVC to it, and verify a consumer pod reaches Running.
    Cleanups are registered immediately after each resource is created so
    teardown unwinds in reverse (LIFO) order.
    """
    # Create NVMesh Volume
    nvmesh_volume_name = "csi-testing-static-prov"
    volume = Volume(
        name=nvmesh_volume_name,
        RAIDLevel=RAIDLevels.STRIPED_AND_MIRRORED_RAID_10,
        VPG='DEFAULT_RAID_10_VPG',
        capacity=5 * GiB,
        description="Volume for CSI Driver Static Provisioning")
    err, out = NVMeshUtils.getVolumeAPI().save([volume])
    # the API returns a transport-level error plus a per-volume result list
    self.assertIsNone(err, 'Error Creating NVMesh Volume. %s' % err)
    create_res = out[0]
    self.assertTrue(
        create_res['success'],
        'Error Creating NVMesh Volume. %s' % create_res['error'])
    self.addCleanup(lambda: NVMeshUtils.getVolumeAPI().delete([volume]))

    # Create PV referencing the NVMesh volume
    pv_name = 'pv-name-in-k8s'
    accessModes = ['ReadWriteOnce']
    volume_size = '5Gi'
    sc_name = 'nvmesh-raid10'
    pv = KubeUtils.get_pv_for_static_provisioning(pv_name,
                                                  nvmesh_volume_name,
                                                  accessModes, sc_name,
                                                  volume_size)
    core_api.create_persistent_volume(pv)
    self.addCleanup(lambda: core_api.delete_persistent_volume(pv_name))

    # verify the PV is visible through the API
    pv_list = core_api.list_persistent_volume(
        field_selector='metadata.name={}'.format(pv_name))
    self.assertIsNotNone(pv_list)
    self.assertTrue(len(pv_list.items))
    self.assertEqual(pv_list.items[0].metadata.name, pv_name)

    # Create PVC that should bind to the statically provisioned PV
    pvc_name = 'pvc-static-prov'
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name,
                                           access_modes=accessModes,
                                           storage=volume_size,
                                           volumeMode='Block')
    self.addCleanup(lambda: KubeUtils.delete_pvc(pvc_name))

    # Create Consumer pod that holds the block device open
    pod_name = 'pod-static-prov'
    cmd = 'echo hello ; while true ; do sleep 60; done'
    pod = KubeUtils.get_shell_pod_template(pod_name, pvc_name, cmd,
                                           volume_mode_block=True)
    KubeUtils.create_pod(pod)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
    KubeUtils.wait_for_pod_to_be_running(pod_name)
def pods_cleanup():
    # delete every pod first so deletions proceed in parallel, then wait
    for name in pod_names:
        KubeUtils.delete_pod(name)
    for name in pod_names:
        KubeUtils.wait_for_pod_to_delete(name)
def test_read_write_once(self):
    """RWO volume: pods on one node may share it; a pod on another node is denied."""
    pvc_name = 'pvc-rwo'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, TestAccessModes.StorageClass,
        access_modes=['ReadWriteOnce'], volumeMode='Filesystem')

    def launch(name, node_index):
        # pin the consumer pod to a specific node and register cleanup
        pod = KubeUtils.get_fs_consumer_pod_template(name, pvc_name)
        self.set_pod_node(pod, node_index=node_index)
        self.create_pod_with_cleanup(pod)
        return pod['metadata']['name']

    # First Pod Should Succeed
    KubeUtils.wait_for_pod_to_be_running(launch('pod-1-node-1', 1))

    # Second Pod on the same Node - should be running
    KubeUtils.wait_for_pod_to_be_running(launch('pod-2-node-1', 1))

    # Third Pod on a different Node should Fail
    KubeUtils.wait_for_pod_event(launch('pod-3-node-2', 2),
                                 keyword='Access Mode Denied', attempts=20)
def test_read_only_many_can_read_from_different_pods_and_nodes(self):
    """ROX block volume: all pods run, regardless of which node they land on."""
    pvc_name = 'pvc-rox'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, TestAccessModes.StorageClass,
        access_modes=['ReadOnlyMany'], volumeMode='Block')

    # every (pod, node) combination below is expected to reach Running
    for pod_name, node_index in (('pod-1-node-1', 1),
                                 ('pod-2-node-1', 1),
                                 ('pod-3-node-2', 2)):
        pod = KubeUtils.get_block_consumer_pod_template(pod_name, pvc_name)
        self.set_pod_node(pod, node_index=node_index)
        self.create_pod_with_cleanup(pod)
        KubeUtils.wait_for_pod_to_be_running(pod['metadata']['name'])
def test_migration(self):
    """Verify a deployment's pod migrates to another node when its node is cordoned.

    Fixes: the polling loop never decremented ``attempts`` so it could spin
    forever if the deployment pod never appeared, and ``pod.metadata`` was
    dereferenced without checking the poll succeeded (AttributeError on None).
    """
    # create the PVC
    pvc_name = 'pvc-migration-test'
    sc_name = 'nvmesh-raid10'
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name)

    # Create Deployment
    dep_name = 'test-pod-migration'
    pod = KubeUtils.get_fs_consumer_pod_template(dep_name, pvc_name)
    deployment = KubeUtils.get_deployment_template(dep_name, pod['spec'])
    KubeUtils.create_deployment(deployment)
    self.addCleanup(lambda: KubeUtils.delete_deployment(dep_name))

    # poll (bounded) until the deployment's pod appears
    attempts = 10
    pod = None
    while attempts:
        attempts -= 1  # was missing: loop could never terminate on failure
        pod_list = KubeUtils.get_pods_for_deployment(dep_name)
        if len(pod_list):
            pod = pod_list[0]
            break
        time.sleep(1)
    self.assertIsNotNone(
        pod, 'Timed out waiting for a pod of deployment %s' % dep_name)

    initial_pod_name = pod.metadata.name

    # Set node as NoSchedule
    initial_node = pod.spec.node_name
    KubeUtils.node_prevent_schedule(initial_node)
    self.addCleanup(lambda: KubeUtils.node_allow_schedule(initial_node))

    # Delete the pod (it is expected to be re-created on a different node)
    KubeUtils.delete_pod(initial_pod_name)
    KubeUtils.wait_for_pod_to_delete(initial_pod_name)

    # Get the second Pod and verify it moved
    pods = KubeUtils.get_pods_for_deployment(dep_name)
    pod = pods[0]
    second_pod_name = pod.metadata.name
    self.assertNotEqual(initial_pod_name, second_pod_name)
    self.assertNotEqual(pod.spec.node_name, initial_node)
    KubeUtils.wait_for_pod_to_be_running(second_pod_name)
def test_read_only_many(self):
    """ROX block volume: multiple pods across nodes all reach Running."""
    pvc_name = 'pvc-rox'
    KubeUtils.create_pvc_and_wait_to_bound(
        self, pvc_name, TestAccessModes.StorageClass,
        access_modes=['ReadOnlyMany'], volumeMode='Block')

    # each pod (on node 1, node 1 again, and node 2) should succeed
    placements = (('pod-1-node-1', 1),
                  ('pod-2-node-1', 1),
                  ('pod-3-node-2', 2))
    for name, node_index in placements:
        self._create_pod_on_specific_node(name, pvc_name,
                                          node_index=node_index)
        KubeUtils.wait_for_pod_to_be_running(name)

    for name, _ in placements:
        KubeUtils.delete_pod(name)
def _create_pod_on_specific_node(self, pod_name, pvc_name, node_index):
    """Create a block-consumer pod pinned (via nodeSelector) to worker node_index."""
    template = KubeUtils.get_block_consumer_pod_template(pod_name, pvc_name)
    template['spec']['nodeSelector'] = {'worker-index': str(node_index)}
    KubeUtils.create_pod(template)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
def pv_cleanup(pv_name):
    # best-effort delete: the PV may already be gone by cleanup time
    try:
        KubeUtils.delete_pv(pv_name)
    except ApiException:
        pass
def test_mixed_access_modes(self):
    """Reuse one retained PV under PVCs with different access modes.

    This test creates a PV with reclaimPolicy: Retain (making sure it is
    not deleted when the bound PVC is deleted), then creates PVCs with
    different access modes against the same PV; some PV cleanup is needed
    in between, because a released PV keeps its claimRef and will not bind
    a new PVC until it is made Available again.
    """
    # Create Storage Class with reclaimPolicy: Retain
    sc_name = 'sc-nvmesh-retain'
    KubeUtils.create_storage_class(sc_name,
                                   {'vpg': 'DEFAULT_RAID_10_VPG'},
                                   reclaimPolicy='Retain')
    self.addCleanup(lambda: KubeUtils.delete_storage_class(sc_name))

    # Create NVMesh Volume directly via the management API
    nvmesh_volume_name = "vol1"
    volume = Volume(
        name=nvmesh_volume_name,
        RAIDLevel=RAIDLevels.STRIPED_AND_MIRRORED_RAID_10,
        VPG='DEFAULT_RAID_10_VPG',
        capacity=5 * GiB,
        description="Volume for CSI Driver Static Provisioning")
    err, out = NVMeshUtils.getVolumeAPI().save([volume])
    self.assertIsNone(err, 'Error Creating NVMesh Volume. %s' % err)
    create_res = out[0]
    self.assertTrue(
        create_res['success'],
        'Error Creating NVMesh Volume. %s' % create_res['error'])
    self.addCleanup(lambda: NVMeshUtils.getVolumeAPI().delete([volume]))

    # Create PV wrapping the NVMesh volume
    pv_name = 'csi-testing-pv-vol1'
    volume_size = '5Gi'
    self.create_pv_for_static_prov(nvmesh_volume_name, pv_name, sc_name,
                                   volume_size)

    # Create PVC with accessMode ReadWriteOnce
    pvc_name = 'pvc-rwo'
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name,
                                           access_modes=['ReadWriteOnce'],
                                           storage=volume_size,
                                           volumeMode='Filesystem')
    self.addCleanup(lambda: KubeUtils.delete_pvc(pvc_name))

    # Create Pod to create a file on the File System
    pod_name = 'pod-file-writer'
    cmd = 'echo hello > /vol/file1'
    pod = KubeUtils.get_shell_pod_template(pod_name, pvc_name, cmd)
    KubeUtils.create_pod(pod)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
    KubeUtils.wait_for_pod_to_complete(pod_name)
    KubeUtils.delete_pod_and_wait(pod_name)

    # Delete the PVC; the retained PV moves to Released
    KubeUtils.delete_pvc(pvc_name)
    KubeUtils.wait_for_pv_to_be_released(pv_name)

    # Make PV Available for the next PVC (by removing the claimRef field
    # from the PV — OR — deleting and recreating the PV)
    KubeUtils.delete_pv(pv_name)
    KubeUtils.wait_for_pv_to_delete(pv_name)
    self.create_pv_for_static_prov(nvmesh_volume_name, pv_name, sc_name,
                                   volume_size)

    # Create PVC With ReadOnlyMany
    pvc_name = 'pvc-rox'
    KubeUtils.create_pvc_and_wait_to_bound(self, pvc_name, sc_name,
                                           access_modes=['ReadOnlyMany'],
                                           storage=volume_size,
                                           volumeMode='Filesystem')
    self.addCleanup(lambda: KubeUtils.delete_pvc(pvc_name))

    # 7. Create 2 Pods that will read the File System - Should Succeed
    pod1_name = 'pod-file-reader1'
    cmd = 'cat /vol/file1'
    pod = KubeUtils.get_shell_pod_template(pod1_name, pvc_name, cmd)
    KubeUtils.create_pod(pod)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod1_name))

    pod2_name = 'pod-file-reader2'
    pod = KubeUtils.get_shell_pod_template(pod2_name, pvc_name, cmd)
    KubeUtils.create_pod(pod)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod2_name))
    KubeUtils.wait_for_pod_to_complete(pod2_name)

    # 8. Create 1 Pod that will try to write to the FileSystem - Should Fail
    pod_name = 'pod-file-writer'
    cmd = 'echo hello > /vol/file1'
    pod = KubeUtils.get_shell_pod_template(pod_name, pvc_name, cmd)
    KubeUtils.create_pod(pod)
    self.addCleanup(lambda: KubeUtils.delete_pod_and_wait(pod_name))
    KubeUtils.wait_for_pod_to_fail(pod_name)