def test_statefulset_pod_deletion(core_api, storage_class, statefulset):  # NOQA
    """
    Test that a StatefulSet can spin up a new Pod with the same data after
    a previous Pod has been deleted.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Write some data to one of the pods.
    4. Delete that pod.
    5. Wait for the StatefulSet to recreate the pod.
    6. Verify the data in the pod.
    """
    statefulset_name = 'statefulset-pod-deletion-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    test_pod_name = statefulset_name + '-' + \
        str(randrange(statefulset['spec']['replicas']))
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    write_pod_volume_data(core_api, test_pod_name, test_data)

    # Not using delete_and_wait_pod here because there is a small chance the
    # StatefulSet recreates the Pod quickly enough that the function won't
    # detect that the Pod was deleted, which would time out and throw an
    # error.
    core_api.delete_namespaced_pod(name=test_pod_name, namespace='default',
                                   body=k8sclient.V1DeleteOptions())
    wait_statefulset(statefulset)

    resp = read_volume_data(core_api, test_pod_name)
    assert resp == test_data
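
# Illustrative only: the ``statefulset`` fixture used by these tests is
# defined elsewhere (conftest). The dict below is a hypothetical sketch of
# the manifest shape the tests assume, limited to the fields they actually
# read or mutate (names, selector labels, replicas, and the first
# volumeClaimTemplate); the container spec and claim name are placeholders.
EXAMPLE_STATEFULSET_MANIFEST = {
    'apiVersion': 'apps/v1',
    'kind': 'StatefulSet',
    'metadata': {'name': 'example'},
    'spec': {
        'replicas': 2,
        'serviceName': 'example',
        'selector': {'matchLabels': {'app': 'example'}},
        'template': {
            'metadata': {'labels': {'app': 'example'}},
            'spec': {'containers': []},  # placeholder container list
        },
        'volumeClaimTemplates': [{
            'metadata': {'name': 'pod-data'},  # hypothetical claim name
            'spec': {
                'accessModes': ['ReadWriteOnce'],
                'storageClassName': 'longhorn',
                'resources': {'requests': {'storage': '1Gi'}},
            },
        }],
    },
}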
def test_rwx_statefulset_scale_down_up(core_api, statefulset):  # NOQA
    """
    Test scaling down and up of pods attached to an RWX volume.

    1. Create a StatefulSet of 2 pods with VolumeClaimTemplate where
       accessMode is 'RWX'.
    2. Wait for StatefulSet pods to come up healthy.
    3. Write data and compute md5sum in both pods.
    4. Delete the pods.
    5. Wait for the pods to be terminated.
    6. Verify the share manager pods are no longer available and the volume
       is detached.
    7. Recreate the pods.
    8. Wait for the new pods to come up.
    9. Check the data md5sum in the new pods.
    """
    statefulset_name = 'statefulset-rwx-scale-down-up-test'
    share_manager_name = []

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    for i in range(2):
        pvc_name = \
            statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']\
            + '-' + statefulset_name + '-' + str(i)
        pv_name = get_volume_name(core_api, pvc_name)
        assert pv_name is not None

        share_manager_name.append('share-manager-' + pv_name)
        check_pod_existence(core_api, share_manager_name[i],
                            namespace=LONGHORN_NAMESPACE)

    md5sum_pod = []
    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        test_data = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api, test_pod_name, test_data)
        md5sum_pod.append(test_data)

    # Scale the StatefulSet down to zero replicas.
    statefulset['spec']['replicas'] = replicas = 0
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {'replicas': replicas}})

    # Wait for the pods to be terminated. ``ready_replicas`` is reported as
    # None rather than 0 once the StatefulSet is scaled down to zero.
    for i in range(DEFAULT_STATEFULSET_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(
            name=statefulset['metadata']['name'],
            namespace='default')
        if s_set.status.ready_replicas == replicas or \
                (replicas == 0 and not s_set.status.ready_replicas):
            break
        time.sleep(DEFAULT_STATEFULSET_INTERVAL)

    # The share manager pods should be gone once the volumes are detached.
    pods = core_api.list_namespaced_pod(namespace=LONGHORN_NAMESPACE)
    found = False
    for item in pods.items:
        if item.metadata.name == share_manager_name[0] or \
                item.metadata.name == share_manager_name[1]:
            found = True
            break
    assert not found

    # Scale the StatefulSet back up to two replicas.
    statefulset['spec']['replicas'] = replicas = 2
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {'replicas': replicas}})
    wait_statefulset(statefulset)

    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        command = 'cat /data/test'
        pod_data = exec_command_in_pod(core_api, command, test_pod_name,
                                       'default')
        assert pod_data == md5sum_pod[i]
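
# The scale-down/scale-up pattern above (patch ``spec.replicas``, then poll
# ``status.ready_replicas``) is repeated in test_upgrade below. A possible
# way to factor it out is sketched here; ``scale_statefulset_and_wait`` is a
# hypothetical helper, not one of the existing common helpers, and it reuses
# the timeout/interval constants already imported by this module.
def scale_statefulset_and_wait(apps_api, name, replicas, namespace='default'):
    # Patch the desired replica count on the StatefulSet.
    apps_api.patch_namespaced_stateful_set(
        name=name,
        namespace=namespace,
        body={'spec': {'replicas': replicas}})
    # Poll until the observed ready replica count matches the target. When
    # scaling to zero, ``ready_replicas`` is reported as None rather than 0.
    for _ in range(DEFAULT_STATEFULSET_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(
            name=name, namespace=namespace)
        if (s_set.status.ready_replicas or 0) == replicas:
            return
        time.sleep(DEFAULT_STATEFULSET_INTERVAL)
    assert False, 'StatefulSet %s did not reach %d ready replicas' \
        % (name, replicas)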
def test_upgrade(upgrade_image_tag, settings_reset, volume_name, pod_make, statefulset, storage_class):  # NOQA
    """
    Test Longhorn upgrade

    Prerequisite:
      - Disable the Auto Salvage setting.

    1. Find the upgrade image tag.
    2. Create a volume, generate and write data into the volume.
    3. Create a Pod using a volume, generate and write data.
    4. Create a StatefulSet with 2 replicas, generate and write data to
       their volumes.
    5. Keep all volumes attached.
    6. Upgrade the Longhorn system.
    7. Check that the Pod and the StatefulSet didn't restart after the
       upgrade.
    8. Check all volumes' data.
    9. Write data to the StatefulSet pods and the attached volume.
    10. Check the data written to the StatefulSet pods and the attached
        volume.
    11. Detach the volume, and delete the Pod and the StatefulSet to detach
        their volumes.
    12. Upgrade all volumes' engine images.
    13. Attach the volume, and recreate the Pod and the StatefulSet.
    14. Check all volumes' data.
    """
    new_ei_name = "longhornio/longhorn-engine:" + upgrade_image_tag

    client = get_longhorn_api_client()
    core_api = get_core_api_client()
    host_id = get_self_host_id()
    pod_data_path = "/data/test"

    pod_volume_name = generate_volume_name()

    auto_salvage_setting = client.by_id_setting(SETTING_AUTO_SALVAGE)
    setting = client.update(auto_salvage_setting, value="false")
    assert setting.name == SETTING_AUTO_SALVAGE
    assert setting.value == "false"

    # Create a volume attached to a node.
    volume1 = create_and_check_volume(client, volume_name, size=SIZE)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)
    volume1_data = write_volume_random_data(volume1)

    # Create a volume used by a Pod.
    pod_name, pv_name, pvc_name, pod_md5sum = \
        prepare_pod_with_data_in_mb(client, core_api, pod_make,
                                    pod_volume_name,
                                    data_path=pod_data_path,
                                    add_liveness_prope=False)

    # Create multiple volumes used by a StatefulSet.
    statefulset_name = 'statefulset-upgrade-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)
    statefulset_pod_info = get_statefulset_pod_info(core_api, statefulset)
    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api,
                              sspod_info['pod_name'], sspod_info['data'])

    # Upgrade Longhorn.
    assert longhorn_upgrade(upgrade_image_tag)

    client = get_longhorn_api_client()

    # Wait for 1 minute before checking pod restarts.
    time.sleep(60)

    pod = core_api.read_namespaced_pod(name=pod_name, namespace='default')
    assert pod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        sspod = core_api.read_namespaced_pod(name=sspod_info['pod_name'],
                                             namespace='default')
        assert \
            sspod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)

    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api,
                              sspod_info['pod_name'], sspod_info['data'])

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    volume1 = client.by_id_volume(volume_name)
    volume1_data = write_volume_random_data(volume1)
    check_volume_data(volume1, volume1_data)

    statefulset['spec']['replicas'] = replicas = 0
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={
            'spec': {
                'replicas': replicas
            }
        })
    delete_and_wait_pod(core_api, pod_name)

    volume = client.by_id_volume(volume_name)
    volume.detach()

    volumes = client.list_volume()
    for v in volumes:
        wait_for_volume_detached(client, v.name)

    engineimages = client.list_engine_image()
    for ei in engineimages:
        if ei.image == new_ei_name:
            new_ei = ei

    volumes = client.list_volume()
    for v in volumes:
        volume = client.by_id_volume(v.name)
        volume.engineUpgrade(image=new_ei.image)

    statefulset['spec']['replicas'] = replicas = 2
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={
            'spec': {
                'replicas': replicas
            }
        })
    wait_statefulset(statefulset)

    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    volume1 = client.by_id_volume(volume_name)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)
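
# Illustrative only: in test_upgrade above, the engine-image lookup leaves
# ``new_ei`` unbound if the upgraded image has not been registered yet. The
# hypothetical helper below retries the lookup for a bounded time before
# giving up; the retry count and interval are arbitrary values chosen for
# the sketch, not constants from the existing common helpers.
def find_engine_image(client, image, retries=60, interval=2):
    for _ in range(retries):
        # list_engine_image() returns the engine images currently known to
        # Longhorn; return the one whose image string matches.
        for ei in client.list_engine_image():
            if ei.image == image:
                return ei
        time.sleep(interval)
    assert False, 'engine image %s was not found' % image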