def provision_and_wait_pv(client, core_api, storage_class, pvc):  # NOQA
    """
    Provision a new Longhorn Volume via Storage Class and wait for the Volume
    and its associated resources to be created.

    This method also waits for the Kubernetes Status to be properly set on
    the Volume.

    :param client: An instance of the Longhorn client.
    :param core_api: An instance of the Kubernetes CoreV1API client.
    :param storage_class: A dict representing a Storage Class spec.
    :param pvc: A dict representing a Persistent Volume Claim spec.
    :return: The Persistent Volume that was provisioned.
    """
    create_storage_class(storage_class)
    pvc['spec']['storageClassName'] = storage_class['metadata']['name']
    pvc_name = pvc['metadata']['name']
    create_pvc(pvc)

    pv = wait_and_get_pv_for_pvc(core_api, pvc_name)
    volume_name = pv.spec.csi.volume_handle  # NOQA

    ks = {
        'pvName': pv.metadata.name,
        'pvStatus': 'Bound',
        'namespace': 'default',
        'pvcName': pvc_name,
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    return pv

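# A minimal usage sketch for the helper above (illustrative, not part of the
# suite): the PVC name below is hypothetical, and the fixtures are assumed to
# be the standard `storage_class`/`pvc` fixtures used throughout this file.
def example_provision_usage(client, core_api, storage_class, pvc):  # NOQA
    pvc['metadata']['name'] = 'provision-example-pvc'  # hypothetical name
    pv = provision_and_wait_pv(client, core_api, storage_class, pvc)
    # The PV's CSI volume handle is the Longhorn volume name.
    volume = client.by_id_volume(pv.spec.csi.volume_handle)
    assert volume.kubernetesStatus.pvStatus == 'Bound'
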
def test_statefulset_mount(client, core_api, storage_class, statefulset):  # NOQA
    """
    Tests that volumes provisioned for a StatefulSet can be properly created,
    mounted, unmounted, and deleted on the Kubernetes cluster.
    """
    statefulset_name = 'statefulset-mount-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        # Workaround for checking volume name since they differ per pod.
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    # Confirm that we've iterated through all the volumes.
    assert len(pod_info) == 0

def test_statefulset_pod_deletion(core_api, storage_class, statefulset):  # NOQA
    """
    Test that a StatefulSet can spin up a new Pod with the same data after a
    previous Pod has been deleted.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Write some data to one of the pods.
    4. Delete that pod.
    5. Wait for the StatefulSet to recreate the pod.
    6. Verify the data in the pod.
    """
    statefulset_name = 'statefulset-pod-deletion-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    test_pod_name = statefulset_name + '-' + \
        str(randrange(statefulset['spec']['replicas']))
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    write_pod_volume_data(core_api, test_pod_name, test_data)
    # Not using delete_and_wait_pod here because there is a small chance the
    # StatefulSet recreates the Pod quickly enough that the function won't
    # detect the deletion, which would time out and throw an error.
    core_api.delete_namespaced_pod(name=test_pod_name, namespace='default',
                                   body=k8sclient.V1DeleteOptions())
    wait_statefulset(statefulset)

    resp = read_volume_data(core_api, test_pod_name)
    assert resp == test_data

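# A sketch of the polling a helper like wait_statefulset() presumably does
# (assumed implementation; the real helper lives in the suite's common
# module). The loop shape mirrors the inline scaling wait used elsewhere in
# this file.
def wait_statefulset_ready_sketch(apps_api, name, replicas):
    for _ in range(DEFAULT_POD_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(name=name,
                                                      namespace='default')
        if s_set.status.ready_replicas == replicas:
            return
        time.sleep(DEFAULT_POD_INTERVAL)
    raise AssertionError('StatefulSet %s never reached %d ready replicas'
                         % (name, replicas))
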
def test_cloning_with_backing_image(client, core_api, pvc, pod,  # NOQA
                                    clone_pvc, clone_pod, storage_class):  # NOQA
    """
    1. Deploy a storage class that has the backing image parameter:

       ```yaml
       kind: StorageClass
       apiVersion: storage.k8s.io/v1
       metadata:
         name: longhorn-bi-parrot
       provisioner: driver.longhorn.io
       allowVolumeExpansion: true
       parameters:
         numberOfReplicas: "3"
         staleReplicaTimeout: "2880" # 48 hours in minutes
         backingImage: "bi-parrot"
         backingImageURL: "https://longhorn-backing-image.s3-us-west-1.amazonaws.com/parrot.qcow2"  # NOQA
       ```

    2. Repeat the `test_cloning_without_backing_image()` test with
       `source-pvc` and `cloned-pvc` using the `longhorn-bi-parrot`
       StorageClass instead of `longhorn`.
    3. Clean up the test.
    """
    # Create a storage class with a backing image
    backing_img_storage_class_name = 'longhorn-bi-parrot'

    storage_class['metadata']['name'] = backing_img_storage_class_name
    storage_class['parameters']['backingImage'] = 'bi-parrot'
    storage_class['parameters']['backingImageURL'] = \
        'https://longhorn-backing-image.s3-us-west-1.amazonaws.com/parrot.qcow2'  # NOQA
    storage_class['reclaimPolicy'] = 'Delete'

    create_storage_class(storage_class)

    test_cloning_basic(client, core_api, pvc, pod, clone_pvc, clone_pod,
                       storage_class_name=backing_img_storage_class_name)

def test_statefulset_backup(client, core_api, storage_class, statefulset):  # NOQA
    """
    Test that backups on StatefulSet volumes work properly.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.

    Then create backups using the following steps for each pod:

    1. Create a snapshot.
    2. Write some data into it.
    3. Create another snapshot `backup_snapshot`.
    4. Create a third snapshot.
    5. Back up the snapshot `backup_snapshot`.
    6. Wait for the backup to show up.
    7. Verify the backup information.
    """
    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

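# Condensed sketch of one backup round from the docstring above (an assumed
# shape of what create_and_test_backups() does per pod; the real logic lives
# in the suite's common helpers). snapshotCreate/snapshotBackup are standard
# Longhorn client calls; find_backup is the suite's polling helper.
def backup_round_sketch(client, core_api, pod):
    volume = client.by_id_volume(pod['pv_name'])
    volume.snapshotCreate()                        # first snapshot
    data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod['pod_name'], data)
    backup_snapshot = volume.snapshotCreate()      # snapshot to back up
    volume.snapshotCreate()                        # third snapshot
    volume.snapshotBackup(name=backup_snapshot.name)
    # find_backup polls until the backup shows up in the backupstore.
    bv, backup = find_backup(client, pod['pv_name'], backup_snapshot.name)
    assert backup.snapshotName == backup_snapshot.name
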
def test_statefulset_recurring_backup(set_random_backupstore,  # NOQA
                                      client, core_api,  # NOQA
                                      storage_class,  # NOQA
                                      statefulset):  # NOQA
    """
    Scenario: test recurring backups on StatefulSets

    Given 1 default backup recurring job created.

    When a StatefulSet is created,
    And data is written to every StatefulSet pod,
    And 5 minutes have passed,

    Then 2 snapshots are created for every StatefulSet pod.
    """

    # backup every minute
    recurring_jobs = {
        "backup": {
            "task": "backup",
            "groups": ["default"],
            "cron": "* * * * *",
            "retain": 2,
            "concurrency": 2,
            "labels": {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot.removed is False:
                count += 1

        # one backup + volume-head
        assert count == 2

def test_recurring_job_in_storageclass(set_random_backupstore,  # NOQA
                                       client, core_api, storage_class,  # NOQA
                                       statefulset):  # NOQA
    """
    Test creating a volume with a StorageClass that contains recurring jobs

    1. Create a StorageClass with recurring jobs.
    2. Create a StatefulSet with a PVC template and the StorageClass.
    3. Verify the recurring jobs run correctly.
    """
    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class["parameters"]["recurringJobs"] = json.dumps(create_jobs1())

    create_storage_class(storage_class)

    # wait until the beginning of an even minute
    wait_until_begin_of_an_even_minute()
    start_time = datetime.utcnow()

    create_and_wait_statefulset(statefulset)
    statefulset_creating_duration = datetime.utcnow() - start_time

    assert 150 > statefulset_creating_duration.seconds

    # We want to write data exactly at the 150th second since start_time
    time.sleep(150 - statefulset_creating_duration.seconds)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]
    pod_names = [p['pod_name'] for p in pod_info]

    # write random data to the volumes to trigger the recurring snapshot
    # and backup jobs
    volume_data_path = "/data/test"
    for pod_name in pod_names:
        write_pod_volume_random_data(core_api, pod_name, volume_data_path, 2)

    time.sleep(150)  # 2.5 minutes

    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        check_jobs1_result(volume)

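# create_jobs1() is a suite helper whose return value is not shown here; the
# `recurringJobs` StorageClass parameter takes a JSON-encoded list of job
# specs along these lines. The names, schedules, and retain counts below are
# illustrative assumptions, not the suite's actual values.
def create_jobs1_sketch():
    return [
        {"name": "snap", "task": "snapshot",
         "cron": "*/2 * * * *", "retain": 1},
        {"name": "backup", "task": "backup",
         "cron": "*/2 * * * *", "retain": 1},
    ]
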
def test_kubernetes_status(client, core_api, storage_class,  # NOQA
                           statefulset, csi_pv, pvc, pod):  # NOQA
    """
    Test Volume feature: Kubernetes Status

    1. Create a StorageClass with `reclaimPolicy = Retain`.
    2. Create a StatefulSet `kubernetes-status-test` with the StorageClass.
        1. The StatefulSet has a scale of 2.
    3. Get the volume name from the SECOND pod of the StatefulSet and create
       an `extra_pod` with the same volume on the same node.
    4. Check the volumes used by the StatefulSet.
        1. The volume used by the FIRST StatefulSet pod will have one
           workload.
        2. The volume used by the SECOND StatefulSet pod will have two
           workloads.
        3. Validate related status, e.g. pv/pod name/state, workload
           name/type.
    5. Delete the StatefulSet (only) and check the volumes again.
        1. PV/PVC should still be bound.
        2. The volume used by the FIRST pod should have history data.
        3. The volume used by the SECOND and extra pod should have a current
           data point referring to the extra pod.
    6. Delete the extra pod.
        1. Now all the volumes should only have history data (`lastPodRefAt`
           set).
    7. Delete the PVCs.
        1. The PVs should be updated with status `Released` and become
           history data.
    8. Delete the PVs.
        1. All the Kubernetes status information should be cleaned up.
    9. Reuse the two Longhorn volumes to create new pods.
        1. Since `reclaimPolicy == Retain`, the volumes won't be deleted by
           Longhorn.
        2. Check that the Kubernetes status is now updated, with pod info
           but an empty workload.
        3. The default Longhorn Static StorageClass will remove the PV with
           the PVC, but leave the Longhorn volume.
    """
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]  # NOQA
        volume = client.by_id_volume(volume_name)
        k_status = volume.kubernetesStatus
        workloads = k_status.workloadsStatus
        assert k_status.pvName == p['pv_name']
        assert k_status.pvStatus == 'Bound'
        assert k_status.namespace == 'default'
        assert k_status.pvcName == p['pvc_name']
        assert not k_status.lastPVCRefAt
        assert not k_status.lastPodRefAt
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0].podName == p['pod_name']
            assert workloads[0].workloadName == statefulset_name
            assert workloads[0].workloadType == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0].podStatus == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume.kubernetesStatus
                workloads = k_status.workloadsStatus
            assert workloads[0].podStatus == 'Running'
        if i == 1:
            assert len(k_status.workloadsStatus) == 2
            if workloads[0].podName == pod_info[i]['pod_name']:
                assert workloads[1].podName == extra_pod_name
                assert workloads[0].workloadName == statefulset_name
                assert workloads[0].workloadType == 'StatefulSet'
                assert not workloads[1].workloadName
                assert not workloads[1].workloadType
            else:
                assert workloads[1].podName == pod_info[i]['pod_name']
                assert workloads[0].podName == extra_pod_name
                assert not workloads[0].workloadName
                assert not workloads[0].workloadType
                assert workloads[1].workloadName == statefulset_name
                assert workloads[1].workloadType == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0].podStatus == 'Running' and \
                        workloads[1].podStatus == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume.kubernetesStatus
                workloads = k_status.workloadsStatus
            assert len(workloads) == 2
            assert workloads[0].podStatus == 'Running'
            assert workloads[1].podStatus == 'Running'

    ks_list = [{}, {}]
    delete_and_wait_statefulset_only(core_api, statefulset)
    # the extra pod is still using the 2nd volume
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['pvName'] = p['pv_name']
        ks_list[i]['pvStatus'] = 'Bound'
        ks_list[i]['namespace'] = 'default'
        ks_list[i]['pvcName'] = p['pvc_name']
        ks_list[i]['lastPVCRefAt'] = ''
        if i == 0:
            ks_list[i]['lastPodRefAt'] = 'not empty'
            ks_list[i]['workloadsStatus'] = [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': statefulset_name,
                    'workloadType': 'StatefulSet',
                },
            ]
        if i == 1:
            ks_list[i]['lastPodRefAt'] = ''
            ks_list[i]['workloadsStatus'] = [{
                'podName': extra_pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            }]
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # delete the extra pod; now all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['lastPodRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # delete the PVCs only
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        ks_list[i]['pvStatus'] = 'Released'
        ks_list[i]['lastPVCRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # delete the PVs only
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        ks_list[i]['pvName'] = ''
        ks_list[i]['pvStatus'] = ''
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # reuse the volumes
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace(
            'kubernetes-status-test', 'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace(
            'kubernetes-status-test', 'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        csi_pv['spec']['storageClassName'] = \
            DEFAULT_LONGHORN_STATIC_STORAGECLASS_NAME
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        pvc['spec']['storageClassName'] = \
            DEFAULT_LONGHORN_STATIC_STORAGECLASS_NAME
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        ks = {
            'pvName': p['pv_name'],
            'pvStatus': 'Bound',
            'namespace': 'default',
            'pvcName': p['pvc_name'],
            'lastPVCRefAt': '',
            'lastPodRefAt': '',
            'workloadsStatus': [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': '',
                    'workloadType': '',
                },
            ],
        }
        wait_volume_kubernetes_status(client, volume_name, ks)

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually.
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])

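# Sketch of the comparison wait_volume_kubernetes_status() presumably makes
# against the `ks` dicts used above (assumed semantics: a value of
# 'not empty' only requires the field to be set, any other value must match
# exactly). See the suite's common helpers for the real implementation.
def volume_kubernetes_status_matches(volume, expect_ks):
    k_status = volume.kubernetesStatus
    for field, expected in expect_ks.items():
        if field == 'workloadsStatus':
            continue  # workload entries need an order-insensitive compare
        actual = getattr(k_status, field)
        if expected == 'not empty':
            if not actual:
                return False
        elif actual != expected:
            return False
    return True
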
def test_csi_expansion_with_replica_failure(client, core_api,  # NOQA
                                            storage_class, pvc,  # NOQA
                                            pod_manifest):  # NOQA
    """
    Test that expansion succeeds even when one replica fails to expand

    1. Create a new `storage_class` with `allowVolumeExpansion` set.
    2. Create a PVC and a Pod with a dynamically provisioned volume from the
       StorageClass.
    3. Create an empty directory at the expansion snapshot tmp meta file
       path for one replica so that this replica's expansion will fail.
    4. Generate `test_data` and write it to the pod.
    5. Delete the pod and wait for volume detachment.
    6. Update pvc.spec.resources to expand the volume.
    7. Check the expansion result using the Longhorn API. There will be an
       expansion error caused by the failed replica, but overall the
       expansion should succeed.
    8. Create a new pod and check that the volume rebuilds the failed
       replica.
    9. Validate the volume content, then check that data writing works.
    """
    create_storage_class(storage_class)

    pod_name = 'csi-expansion-with-replica-failure-test'
    pvc_name = pod_name + "-pvc"
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = storage_class['metadata']['name']
    create_pvc(pvc)

    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name':
            pod_manifest['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {'claimName': pvc_name},
    }]
    create_and_wait_pod(core_api, pod_manifest)

    expand_size = str(EXPANDED_VOLUME_SIZE * Gi)
    pv = wait_and_get_pv_for_pvc(core_api, pvc_name)
    assert pv.status.phase == "Bound"
    volume_name = pv.spec.csi.volume_handle
    volume = client.by_id_volume(volume_name)
    failed_replica = volume.replicas[0]
    fail_replica_expansion(client, core_api,
                           volume_name, expand_size, [failed_replica])

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)

    delete_and_wait_pod(core_api, pod_name)
    wait_for_volume_detached(client, volume_name)

    # There will be replica expansion error info,
    # but the expansion should succeed.
    pvc['spec']['resources'] = {
        'requests': {
            'storage': size_to_string(EXPANDED_VOLUME_SIZE * Gi)
        }
    }
    expand_and_wait_for_pvc(core_api, pvc)
    wait_for_expansion_failure(client, volume_name)
    wait_for_volume_expansion(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume.state == "detached"
    assert volume.size == expand_size
    for r in volume.replicas:
        if r.name == failed_replica.name:
            assert r.failedAt != ""
        else:
            assert r.failedAt == ""

    # Check that the failed replica gets rebuilt
    # and the volume still works fine.
    create_and_wait_pod(core_api, pod_manifest)
    volume = wait_for_volume_healthy(client, volume_name)
    for r in volume.replicas:
        if r.name == failed_replica.name:
            assert r.mode == ""
        else:
            assert r.mode == "RW"
    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)
    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data

def test_csi_offline_expansion(client, core_api, storage_class, pvc,  # NOQA
                               pod_manifest):  # NOQA
    """
    Test CSI feature: offline expansion

    1. Create a new `storage_class` with `allowVolumeExpansion` set.
    2. Create a PVC and a Pod with a dynamically provisioned volume from the
       StorageClass.
    3. Generate `test_data` and write it to the pod.
    4. Delete the pod.
    5. Update pvc.spec.resources to expand the volume.
    6. Verify the volume expansion is done using the Longhorn API.
    7. Create a new pod and validate the volume content.
    """
    create_storage_class(storage_class)

    pod_name = 'csi-offline-expand-volume-test'
    pvc_name = pod_name + "-pvc"
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = storage_class['metadata']['name']
    create_pvc(pvc)

    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name':
            pod_manifest['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {'claimName': pvc_name},
    }]
    create_and_wait_pod(core_api, pod_manifest)
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)

    pv = wait_and_get_pv_for_pvc(core_api, pvc_name)
    assert pv.status.phase == "Bound"
    volume_name = pv.spec.csi.volume_handle
    wait_for_volume_detached(client, volume_name)

    pvc['spec']['resources'] = {
        'requests': {
            'storage': size_to_string(EXPANDED_VOLUME_SIZE * Gi)
        }
    }
    expand_and_wait_for_pvc(core_api, pvc)
    wait_for_volume_expansion(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume.state == "detached"
    assert volume.size == str(EXPANDED_VOLUME_SIZE * Gi)

    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name':
            pod_manifest['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {'claimName': pvc_name},
    }]
    create_and_wait_pod(core_api, pod_manifest)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data

    volume = client.by_id_volume(volume_name)
    engine = get_volume_engine(volume)
    assert volume.size == str(EXPANDED_VOLUME_SIZE * Gi)
    assert volume.size == engine.size

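# Sketch of the PVC patch a helper like expand_and_wait_for_pvc() presumably
# issues (assumed implementation; the real helper also waits for the PVC's
# reported capacity to converge). patch_namespaced_persistent_volume_claim
# is a standard CoreV1Api method.
def expand_pvc_sketch(core_api, pvc_name, new_size_str):
    core_api.patch_namespaced_persistent_volume_claim(
        name=pvc_name,
        namespace='default',
        body={'spec': {'resources': {'requests': {'storage': new_size_str}}}})
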
def test_csi_block_volume(client, core_api, storage_class, pvc, pod_manifest):  # NOQA
    """
    Test CSI feature: raw block volume

    1. Create a PVC with `volumeMode = Block`.
    2. Create a pod using the PVC to dynamically provision a volume.
    3. Verify the pod creation.
    4. Generate `test_data` and write it directly to the block volume in
       the pod.
    5. Read the data back for validation.
    6. Delete the pod and create `pod2` to use the same volume.
    7. Validate that the data in `pod2` is consistent with `test_data`.
    """
    pod_name = 'csi-block-volume-test'
    pvc_name = pod_name + "-pvc"
    device_path = "/dev/longhorn/longhorn-test-blk"

    storage_class['reclaimPolicy'] = 'Retain'
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['volumeMode'] = 'Block'
    pvc['spec']['storageClassName'] = storage_class['metadata']['name']
    pvc['spec']['resources'] = {
        'requests': {
            'storage': size_to_string(1 * Gi)
        }
    }
    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name': 'longhorn-blk',
        'persistentVolumeClaim': {
            'claimName': pvc_name,
        },
    }]
    pod_manifest['spec']['containers'][0]['volumeMounts'] = []
    pod_manifest['spec']['containers'][0]['volumeDevices'] = [
        {'name': 'longhorn-blk', 'devicePath': device_path}
    ]

    create_storage_class(storage_class)
    create_pvc(pvc)
    pv_name = wait_and_get_pv_for_pvc(core_api, pvc_name).metadata.name
    create_and_wait_pod(core_api, pod_manifest)

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    test_offset = random.randint(0, VOLUME_RWTEST_SIZE)
    write_pod_block_volume_data(
        core_api, pod_name, test_data, test_offset, device_path)
    returned_data = read_pod_block_volume_data(
        core_api, pod_name, len(test_data), test_offset, device_path
    )
    assert test_data == returned_data
    md5_sum = get_pod_data_md5sum(
        core_api, pod_name, device_path)

    delete_and_wait_pod(core_api, pod_name)
    common.wait_for_volume_detached(client, pv_name)

    pod_name_2 = 'csi-block-volume-test-reuse'
    pod_manifest['metadata']['name'] = pod_name_2
    create_and_wait_pod(core_api, pod_manifest)

    returned_data = read_pod_block_volume_data(
        core_api, pod_name_2, len(test_data), test_offset, device_path
    )
    assert test_data == returned_data
    md5_sum_2 = get_pod_data_md5sum(
        core_api, pod_name_2, device_path)
    assert md5_sum == md5_sum_2

    delete_and_wait_pod(core_api, pod_name_2)
    delete_and_wait_pvc(core_api, pvc_name)
    delete_and_wait_pv(core_api, pv_name)

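# Sketch of how a helper like write_pod_block_volume_data() can write to the
# raw device inside the pod: exec `dd` via the Kubernetes stream API
# (assumed implementation; it also assumes `data` is shell-safe, which the
# real helpers handle more carefully).
from kubernetes.stream import stream


def write_block_data_sketch(core_api, pod_name, data, offset, device_path):
    write_cmd = [
        '/bin/sh', '-c',
        "printf '%s' | dd of=%s bs=1 seek=%d conv=notrunc"
        % (data, device_path, offset)
    ]
    stream(core_api.connect_get_namespaced_pod_exec, pod_name, 'default',
           command=write_cmd, stderr=True, stdin=False, stdout=True,
           tty=False)
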
def test_statefulset_restore(client, core_api, storage_class,  # NOQA
                             statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Create a backup for each pod.
    4. Delete the StatefulSet, including the Longhorn volumes.
    5. Create volumes and PV/PVCs using the previous backups from each pod.
        1. PVs will be created using the previous names.
        2. PVCs will be created using the previous name + "-2", because the
           StatefulSet has a naming convention for its PVCs.
    6. Create a new StatefulSet using the previous name + "-2".
    7. Wait for pods to be up.
    8. Verify the pods contain the previously backed up data.
    """
    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)
    # The StatefulSet fixture already cleans these up; use the manifests
    # instead of the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    assert csi
    pv['spec']['csi'] = {
        'driver': 'driver.longhorn.io',
        'fsType': 'ext4',
        'volumeAttributes': {
            'numberOfReplicas':
                storage_class['parameters']['numberOfReplicas'],
            'staleReplicaTimeout':
                storage_class['parameters']['staleReplicaTimeout']
        },
        'volumeHandle': ''
    }

    # Make sure that volumes still work even if the Pod and StatefulSet
    # names are different.
    for pod in pod_info:
        pod['pod_name'] = pod['pod_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pod['pvc_name'] = pod['pvc_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pv['metadata']['name'] = pod['pvc_name']

        client.create_volume(
            name=pod['pvc_name'],
            size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
            numberOfReplicas=int(
                storage_class['parameters']['numberOfReplicas']),
            fromBackup=pod['backup_snapshot']['url'])
        wait_for_volume_detached(client, pod['pvc_name'])

        pv['spec']['csi']['volumeHandle'] = pod['pvc_name']
        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = pod['pvc_name']
        pvc['spec']['volumeName'] = pod['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        resp = read_volume_data(core_api, pod['pod_name'])
        assert resp == pod['data']

def test_statefulset_scaling(client, core_api, storage_class, statefulset):  # NOQA
    """
    Test that scaling up a StatefulSet successfully provisions new volumes.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Verify the properties of the volumes.
    4. Scale the StatefulSet to 3 replicas.
    5. Wait for the new pod to become ready.
    6. Verify the new volume's properties.
    """
    statefulset_name = 'statefulset-scaling-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        found = False
        for pod in pod_info:
            if v.name == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v.size == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v.numberOfReplicas == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v.state == 'attached'
    assert len(pod_info) == 0

    statefulset['spec']['replicas'] = replicas = 3
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {'replicas': replicas}})

    for i in range(DEFAULT_POD_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(
            name=statefulset_name, namespace='default')
        if s_set.status.ready_replicas == replicas:
            break
        time.sleep(DEFAULT_POD_INTERVAL)
    assert s_set.status.ready_replicas == replicas

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volumes = client.list_volume()
    assert len(volumes) == replicas
    for v in volumes:
        found = False
        for pod in pod_info:
            if v.name == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v.size == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v.numberOfReplicas == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v.state == 'attached'
    assert len(pod_info) == 0

def test_upgrade(upgrade_image_tag, settings_reset, volume_name, pod_make,  # NOQA
                 statefulset, storage_class):  # NOQA
    """
    Test Longhorn upgrade

    Prerequisite:
      - Disable the Auto Salvage setting.

    1. Find the upgrade image tag.
    2. Create a volume, then generate and write data into the volume.
    3. Create a Pod using a volume, then generate and write data.
    4. Create a StatefulSet with 2 replicas, then generate and write data
       to their volumes.
    5. Keep all volumes attached.
    6. Upgrade the Longhorn system.
    7. Check that the Pod and the StatefulSet didn't restart after the
       upgrade.
    8. Check all volumes' data.
    9. Write data to the StatefulSet pods and the attached volume.
    10. Check the data written to the StatefulSet pods and the attached
        volume.
    11. Detach the volume, then delete the Pod and the StatefulSet to
        detach their volumes.
    12. Upgrade all volumes' engine images.
    13. Attach the volume, then recreate the Pod and the StatefulSet.
    14. Check all volumes' data.
    """
    new_ei_name = "longhornio/longhorn-engine:" + upgrade_image_tag

    client = get_longhorn_api_client()
    core_api = get_core_api_client()
    host_id = get_self_host_id()
    pod_data_path = "/data/test"

    pod_volume_name = generate_volume_name()

    auto_salvage_setting = client.by_id_setting(SETTING_AUTO_SALVAGE)
    setting = client.update(auto_salvage_setting, value="false")
    assert setting.name == SETTING_AUTO_SALVAGE
    assert setting.value == "false"

    # Create a volume attached to a node.
    volume1 = create_and_check_volume(client, volume_name, size=SIZE)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)
    volume1_data = write_volume_random_data(volume1)

    # Create a volume used by a Pod.
    pod_name, pv_name, pvc_name, pod_md5sum = \
        prepare_pod_with_data_in_mb(client, core_api, pod_make,
                                    pod_volume_name,
                                    data_path=pod_data_path,
                                    add_liveness_prope=False)

    # Create multiple volumes used by a StatefulSet.
    statefulset_name = 'statefulset-upgrade-test'
    update_statefulset_manifests(statefulset,
                                 storage_class,
                                 statefulset_name)
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)
    statefulset_pod_info = get_statefulset_pod_info(core_api, statefulset)
    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api,
                              sspod_info['pod_name'],
                              sspod_info['data'])

    # Upgrade Longhorn.
    assert longhorn_upgrade(upgrade_image_tag)

    client = get_longhorn_api_client()

    # Wait for 1 minute before checking pod restarts.
    time.sleep(60)

    pod = core_api.read_namespaced_pod(name=pod_name, namespace='default')
    assert pod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        sspod = core_api.read_namespaced_pod(name=sspod_info['pod_name'],
                                             namespace='default')
        assert sspod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)

    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api,
                              sspod_info['pod_name'],
                              sspod_info['data'])

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    volume1 = client.by_id_volume(volume_name)
    volume1_data = write_volume_random_data(volume1)
    check_volume_data(volume1, volume1_data)

    statefulset['spec']['replicas'] = replicas = 0
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {'replicas': replicas}})

    delete_and_wait_pod(core_api, pod_name)

    volume = client.by_id_volume(volume_name)
    volume.detach()

    volumes = client.list_volume()
    for v in volumes:
        wait_for_volume_detached(client, v.name)

    engineimages = client.list_engine_image()
    for ei in engineimages:
        if ei.image == new_ei_name:
            new_ei = ei

    volumes = client.list_volume()
    for v in volumes:
        volume = client.by_id_volume(v.name)
        volume.engineUpgrade(image=new_ei.image)

    statefulset['spec']['replicas'] = replicas = 2
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {'replicas': replicas}})
    wait_statefulset(statefulset)

    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    volume1 = client.by_id_volume(volume_name)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)

def test_pvc_creation_with_default_sc_set(client, core_api, storage_class, pod):  # NOQA
    """
    Test creating a PVC with the default StorageClass set

    The target is to make sure the newly created PV/PVC won't use the default
    StorageClass, and that if there is no default StorageClass, the PV/PVC
    can still be created.

    1. Create a StorageClass and set it to be the default StorageClass
    2. Update static StorageClass to `longhorn-static-test`
    3. Create volume then PV/PVC.
    4. Make sure the newly created PV/PVC use StorageClass
       `longhorn-static-test`
    5. Create pod with PVC.
    6. Verify volume's Kubernetes Status
    7. Remove PVC and Pod.
    8. Verify volume's Kubernetes Status only contains current PV and history
    9. Wait for volume to detach (since pod is deleted)
    10. Reuse the volume on a new pod. Wait for the pod to start
    11. Verify volume's Kubernetes Status reflects the new pod.
    12. Delete PV/PVC/Pod.
    13. Verify volume's Kubernetes Status only contains history
    14. Delete the default StorageClass.
    15. Create PV/PVC for the volume.
    16. Make sure the PV's StorageClass is the static StorageClass
    """
    # set default storage class
    storage_class['metadata']['annotations'] = \
        {"storageclass.kubernetes.io/is-default-class": "true"}
    create_storage_class(storage_class)

    static_sc_name = "longhorn-static-test"
    setting = client.by_id_setting(SETTING_DEFAULT_LONGHORN_STATIC_SC)
    setting = client.update(setting, value=static_sc_name)
    assert setting.value == static_sc_name

    volume_name = "test-pvc-creation-with-sc"  # NOQA
    pod_name = "pod-" + volume_name
    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    pvc_name = "pvc-" + volume_name
    pvc_name_extra = "pvc-" + volume_name + "-extra"

    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)

    ret = core_api.list_namespaced_persistent_volume_claim(
        namespace='default')
    # Initialize to None so a missing PVC fails the assert below instead of
    # raising a NameError.
    pvc_found = None
    for item in ret.items:
        if item.metadata.name == pvc_name:
            pvc_found = item
            break
    assert pvc_found
    assert pvc_found.spec.storage_class_name == static_sc_name

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pvc_name,
        },
    }]
    create_and_wait_pod(core_api, pod)

    ks = {
        'pvName': pv_name,
        'pvStatus': 'Bound',
        'namespace': 'default',
        'pvcName': pvc_name,
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
        'workloadsStatus': [
            {
                'podName': pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            },
        ],
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name)

    ks = {
        'pvName': pv_name,
        'pvStatus': 'Released',
        'namespace': 'default',
        'pvcName': pvc_name,
        'lastPVCRefAt': 'not empty',
        'lastPodRefAt': 'not empty',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    # try to reuse the pv
    volume = wait_for_volume_detached(client, volume_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name_extra)
    pod['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = \
        pvc_name_extra
    create_and_wait_pod(core_api, pod)

    ks = {
        'pvName': pv_name,
        'pvStatus': 'Bound',
        'namespace': 'default',
        'pvcName': pvc_name_extra,
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
        'workloadsStatus': [
            {
                'podName': pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            },
        ],
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name_extra)
    delete_and_wait_pv(core_api, pv_name)

    ks = {
        'pvName': '',
        'pvStatus': '',
        'namespace': 'default',
        'pvcName': pvc_name_extra,
        'lastPVCRefAt': 'not empty',
        'lastPodRefAt': 'not empty',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    # without default storage class
    delete_storage_class(storage_class['metadata']['name'])
    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)

    ret = core_api.list_namespaced_persistent_volume_claim(
        namespace='default')
    pvc2 = None
    for item in ret.items:
        if item.metadata.name == pvc_name:
            pvc2 = item
            break
    assert pvc2
    assert pvc2.spec.storage_class_name == static_sc_name

    delete_and_wait_pvc(core_api, pvc_name)
    delete_and_wait_pv(core_api, pv_name)
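# The PVC lookup loops in test_pvc_creation_with_default_sc_set could be
# factored into a small helper. A sketch; 'find_pvc' is a hypothetical name
# not used elsewhere in this suite.
def find_pvc(core_api, name, namespace='default'):
    """Return the PVC object with the given name, or None if absent."""
    ret = core_api.list_namespaced_persistent_volume_claim(
        namespace=namespace)
    for item in ret.items:
        if item.metadata.name == name:
            return item
    return None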
def test_kubernetes_status(client, core_api, storage_class,  # NOQA
                           statefulset, csi_pv, pvc, pod):  # NOQA
    """
    Test that a volume's Kubernetes Status tracks its PV/PVC, namespace, and
    workload pods as they are created, deleted, and reused.
    """
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    # Schedule the extra pod onto the same node as the StatefulSet pod so
    # both can share the already-attached volume.
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert workloads[0]['podStatus'] == 'Running'
        if i == 1:
            assert len(k_status['workloadsStatus']) == 2
            if workloads[0]['podName'] == pod_info[i]['pod_name']:
                assert workloads[1]['podName'] == extra_pod_name
                assert workloads[0]['workloadName'] == statefulset_name
                assert workloads[0]['workloadType'] == 'StatefulSet'
                assert not workloads[1]['workloadName']
                assert not workloads[1]['workloadType']
            else:
                assert workloads[1]['podName'] == pod_info[i]['pod_name']
                assert workloads[0]['podName'] == extra_pod_name
                assert not workloads[0]['workloadName']
                assert not workloads[0]['workloadType']
                assert workloads[1]['workloadName'] == statefulset_name
                assert workloads[1]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running' and \
                        workloads[1]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert len(workloads) == 2
            assert workloads[0]['podStatus'] == 'Running'
            assert workloads[1]['podStatus'] == 'Running'

    ks_list = [{}, {}]
    delete_and_wait_statefulset_only(core_api, statefulset)
    # the extra pod is still using the 2nd volume
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['pvName'] = p['pv_name']
        ks_list[i]['pvStatus'] = 'Bound'
        ks_list[i]['namespace'] = 'default'
        ks_list[i]['pvcName'] = p['pvc_name']
        ks_list[i]['lastPVCRefAt'] = ''
        if i == 0:
            ks_list[i]['lastPodRefAt'] = 'not empty'
            ks_list[i]['workloadsStatus'] = [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': statefulset_name,
                    'workloadType': 'StatefulSet',
                },
            ]
        if i == 1:
            ks_list[i]['lastPodRefAt'] = ''
            ks_list[i]['workloadsStatus'] = [{
                'podName': extra_pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            }]
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted extra_pod, all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['lastPodRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pvc only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        ks_list[i]['pvStatus'] = 'Released'
        ks_list[i]['lastPVCRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pv only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        ks_list[i]['pvName'] = ''
        ks_list[i]['pvStatus'] = ''
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # reuse that volume
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        ks = {
            'pvName': p['pv_name'],
            'pvStatus': 'Bound',
            'namespace': 'default',
            'pvcName': p['pvc_name'],
            'lastPVCRefAt': '',
            'lastPodRefAt': '',
            'workloadsStatus': [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': '',
                    'workloadType': '',
                },
            ],
        }
        wait_volume_kubernetes_status(client, volume_name, ks)

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
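# The retry loops in test_kubernetes_status all follow the same
# poll-until-true shape. A minimal sketch of that pattern as a reusable
# helper; 'wait_for' is a hypothetical name, and it assumes RETRY_COUNTS and
# RETRY_INTERVAL from the surrounding module.
def wait_for(predicate, counts=RETRY_COUNTS, interval=RETRY_INTERVAL):
    """Poll predicate() until it returns truthy or the retries run out."""
    for _ in range(counts):
        if predicate():
            return True
        time.sleep(interval)
    return False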