def test_pv_creation(client, core_api):  # NOQA
    volume_name = "test-pv-creation"
    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    create_pv_for_volume(client, core_api, volume, pv_name)

    # try to create one more pv for the volume
    pv_name_2 = "pv2-" + volume_name
    with pytest.raises(Exception) as e:
        volume.pvCreate(pvName=pv_name_2)
    assert "already exist" in str(e.value)

    ks = {
        'pvName': pv_name,
        'pvStatus': 'Available',
        'namespace': '',
        'pvcName': '',
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    delete_and_wait_pv(core_api, pv_name)
def csi_io_test(client, core_api, csi_pv, pvc, pod_make, backing_image=""):  # NOQA
    pv_name = generate_volume_name()
    pod_name = 'csi-io-test'
    create_and_wait_csi_pod_named_pv(pv_name, pod_name, client, core_api,
                                     csi_pv, pvc, pod_make, backing_image, "")

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    common.wait_for_volume_detached(client, csi_pv['metadata']['name'])

    pod_name = 'csi-io-test-2'
    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [create_pvc_spec(pv_name)]
    csi_pv['metadata']['name'] = pv_name
    csi_pv['spec']['csi']['volumeHandle'] = pv_name
    pvc['metadata']['name'] = pv_name
    pvc['spec']['volumeName'] = pv_name
    update_storageclass_references(CSI_PV_TEST_STORAGE_NAME, csi_pv, pvc)

    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc['metadata']['name'])
    delete_and_wait_pv(core_api, pv_name)
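
# The tests here rely on a create_pvc_spec() helper to build the pod's volume
# entry from a PVC name. Its real implementation lives elsewhere in the test
# suite; the following is only a minimal sketch, assuming it returns a plain
# Kubernetes pod `.spec.volumes` item (the 'pod-data' volume name is an
# assumption for illustration, not necessarily the value the suite uses).
def create_pvc_spec_sketch(pvc_name):
    # A pod volume entry that mounts an existing PersistentVolumeClaim.
    return {
        'name': 'pod-data',  # hypothetical volume name
        'persistentVolumeClaim': {
            'claimName': pvc_name,
            'readOnly': False,
        },
    }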
def csi_backup_test(client, core_api, csi_pv, pvc, pod_make, backing_image=""):  # NOQA
    pod_name = 'csi-backup-test'
    vol_name = create_and_wait_csi_pod(
        pod_name, client, core_api, csi_pv, pvc, pod_make, backing_image, "")
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    # test backupTarget for multiple settings
    backupstores = common.get_backupstore_url()
    i = 1
    for backupstore in backupstores:
        if common.is_backupTarget_s3(backupstore):
            backupsettings = backupstore.split("$")
            setting = client.update(setting, value=backupsettings[0])
            assert setting.value == backupsettings[0]

            credential = client.by_id_setting(
                common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value=backupsettings[1])
            assert credential.value == backupsettings[1]
        else:
            setting = client.update(setting, value=backupstore)
            assert setting.value == backupstore

            credential = client.by_id_setting(
                common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value="")
            assert credential.value == ""

        backupstore_test(client, core_api, csi_pv, pvc, pod_make, pod_name,
                         vol_name, backing_image, test_data, i)
        i += 1

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, vol_name)
    delete_and_wait_pv(core_api, vol_name)
def test_pv_creation(client, core_api): # NOQA """ Test creating PV using Longhorn API 1. Create volume 2. Create PV for the volume 3. Try to create another PV for the same volume. It should fail. 4. Check Kubernetes Status for the volume since PV is created. """ volume_name = "test-pv-creation" # NOQA client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2) volume = wait_for_volume_detached(client, volume_name) pv_name = "pv-" + volume_name create_pv_for_volume(client, core_api, volume, pv_name) # try to create one more pv for the volume pv_name_2 = "pv2-" + volume_name with pytest.raises(Exception) as e: volume.pvCreate(pvName=pv_name_2) assert "already exist" in str(e.value) ks = { 'pvName': pv_name, 'pvStatus': 'Available', 'namespace': '', 'pvcName': '', 'lastPVCRefAt': '', 'lastPodRefAt': '', } wait_volume_kubernetes_status(client, volume_name, ks) delete_and_wait_pv(core_api, pv_name)
def csi_backup_test(client, core_api, csi_pv, pvc, pod_make, backing_image=""):  # NOQA
    pod_name = 'csi-backup-test'
    vol_name = create_and_wait_csi_pod(
        pod_name, client, core_api, csi_pv, pvc, pod_make, backing_image, "")
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    backupstore_test(client, core_api, csi_pv, pvc, pod_make, pod_name,
                     vol_name, backing_image, test_data)

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, vol_name)
    delete_and_wait_pv(core_api, vol_name)
def test_pv_creation(client, core_api):  # NOQA
    volume_name = "test-pv-creation"
    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    volume.pvCreate(pvName=pv_name)
    for i in range(RETRY_COUNTS):
        if check_pv_existence(core_api, pv_name):
            break
        time.sleep(RETRY_INTERVAL)
    assert check_pv_existence(core_api, pv_name)

    volume = client.by_id_volume(volume_name)
    k_status = volume["kubernetesStatus"]
    workloads = k_status['workloadsStatus']
    for i in range(RETRY_COUNTS):
        if k_status['pvName'] and k_status['pvStatus'] == 'Available':
            break
        time.sleep(RETRY_INTERVAL)
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
    assert k_status['pvName'] == pv_name
    assert k_status['pvStatus'] == 'Available'
    assert not k_status['namespace']
    assert not k_status['pvcName']
    assert not workloads
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    # try to create one more pv for the volume
    pv_name_2 = "pv2-" + volume_name
    with pytest.raises(Exception) as e:
        volume.pvCreate(pvName=pv_name_2)
    assert "already exist" in str(e.value)

    volume = client.by_id_volume(volume_name)
    k_status = volume["kubernetesStatus"]
    workloads = k_status['workloadsStatus']
    assert k_status['pvName'] == pv_name
    assert k_status['pvStatus'] == 'Available'
    assert not k_status['namespace']
    assert not k_status['pvcName']
    assert not workloads
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    delete_and_wait_pv(core_api, pv_name)
def csi_mount_test(client, core_api, csi_pv, pvc, pod_make,  # NOQA
                   volume_size, backing_image=""):  # NOQA
    pod_name = 'csi-mount-test'
    create_and_wait_csi_pod(pod_name, client, core_api, csi_pv, pvc,
                            pod_make, backing_image, "")

    volumes = client.list_volume().data
    assert len(volumes) == 1
    assert volumes[0].name == csi_pv['metadata']['name']
    assert volumes[0].size == str(volume_size)
    assert volumes[0].numberOfReplicas == \
        int(csi_pv['spec']['csi']['volumeAttributes']["numberOfReplicas"])
    assert volumes[0].state == "attached"
    assert volumes[0].backingImage == backing_image

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc['metadata']['name'])
    delete_and_wait_pv(core_api, csi_pv['metadata']['name'])
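
# csi_mount_test, csi_io_test and csi_backup_test all drive the same helper,
# create_and_wait_csi_pod(), which is defined elsewhere in the suite. The
# following is only a rough, hedged sketch, assuming it simply names the PV
# after a fresh volume and delegates to create_and_wait_csi_pod_named_pv();
# the real helper may differ.
def create_and_wait_csi_pod_sketch(pod_name, client, core_api, csi_pv, pvc,
                                   pod_make, backing_image, from_backup):
    # Generate a volume/PV name, wire up PV/PVC/pod, and hand the name back
    # so the caller can clean up the PVC and PV afterwards.
    vol_name = generate_volume_name()
    create_and_wait_csi_pod_named_pv(vol_name, pod_name, client, core_api,
                                     csi_pv, pvc, pod_make, backing_image,
                                     from_backup)
    return vol_name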
def test_reset_env():
    k8s_api_client = get_core_api_client()
    k8s_storage_client = get_storage_api_client()
    longhorn_api_client = get_longhorn_api_client()

    pod_list = k8s_api_client.list_namespaced_pod("default")
    for pod in pod_list.items:
        if STRESS_POD_NAME_PREFIX in pod.metadata.name:
            delete_and_wait_pod(k8s_api_client, pod.metadata.name)

    pvc_list = \
        k8s_api_client.list_namespaced_persistent_volume_claim("default")
    for pvc in pvc_list.items:
        if STRESS_PVC_NAME_PREFIX in pvc.metadata.name:
            delete_and_wait_pvc(k8s_api_client, pvc.metadata.name)

    pv_list = k8s_api_client.list_persistent_volume()
    for pv in pv_list.items:
        pv_name = pv.metadata.name
        if STRESS_PV_NAME_PREFIX in pv_name:
            try:
                delete_and_wait_pv(k8s_api_client, pv_name)
            except AssertionError:
                volumeattachment_list = \
                    k8s_storage_client.list_volume_attachment()
                for volumeattachment in volumeattachment_list.items:
                    volume_attachment_name = \
                        volumeattachment.spec.source.persistent_volume_name
                    if volume_attachment_name == pv_name:
                        delete_and_wait_volume_attachment(
                            k8s_storage_client,
                            volume_attachment_name
                        )
                        delete_and_wait_pv(k8s_api_client, pv.metadata.name)

    volume_list = \
        longhorn_api_client.list_volume()
    for volume in volume_list.data:
        if STRESS_VOLUME_NAME_PREFIX in volume.name:
            delete_and_wait_longhorn(longhorn_api_client, volume.name)
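
# delete_and_wait_volume_attachment() is the cleanup helper the reset above
# falls back to when a PV deletion is blocked by a lingering VolumeAttachment.
# The sketch below is only an assumption about its behavior (delete through the
# storage API, then poll the listing); the suite's real helper may differ.
def delete_and_wait_volume_attachment_sketch(storage_api, attachment_name):
    storage_api.delete_volume_attachment(name=attachment_name)
    for _ in range(RETRY_COUNTS):
        names = [va.metadata.name
                 for va in storage_api.list_volume_attachment().items]
        if attachment_name not in names:
            return
        time.sleep(RETRY_INTERVAL)
    assert attachment_name not in names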
def test_csi_block_volume(client, core_api, storage_class, pvc, pod_manifest):  # NOQA
    """
    Test CSI feature: raw block volume

    1. Create a PVC with `volumeMode = Block`
    2. Create a pod using the PVC to dynamically provision a volume
    3. Verify the pod creation
    4. Generate `test_data` and write to the block volume directly in the pod
    5. Read the data back for validation
    6. Delete the pod and create `pod2` to use the same volume
    7. Validate the data in `pod2` is consistent with `test_data`
    """
    pod_name = 'csi-block-volume-test'
    pvc_name = pod_name + "-pvc"
    device_path = "/dev/longhorn/longhorn-test-blk"

    storage_class['reclaimPolicy'] = 'Retain'
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['volumeMode'] = 'Block'
    pvc['spec']['storageClassName'] = storage_class['metadata']['name']
    pvc['spec']['resources'] = {
        'requests': {
            'storage': size_to_string(1 * Gi)
        }
    }
    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name': 'longhorn-blk',
        'persistentVolumeClaim': {
            'claimName': pvc_name,
        },
    }]
    pod_manifest['spec']['containers'][0]['volumeMounts'] = []
    pod_manifest['spec']['containers'][0]['volumeDevices'] = [
        {'name': 'longhorn-blk', 'devicePath': device_path}
    ]

    create_storage_class(storage_class)
    create_pvc(pvc)
    pv_name = wait_and_get_pv_for_pvc(core_api, pvc_name).metadata.name
    create_and_wait_pod(core_api, pod_manifest)

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    test_offset = random.randint(0, VOLUME_RWTEST_SIZE)
    write_pod_block_volume_data(
        core_api, pod_name, test_data, test_offset, device_path)
    returned_data = read_pod_block_volume_data(
        core_api, pod_name, len(test_data), test_offset, device_path
    )
    assert test_data == returned_data
    md5_sum = get_pod_data_md5sum(
        core_api, pod_name, device_path)

    delete_and_wait_pod(core_api, pod_name)
    common.wait_for_volume_detached(client, pv_name)

    pod_name_2 = 'csi-block-volume-test-reuse'
    pod_manifest['metadata']['name'] = pod_name_2
    create_and_wait_pod(core_api, pod_manifest)

    returned_data = read_pod_block_volume_data(
        core_api, pod_name_2, len(test_data), test_offset, device_path
    )
    assert test_data == returned_data
    md5_sum_2 = get_pod_data_md5sum(
        core_api, pod_name_2, device_path)
    assert md5_sum == md5_sum_2

    delete_and_wait_pod(core_api, pod_name_2)
    delete_and_wait_pvc(core_api, pvc_name)
    delete_and_wait_pv(core_api, pv_name)
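
# write_pod_block_volume_data()/read_pod_block_volume_data() above exercise the
# raw block device from inside the pod. Their real implementations live in the
# common test helpers; the snippet below is only an illustrative sketch,
# assuming the data is pushed through an exec stream and written with dd at a
# byte offset. The command layout and helper name are assumptions, not the
# suite's code, and the shell quoting only holds for simple alphanumeric data.
from kubernetes.stream import stream


def write_block_device_sketch(core_api, pod_name, data, offset, device_path):
    # Write `data` at `offset` on the raw device without truncating it.
    cmd = [
        '/bin/sh', '-c',
        'printf %s "{}" | dd of={} bs=1 seek={} conv=notrunc'.format(
            data, device_path, offset)
    ]
    return stream(core_api.connect_get_namespaced_pod_exec, pod_name,
                  'default', command=cmd,
                  stderr=True, stdin=False, stdout=True, tty=False)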
def test_csi_minimal_volume_size(
        client, core_api, csi_pv, pvc, pod_make):  # NOQA
    """
    Test CSI Minimal Volume Size

    1. Create a PVC requesting size 5MiB. Check that the PVC's requested size
       is 5MiB while the reported capacity is 10MiB.
    2. Remove the PVC.
    3. Create a PVC requesting size 10MiB. Check that both the requested size
       and the reported capacity are 10MiB.
    4. Create a pod to use this PVC.
    5. Write some data to the volume and read it back to compare.
    """
    vol_name = generate_volume_name()
    create_and_check_volume(client, vol_name, size=str(100*Mi))

    low_storage = str(5*Mi)
    min_storage = str(10*Mi)

    pv_name = vol_name + "-pv"
    csi_pv['metadata']['name'] = pv_name
    csi_pv['spec']['csi']['volumeHandle'] = vol_name
    csi_pv['spec']['capacity']['storage'] = min_storage
    core_api.create_persistent_volume(csi_pv)

    pvc_name = vol_name + "-pvc"
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['volumeName'] = pv_name
    pvc['spec']['resources']['requests']['storage'] = low_storage
    pvc['spec']['storageClassName'] = ''
    core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                       namespace='default')

    claim = common.wait_for_pvc_phase(core_api, pvc_name, "Bound")
    assert claim.spec.resources.requests['storage'] == low_storage
    assert claim.status.capacity['storage'] == min_storage

    common.delete_and_wait_pvc(core_api, pvc_name)
    common.delete_and_wait_pv(core_api, pv_name)
    wait_for_volume_detached(client, vol_name)

    core_api.create_persistent_volume(csi_pv)

    pvc['spec']['resources']['requests']['storage'] = min_storage
    core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                       namespace='default')

    claim = common.wait_for_pvc_phase(core_api, pvc_name, "Bound")
    assert claim.spec.resources.requests['storage'] == min_storage
    assert claim.status.capacity['storage'] == min_storage

    pod_name = vol_name + '-pod'
    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    test_data = "longhorn-integration-test"
    test_file = "test"
    write_pod_volume_data(core_api, pod_name, test_data, test_file)
    read_data = read_volume_data(core_api, pod_name, test_file)
    assert read_data == test_data
def test_pvc_creation_with_default_sc_set(client, core_api, storage_class, pod):  # NOQA
    """
    Test creating PVC with default StorageClass set

    The target is to make sure the newly created PV/PVC won't use the default
    StorageClass, and that PV/PVC can still be created when there is no
    default StorageClass.

    1. Create a StorageClass and set it to be the default StorageClass
    2. Update static StorageClass to `longhorn-static-test`
    3. Create volume then PV/PVC.
    4. Make sure the newly created PV/PVC uses StorageClass
       `longhorn-static-test`
    5. Create pod with PVC.
    6. Verify volume's Kubernetes Status
    7. Remove PVC and Pod.
    8. Verify volume's Kubernetes Status only contains current PV and history
    9. Wait for volume to detach (since pod is deleted)
    10. Reuse the volume on a new pod. Wait for the pod to start
    11. Verify volume's Kubernetes Status reflects the new pod.
    12. Delete PV/PVC/Pod.
    13. Verify volume's Kubernetes Status only contains history
    14. Delete the default StorageClass.
    15. Create PV/PVC for the volume.
    16. Make sure the PV's StorageClass is the static StorageClass
    """
    # set default storage class
    storage_class['metadata']['annotations'] = \
        {"storageclass.kubernetes.io/is-default-class": "true"}
    create_storage_class(storage_class)

    static_sc_name = "longhorn-static-test"
    setting = client.by_id_setting(SETTING_DEFAULT_LONGHORN_STATIC_SC)
    setting = client.update(setting, value=static_sc_name)
    assert setting.value == static_sc_name

    volume_name = "test-pvc-creation-with-sc"  # NOQA
    pod_name = "pod-" + volume_name
    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    pvc_name = "pvc-" + volume_name
    pvc_name_extra = "pvc-" + volume_name + "-extra"

    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)

    ret = core_api.list_namespaced_persistent_volume_claim(
        namespace='default')
    for item in ret.items:
        if item.metadata.name == pvc_name:
            pvc_found = item
            break
    assert pvc_found
    assert pvc_found.spec.storage_class_name == static_sc_name

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pvc_name,
        },
    }]
    create_and_wait_pod(core_api, pod)

    ks = {
        'pvName': pv_name,
        'pvStatus': 'Bound',
        'namespace': 'default',
        'pvcName': pvc_name,
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
        'workloadsStatus': [
            {
                'podName': pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            },
        ],
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name)

    ks = {
        'pvName': pv_name,
        'pvStatus': 'Released',
        'namespace': 'default',
        'pvcName': pvc_name,
        'lastPVCRefAt': 'not empty',
        'lastPodRefAt': 'not empty',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    # try to reuse the pv
    volume = wait_for_volume_detached(client, volume_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name_extra)
    pod['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = \
        pvc_name_extra
    create_and_wait_pod(core_api, pod)

    ks = {
        'pvName': pv_name,
        'pvStatus': 'Bound',
        'namespace': 'default',
        'pvcName': pvc_name_extra,
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
        'workloadsStatus': [
            {
                'podName': pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            },
        ],
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name_extra)
    delete_and_wait_pv(core_api, pv_name)

    ks = {
        'pvName': '',
        'pvStatus': '',
        'namespace': 'default',
        'pvcName': pvc_name_extra,
        'lastPVCRefAt': 'not empty',
        'lastPodRefAt': 'not empty',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    # without default storage class
    delete_storage_class(storage_class['metadata']['name'])

    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)

    ret = core_api.list_namespaced_persistent_volume_claim(
        namespace='default')
    for item in ret.items:
        if item.metadata.name == pvc_name:
            pvc2 = item
            break
    assert pvc2
    assert pvc2.spec.storage_class_name == static_sc_name

    delete_and_wait_pvc(core_api, pvc_name)
    delete_and_wait_pv(core_api, pv_name)
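
# Several tests in this file pass expected-status dicts such as `ks` to
# wait_volume_kubernetes_status(), using '' for "must be empty" and the
# sentinel string 'not empty' for "any non-empty value". The real helper polls
# the volume until its status matches; the comparison below is only a
# simplified sketch of that convention (assumption: it skips workloadsStatus,
# which the real helper also verifies entry by entry).
def kubernetes_status_matches_sketch(k_status, expected):
    for field, want in expected.items():
        if field == 'workloadsStatus':
            continue  # simplified: workload entries not compared here
        got = k_status.get(field, '')
        if want == 'not empty':
            if not got:
                return False
        elif got != want:
            return False
    return True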
def test_kubernetes_status(client, core_api, storage_class,  # NOQA
                           statefulset, csi_pv, pvc, pod):  # NOQA
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert workloads[0]['podStatus'] == 'Running'
        if i == 1:
            assert len(k_status['workloadsStatus']) == 2
            if workloads[0]['podName'] == pod_info[i]['pod_name']:
                assert workloads[1]['podName'] == extra_pod_name
                assert workloads[0]['workloadName'] == statefulset_name
                assert workloads[0]['workloadType'] == 'StatefulSet'
                assert not workloads[1]['workloadName']
                assert not workloads[1]['workloadType']
            else:
                assert workloads[1]['podName'] == pod_info[i]['pod_name']
                assert workloads[0]['podName'] == extra_pod_name
                assert not workloads[0]['workloadName']
                assert not workloads[0]['workloadType']
                assert workloads[1]['workloadName'] == statefulset_name
                assert workloads[1]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running' and \
                        workloads[1]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert len(workloads) == 2
            assert workloads[0]['podStatus'] == 'Running'
            assert workloads[1]['podStatus'] == 'Running'

    # the extra pod is still using the 2nd volume
    delete_and_wait_statefulset_only(core_api, statefulset)
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            assert k_status['lastPodRefAt']
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']
            assert not k_status['lastPodRefAt']

    # deleted extra_pod, all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert k_status['lastPodRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']

    # deleted pvc only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        for _ in range(RETRY_COUNTS):
            if k_status['pvStatus'] == 'Released':
                break
            time.sleep(RETRY_INTERVAL)
            volume = client.by_id_volume(volume_name)
            k_status = volume["kubernetesStatus"]
            workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Released'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert k_status['lastPVCRefAt']
        assert k_status['lastPodRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']

    # deleted pv only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == ''
        assert k_status['pvStatus'] == ''
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert k_status['lastPVCRefAt']
        assert k_status['lastPodRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']

    # reuse that volume
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert len(workloads) == 1
        assert k_status['pvName'] == p['pv_name']
        for _ in range(RETRY_COUNTS):
            if k_status['pvStatus'] == 'Bound':
                break
            time.sleep(RETRY_INTERVAL)
            volume = client.by_id_volume(volume_name)
            k_status = volume["kubernetesStatus"]
            workloads = k_status['workloadsStatus']
            assert len(workloads) == 1
        assert k_status['pvStatus'] == 'Bound'
        for _ in range(RETRY_COUNTS):
            if workloads[0]['podStatus'] == 'Running':
                break
            time.sleep(RETRY_INTERVAL)
            volume = client.by_id_volume(volume_name)
            k_status = volume["kubernetesStatus"]
            workloads = k_status['workloadsStatus']
            assert len(workloads) == 1
        assert workloads[0]['podStatus'] == 'Running'
        assert workloads[0]['podName'] == p['pod_name']
        assert not workloads[0]['workloadName']
        assert not workloads[0]['workloadType']
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
def test_engine_live_upgrade_with_intensive_data_writing(
        client, core_api, volume_name, pod_make):  # NOQA
    """
    Test engine live upgrade with intensive data writing

    1. Deploy a compatible new engine image
    2. Create a volume (with the old default engine image) with PV/PVC/Pod
       and wait for the pod to be ready.
    3. Write data to a tmp file in the pod and get the md5sum
    4. Upgrade the volume to the new engine image without waiting.
    5. Keep copying data from the tmp file to the volume
       during the live upgrade.
    6. Wait until the upgrade completes, verify the volume engine image
       changed
    7. Wait for the new replica mode update, then check the engine status.
    8. Verify the engine image changed for the engine and all replicas
    9. Verify the reference count of the new engine image changed
    10. Check the existing data.
        Then write new data to the upgraded volume and get the md5sum.
    11. Delete the pod and wait for the volume to detach.
        Then check the engine and replicas' engine image again.
    12. Recreate the pod and wait for the volume to attach.
    13. Check that the attached volume is in state `healthy`
        rather than `degraded`.
    14. Check the data.
    """
    default_img = common.get_default_engine_image(client)
    default_img_name = default_img.name
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 0)
    cli_v = default_img.cliAPIVersion
    cli_minv = default_img.cliAPIMinVersion
    ctl_v = default_img.controllerAPIVersion
    ctl_minv = default_img.controllerAPIMinVersion
    data_v = default_img.dataFormatVersion
    data_minv = default_img.dataFormatMinVersion
    engine_upgrade_image = common.get_upgrade_test_image(
        cli_v, cli_minv, ctl_v, ctl_minv, data_v, data_minv)

    new_img = client.create_engine_image(image=engine_upgrade_image)
    new_img_name = new_img.name
    new_img = wait_for_engine_image_state(client, new_img_name, "ready")
    assert new_img.refCount == 0
    assert new_img.noRefSince != ""

    default_img = common.get_default_engine_image(client)
    default_img_name = default_img.name

    pod_name = volume_name + "-pod"
    pv_name = volume_name + "-pv"
    pvc_name = volume_name + "-pvc"

    pod = pod_make(name=pod_name)
    volume = create_and_check_volume(client, volume_name,
                                     num_of_replicas=3, size=str(1 * Gi))
    original_engine_image = volume.engineImage
    assert original_engine_image != engine_upgrade_image

    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    volume = client.by_id_volume(volume_name)
    assert volume.engineImage == original_engine_image
    assert volume.currentImage == original_engine_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == original_engine_image
    assert engine.currentImage == original_engine_image
    for replica in volume.replicas:
        assert replica.engineImage == original_engine_image
        assert replica.currentImage == original_engine_image

    data_path0 = "/tmp/test"
    data_path1 = "/data/test1"
    write_pod_volume_random_data(core_api, pod_name,
                                 data_path0, RANDOM_DATA_SIZE_LARGE)
    original_md5sum1 = get_pod_data_md5sum(core_api, pod_name, data_path0)

    volume.engineUpgrade(image=engine_upgrade_image)
    # Keep writing data to the volume during the live upgrade
    copy_pod_volume_data(core_api, pod_name, data_path0, data_path1)

    # Wait for live upgrade complete
    wait_for_volume_current_image(client, volume_name, engine_upgrade_image)
    volume = wait_for_volume_replicas_mode(client, volume_name, "RW")
    engine = get_volume_engine(volume)
    assert engine.engineImage == engine_upgrade_image
    check_volume_endpoint(volume)

    wait_for_engine_image_ref_count(client, default_img_name, 0)
    wait_for_engine_image_ref_count(client, new_img_name, 1)

    volume_file_md5sum1 = get_pod_data_md5sum(core_api, pod_name, data_path1)
    assert volume_file_md5sum1 == original_md5sum1

    data_path2 = "/data/test2"
    write_pod_volume_random_data(core_api, pod_name,
                                 data_path2, RANDOM_DATA_SIZE_SMALL)
    original_md5sum2 = get_pod_data_md5sum(core_api, pod_name, data_path2)

    delete_and_wait_pod(core_api, pod_name)
    volume = wait_for_volume_detached(client, volume_name)
    assert len(volume.replicas) == 3
    assert volume.engineImage == engine_upgrade_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == engine_upgrade_image
    for replica in volume.replicas:
        assert replica.engineImage == engine_upgrade_image

    # The reattached volume should be in state `healthy`
    # rather than `degraded`.
    create_and_wait_pod(core_api, pod)
    volume = client.by_id_volume(volume_name)
    assert volume.robustness == "healthy"

    volume_file_md5sum1 = get_pod_data_md5sum(core_api, pod_name, data_path1)
    assert volume_file_md5sum1 == original_md5sum1
    volume_file_md5sum2 = get_pod_data_md5sum(core_api, pod_name, data_path2)
    assert volume_file_md5sum2 == original_md5sum2

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name)
    delete_and_wait_pv(core_api, pv_name)
def test_kubernetes_status(
        client, core_api, storage_class,  # NOQA
        statefulset, csi_pv, pvc, pod):  # NOQA
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert workloads[0]['podStatus'] == 'Running'
        if i == 1:
            assert len(k_status['workloadsStatus']) == 2
            if workloads[0]['podName'] == pod_info[i]['pod_name']:
                assert workloads[1]['podName'] == extra_pod_name
                assert workloads[0]['workloadName'] == statefulset_name
                assert workloads[0]['workloadType'] == 'StatefulSet'
                assert not workloads[1]['workloadName']
                assert not workloads[1]['workloadType']
            else:
                assert workloads[1]['podName'] == pod_info[i]['pod_name']
                assert workloads[0]['podName'] == extra_pod_name
                assert not workloads[0]['workloadName']
                assert not workloads[0]['workloadType']
                assert workloads[1]['workloadName'] == statefulset_name
                assert workloads[1]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running' and \
                        workloads[1]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert len(workloads) == 2
            assert workloads[0]['podStatus'] == 'Running'
            assert workloads[1]['podStatus'] == 'Running'

    ks_list = [{}, {}]
    delete_and_wait_statefulset_only(core_api, statefulset)
    # the extra pod is still using the 2nd volume
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['pvName'] = p['pv_name']
        ks_list[i]['pvStatus'] = 'Bound'
        ks_list[i]['namespace'] = 'default'
        ks_list[i]['pvcName'] = p['pvc_name']
        ks_list[i]['lastPVCRefAt'] = ''
        if i == 0:
            ks_list[i]['lastPodRefAt'] = 'not empty'
            ks_list[i]['workloadsStatus'] = [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': statefulset_name,
                    'workloadType': 'StatefulSet',
                },
            ]
        if i == 1:
            ks_list[i]['lastPodRefAt'] = ''
            ks_list[i]['workloadsStatus'] = [{
                'podName': extra_pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            }]
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted extra_pod, all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['lastPodRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pvc only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        ks_list[i]['pvStatus'] = 'Released'
        ks_list[i]['lastPVCRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pv only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        ks_list[i]['pvName'] = ''
        ks_list[i]['pvStatus'] = ''
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # reuse that volume
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        ks = {
            'pvName': p['pv_name'],
            'pvStatus': 'Bound',
            'namespace': 'default',
            'pvcName': p['pvc_name'],
            'lastPVCRefAt': '',
            'lastPodRefAt': '',
            'workloadsStatus': [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': '',
                    'workloadType': '',
                },
            ],
        }
        wait_volume_kubernetes_status(client, volume_name, ks)

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
def test_pvc_creation_with_default_sc_set(client, core_api, storage_class, pod):  # NOQA
    # set default storage class
    storage_class['metadata']['annotations'] = \
        {"storageclass.kubernetes.io/is-default-class": "true"}
    create_storage_class(storage_class)

    static_sc_name = "longhorn-static-test"
    setting = client.by_id_setting(SETTING_DEFAULT_LONGHORN_STATIC_SC)
    setting = client.update(setting, value=static_sc_name)
    assert setting["value"] == static_sc_name

    volume_name = "test-pvc-creation-with-sc"
    pod_name = "pod-" + volume_name
    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    pvc_name = "pvc-" + volume_name
    pvc_name_extra = "pvc-" + volume_name + "-extra"

    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)

    ret = core_api.list_namespaced_persistent_volume_claim(
        namespace='default')
    for item in ret.items:
        if item.metadata.name == pvc_name:
            pvc_found = item
            break
    assert pvc_found
    assert pvc_found.spec.storage_class_name == static_sc_name

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pvc_name,
        },
    }]
    create_and_wait_pod(core_api, pod)

    ks = {
        'pvName': pv_name,
        'pvStatus': 'Bound',
        'namespace': 'default',
        'pvcName': pvc_name,
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
        'workloadsStatus': [
            {
                'podName': pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            },
        ],
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name)

    # try to reuse the pv
    volume = wait_for_volume_detached(client, volume_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name_extra)
    pod['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = \
        pvc_name_extra
    create_and_wait_pod(core_api, pod)

    ks['pvcName'] = pvc_name_extra
    wait_volume_kubernetes_status(client, volume_name, ks)

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name_extra)
    delete_and_wait_pv(core_api, pv_name)

    # without default storage class
    delete_storage_class(storage_class['metadata']['name'])

    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)

    ret = core_api.list_namespaced_persistent_volume_claim(
        namespace='default')
    for item in ret.items:
        if item.metadata.name == pvc_name:
            pvc2 = item
            break
    assert pvc2
    assert pvc2.spec.storage_class_name == static_sc_name

    delete_and_wait_pvc(core_api, pvc_name)
    delete_and_wait_pv(core_api, pv_name)
def test_kubernetes_status(
        client, core_api, storage_class,  # NOQA
        statefulset, csi_pv, pvc, pod):  # NOQA
    """
    Test Volume feature: Kubernetes Status

    1. Create StorageClass with `reclaimPolicy = Retain`
    2. Create a statefulset `kubernetes-status-test` with the StorageClass
        1. The statefulset has a scale of 2.
    3. Get the volume name from the SECOND pod of the StatefulSet and create
       an `extra_pod` with the same volume on the same node
    4. Check the volumes that are used by the StatefulSet
        1. The volume used by the FIRST StatefulSet pod will have one workload
        2. The volume used by the SECOND StatefulSet pod will have two
           workloads
        3. Validate related status, e.g. pv/pod name/state, workload name/type
    5. Delete the StatefulSet (the extra pod keeps using the second volume),
       then check the volumes again
        1. PV/PVC should still be bound
        2. The volume used by the FIRST pod should have history data
        3. The volume used by the SECOND and extra pod should have current
           data pointing to the extra pod
    6. Delete the extra pod
        1. Now all the volumes should only have history data
           (`lastPodRefAt` set)
    7. Delete the PVC
        1. PVC should be updated with status `Released` and become history
           data
    8. Delete PV
        1. All the Kubernetes status information should be cleaned up.
    9. Reuse the two Longhorn volumes to create new pods
        1. Since `reclaimPolicy == Retain`, the volumes won't be deleted by
           Longhorn
        2. Check that the Kubernetes status is now updated, with pod info but
           empty workload
        3. Default Longhorn Static StorageClass will remove the PV with the
           PVC, but leave the Longhorn volume
    """
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]  # NOQA
        volume = client.by_id_volume(volume_name)
        k_status = volume.kubernetesStatus
        workloads = k_status.workloadsStatus
        assert k_status.pvName == p['pv_name']
        assert k_status.pvStatus == 'Bound'
        assert k_status.namespace == 'default'
        assert k_status.pvcName == p['pvc_name']
        assert not k_status.lastPVCRefAt
        assert not k_status.lastPodRefAt
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0].podName == p['pod_name']
            assert workloads[0].workloadName == statefulset_name
            assert workloads[0].workloadType == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0].podStatus == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume.kubernetesStatus
                workloads = k_status.workloadsStatus
            assert workloads[0].podStatus == 'Running'
        if i == 1:
            assert len(k_status.workloadsStatus) == 2
            if workloads[0].podName == pod_info[i]['pod_name']:
                assert workloads[1].podName == extra_pod_name
                assert workloads[0].workloadName == statefulset_name
                assert workloads[0].workloadType == 'StatefulSet'
                assert not workloads[1].workloadName
                assert not workloads[1].workloadType
            else:
                assert workloads[1].podName == pod_info[i]['pod_name']
                assert workloads[0].podName == extra_pod_name
                assert not workloads[0].workloadName
                assert not workloads[0].workloadType
                assert workloads[1].workloadName == statefulset_name
                assert workloads[1].workloadType == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0].podStatus == 'Running' and \
                        workloads[1].podStatus == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume.kubernetesStatus
                workloads = k_status.workloadsStatus
            assert len(workloads) == 2
            assert workloads[0].podStatus == 'Running'
            assert workloads[1].podStatus == 'Running'

    ks_list = [{}, {}]
    delete_and_wait_statefulset_only(core_api, statefulset)
    # the extra pod is still using the 2nd volume
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['pvName'] = p['pv_name']
        ks_list[i]['pvStatus'] = 'Bound'
        ks_list[i]['namespace'] = 'default'
        ks_list[i]['pvcName'] = p['pvc_name']
        ks_list[i]['lastPVCRefAt'] = ''
        if i == 0:
            ks_list[i]['lastPodRefAt'] = 'not empty'
            ks_list[i]['workloadsStatus'] = [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': statefulset_name,
                    'workloadType': 'StatefulSet',
                },
            ]
        if i == 1:
            ks_list[i]['lastPodRefAt'] = ''
            ks_list[i]['workloadsStatus'] = [{
                'podName': extra_pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            }]
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted extra_pod, all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['lastPodRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pvc only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        ks_list[i]['pvStatus'] = 'Released'
        ks_list[i]['lastPVCRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pv only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        ks_list[i]['pvName'] = ''
        ks_list[i]['pvStatus'] = ''
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # reuse that volume
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        csi_pv['spec']['storageClassName'] = \
            DEFAULT_LONGHORN_STATIC_STORAGECLASS_NAME
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        pvc['spec']['storageClassName'] = \
            DEFAULT_LONGHORN_STATIC_STORAGECLASS_NAME
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        ks = {
            'pvName': p['pv_name'],
            'pvStatus': 'Bound',
            'namespace': 'default',
            'pvcName': p['pvc_name'],
            'lastPVCRefAt': '',
            'lastPodRefAt': '',
            'workloadsStatus': [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': '',
                    'workloadType': '',
                },
            ],
        }
        wait_volume_kubernetes_status(client, volume_name, ks)

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
def restore_inc_test(client, core_api, volume_name, pod):  # NOQA
    std_volume = create_and_check_volume(client, volume_name, 2, SIZE)
    lht_host_id = get_self_host_id()
    std_volume.attach(hostId=lht_host_id)
    std_volume = common.wait_for_volume_healthy(client, volume_name)

    with pytest.raises(Exception) as e:
        std_volume.activate(frontend="blockdev")
    assert "already in active mode" in str(e.value)

    data0 = {'len': 4 * 1024, 'pos': 0}
    data0['content'] = common.generate_random_data(data0['len'])
    bv, backup0, _, data0 = create_backup(client, volume_name, data0)

    sb_volume0_name = "sb-0-" + volume_name
    sb_volume1_name = "sb-1-" + volume_name
    sb_volume2_name = "sb-2-" + volume_name
    client.create_volume(name=sb_volume0_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=backup0['url'],
                         frontend="", standby=True)
    client.create_volume(name=sb_volume1_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=backup0['url'],
                         frontend="", standby=True)
    client.create_volume(name=sb_volume2_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=backup0['url'],
                         frontend="", standby=True)
    common.wait_for_volume_restoration_completed(client, sb_volume0_name)
    common.wait_for_volume_restoration_completed(client, sb_volume1_name)
    common.wait_for_volume_restoration_completed(client, sb_volume2_name)
    sb_volume0 = common.wait_for_volume_healthy(client, sb_volume0_name)
    sb_volume1 = common.wait_for_volume_healthy(client, sb_volume1_name)
    sb_volume2 = common.wait_for_volume_healthy(client, sb_volume2_name)

    for i in range(RETRY_COUNTS):
        sb_volume0 = client.by_id_volume(sb_volume0_name)
        sb_volume1 = client.by_id_volume(sb_volume1_name)
        sb_volume2 = client.by_id_volume(sb_volume2_name)
        sb_engine0 = get_volume_engine(sb_volume0)
        sb_engine1 = get_volume_engine(sb_volume1)
        sb_engine2 = get_volume_engine(sb_volume2)
        if sb_volume0["lastBackup"] != backup0["name"] or \
                sb_volume1["lastBackup"] != backup0["name"] or \
                sb_volume2["lastBackup"] != backup0["name"] or \
                sb_engine0["lastRestoredBackup"] != backup0["name"] or \
                sb_engine1["lastRestoredBackup"] != backup0["name"] or \
                sb_engine2["lastRestoredBackup"] != backup0["name"]:
            time.sleep(RETRY_INTERVAL)
        else:
            break

    assert sb_volume0["standby"] is True
    assert sb_volume0["lastBackup"] == backup0["name"]
    assert sb_volume0["frontend"] == ""
    assert sb_volume0["disableFrontend"] is True
    assert sb_volume0["initialRestorationRequired"] is False
    sb_engine0 = get_volume_engine(sb_volume0)
    assert sb_engine0["lastRestoredBackup"] == backup0["name"]
    assert sb_engine0["requestedBackupRestore"] == backup0["name"]

    assert sb_volume1["standby"] is True
    assert sb_volume1["lastBackup"] == backup0["name"]
    assert sb_volume1["frontend"] == ""
    assert sb_volume1["disableFrontend"] is True
    assert sb_volume1["initialRestorationRequired"] is False
    sb_engine1 = get_volume_engine(sb_volume1)
    assert sb_engine1["lastRestoredBackup"] == backup0["name"]
    assert sb_engine1["requestedBackupRestore"] == backup0["name"]

    assert sb_volume2["standby"] is True
    assert sb_volume2["lastBackup"] == backup0["name"]
    assert sb_volume2["frontend"] == ""
    assert sb_volume2["disableFrontend"] is True
    assert sb_volume2["initialRestorationRequired"] is False
    sb_engine2 = get_volume_engine(sb_volume2)
    assert sb_engine2["lastRestoredBackup"] == backup0["name"]
    assert sb_engine2["requestedBackupRestore"] == backup0["name"]

    sb0_snaps = sb_volume0.snapshotList()
    assert len(sb0_snaps) == 2
    for s in sb0_snaps:
        if s['name'] != "volume-head":
            sb0_snap = s
    assert sb0_snaps
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotCreate()
    assert "cannot create snapshot for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotRevert(name=sb0_snap["name"])
    assert "cannot revert snapshot for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotDelete(name=sb0_snap["name"])
    assert "cannot delete snapshot for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotBackup(name=sb0_snap["name"])
    assert "cannot create backup for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.pvCreate(pvName=sb_volume0_name)
    assert "cannot create PV for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.pvcCreate(pvcName=sb_volume0_name)
    assert "cannot create PVC for standby volume" in str(e.value)

    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    with pytest.raises(Exception) as e:
        client.update(setting, value="random.backup.target")
    assert "cannot modify BackupTarget " \
           "since there are existing standby volumes" in str(e.value)

    with pytest.raises(Exception) as e:
        sb_volume0.activate(frontend="wrong_frontend")
    assert "invalid frontend" in str(e.value)

    activate_standby_volume(client, sb_volume0_name)
    sb_volume0 = client.by_id_volume(sb_volume0_name)
    sb_volume0.attach(hostId=lht_host_id)
    sb_volume0 = common.wait_for_volume_healthy(client, sb_volume0_name)
    check_volume_data(sb_volume0, data0, False)

    zero_string = b'\x00'.decode('utf-8')
    _, backup1, _, data1 = create_backup(
        client, volume_name,
        {'len': 2 * 1024, 'pos': 0, 'content': zero_string * 2 * 1024})

    # use this API to update the `lastBackup` field
    client.list_backupVolume()
    check_volume_last_backup(client, sb_volume1_name, backup1['name'])
    activate_standby_volume(client, sb_volume1_name)
    sb_volume1 = client.by_id_volume(sb_volume1_name)
    sb_volume1.attach(hostId=lht_host_id)
    sb_volume1 = common.wait_for_volume_healthy(client, sb_volume1_name)
    data0_modified = {
        'len': data0['len'] - data1['len'],
        'pos': data1['len'],
        'content': data0['content'][data1['len']:],
    }
    check_volume_data(sb_volume1, data0_modified, False)
    check_volume_data(sb_volume1, data1)

    data2 = {'len': 1 * 1024 * 1024, 'pos': 0}
    data2['content'] = common.generate_random_data(data2['len'])
    _, backup2, _, data2 = create_backup(client, volume_name, data2)

    client.list_backupVolume()
    check_volume_last_backup(client, sb_volume2_name, backup2['name'])
    activate_standby_volume(client, sb_volume2_name)
    sb_volume2 = client.by_id_volume(sb_volume2_name)
    sb_volume2.attach(hostId=lht_host_id)
    sb_volume2 = common.wait_for_volume_healthy(client, sb_volume2_name)
    check_volume_data(sb_volume2, data2)

    # allocate this activated volume to a pod
    sb_volume2.detach()
    sb_volume2 = common.wait_for_volume_detached(client, sb_volume2_name)

    create_pv_for_volume(client, core_api, sb_volume2, sb_volume2_name)
    create_pvc_for_volume(client, core_api, sb_volume2, sb_volume2_name)
    sb_volume2_pod_name = "pod-" + sb_volume2_name
    pod['metadata']['name'] = sb_volume2_pod_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': sb_volume2_name,
        },
    }]
    create_and_wait_pod(core_api, pod)

    sb_volume2 = client.by_id_volume(sb_volume2_name)
    k_status = sb_volume2["kubernetesStatus"]
    workloads = k_status['workloadsStatus']
    assert k_status['pvName'] == sb_volume2_name
    assert k_status['pvStatus'] == 'Bound'
    assert len(workloads) == 1
    for i in range(RETRY_COUNTS):
        if workloads[0]['podStatus'] == 'Running':
            break
        time.sleep(RETRY_INTERVAL)
        sb_volume2 = client.by_id_volume(sb_volume2_name)
        k_status = sb_volume2["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert len(workloads) == 1
    assert workloads[0]['podName'] == sb_volume2_pod_name
    assert workloads[0]['podStatus'] == 'Running'
    assert not workloads[0]['workloadName']
    assert not workloads[0]['workloadType']
    assert k_status['namespace'] == 'default'
    assert k_status['pvcName'] == sb_volume2_name
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    delete_and_wait_pod(core_api, sb_volume2_pod_name)
    delete_and_wait_pvc(core_api, sb_volume2_name)
    delete_and_wait_pv(core_api, sb_volume2_name)

    # cleanup
    std_volume.detach()
    sb_volume0.detach()
    sb_volume1.detach()
    std_volume = common.wait_for_volume_detached(client, volume_name)
    sb_volume0 = common.wait_for_volume_detached(client, sb_volume0_name)
    sb_volume1 = common.wait_for_volume_detached(client, sb_volume1_name)
    sb_volume2 = common.wait_for_volume_detached(client, sb_volume2_name)

    bv.backupDelete(name=backup2["name"])
    bv.backupDelete(name=backup1["name"])
    bv.backupDelete(name=backup0["name"])

    client.delete(std_volume)
    client.delete(sb_volume0)
    client.delete(sb_volume1)
    client.delete(sb_volume2)

    wait_for_volume_delete(client, volume_name)
    wait_for_volume_delete(client, sb_volume0_name)
    wait_for_volume_delete(client, sb_volume1_name)
    wait_for_volume_delete(client, sb_volume2_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
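
# activate_standby_volume() above promotes a DR (standby) volume so it can be
# attached with a block-device frontend. A hedged sketch only, assuming it
# wraps volume.activate() and then polls until the volume stops reporting
# `standby`; the suite's real helper may wait on additional fields.
def activate_standby_volume_sketch(client, volume_name, frontend="blockdev"):
    volume = client.by_id_volume(volume_name)
    volume.activate(frontend=frontend)
    for _ in range(RETRY_COUNTS):
        volume = client.by_id_volume(volume_name)
        if volume["standby"] is False:
            break
        time.sleep(RETRY_INTERVAL)
    assert volume["standby"] is False
    return volume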
def test_recurring_job_kubernetes_status(client, core_api, volume_name):  # NOQA
    """
    Test RecurringJob properly backs up the KubernetesStatus

    1. Set up a random backupstore.
    2. Create a volume.
    3. Create a PV from the volume, and verify the PV status.
    4. Create a backup recurring job to run every 2 minutes.
    5. Verify the recurring job runs correctly.
    6. Verify the backup contains the Kubernetes Status labels
    """
    set_random_backupstore(client)
    host_id = get_self_host_id()
    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    create_pv_for_volume(client, core_api, volume, pv_name)
    ks = {
        'pvName': pv_name,
        'pvStatus': 'Available',
        'namespace': '',
        'pvcName': '',
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    # Simple Backup Job that runs every 2 minutes, retains 1.
    jobs = [{
        "name": RECURRING_JOB_NAME,
        "cron": "*/2 * * * *",
        "task": "backup",
        "retain": 1
    }]
    volume.recurringUpdate(jobs=jobs)
    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot.removed is False:
            count += 1
    # 1 from Backup, 1 from Volume Head.
    assert count == 2

    # Verify the Labels on the actual Backup.
    bv = client.by_id_backupVolume(volume_name)
    backups = bv.backupList().data
    assert len(backups) == 1

    b = bv.backupGet(name=backups[0].name)
    status = json.loads(b.labels.get(KUBERNETES_STATUS_LABEL))
    assert b.labels.get(RECURRING_JOB_LABEL) == RECURRING_JOB_NAME
    assert status == {
        'lastPodRefAt': '',
        'lastPVCRefAt': '',
        'namespace': '',
        'pvcName': '',
        'pvName': pv_name,
        'pvStatus': 'Available',
        'workloadsStatus': None
    }
    # Two Labels: KubernetesStatus and RecurringJob.
    assert len(b.labels) == 2

    cleanup_volume(client, volume)
    delete_and_wait_pv(core_api, pv_name)
def test_backup_kubernetes_status(set_random_backupstore, client, core_api, pod): # NOQA """ Test that Backups have KubernetesStatus stored properly when there is an associated PersistentVolumeClaim and Pod. 1. Setup a random backupstore 2. Set settings Longhorn Static StorageClass to `longhorn-static-test` 3. Create a volume and PV/PVC. Verify the StorageClass of PVC 4. Create a Pod using the PVC. 5. Check volume's Kubernetes status to reflect PV/PVC/Pod correctly. 6. Create a backup for the volume. 7. Verify the labels of created backup reflect PV/PVC/Pod status. 8. Restore the backup to a volume. Wait for restoration to complete. 9. Check the volume's Kubernetes Status 1. Make sure the `lastPodRefAt` and `lastPVCRefAt` is snapshot created time 10. Delete the backup and restored volume. 11. Delete PV/PVC/Pod. 12. Verify volume's Kubernetes Status updated to reflect history data. 13. Attach the volume and create another backup. Verify the labels 14. Verify the volume's Kubernetes status. 15. Restore the previous backup to a new volume. Wait for restoration. 16. Verify the restored volume's Kubernetes status. 1. Make sure `lastPodRefAt` and `lastPVCRefAt` matched volume on step 12 """ host_id = get_self_host_id() static_sc_name = "longhorn-static-test" setting = client.by_id_setting(SETTING_DEFAULT_LONGHORN_STATIC_SC) setting = client.update(setting, value=static_sc_name) assert setting.value == static_sc_name volume_name = "test-backup-kubernetes-status-pod" # NOQA client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2) volume = wait_for_volume_detached(client, volume_name) pod_name = "pod-" + volume_name pv_name = "pv-" + volume_name pvc_name = "pvc-" + volume_name create_pv_for_volume(client, core_api, volume, pv_name) create_pvc_for_volume(client, core_api, volume, pvc_name) ret = core_api.list_namespaced_persistent_volume_claim(namespace='default') pvc_found = False for item in ret.items: if item.metadata.name == pvc_name: pvc_found = item break assert pvc_found assert pvc_found.spec.storage_class_name == static_sc_name pod['metadata']['name'] = pod_name pod['spec']['volumes'] = [{ 'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'], 'persistentVolumeClaim': { 'claimName': pvc_name, }, }] create_and_wait_pod(core_api, pod) ks = { 'lastPodRefAt': '', 'lastPVCRefAt': '', 'namespace': 'default', 'pvcName': pvc_name, 'pvName': pv_name, 'pvStatus': 'Bound', 'workloadsStatus': [{ 'podName': pod_name, 'podStatus': 'Running', 'workloadName': '', 'workloadType': '' }] } wait_volume_kubernetes_status(client, volume_name, ks) volume = wait_for_volume_healthy(client, volume_name) # Create Backup manually instead of calling create_backup since Kubernetes # is not guaranteed to mount our Volume to the test host. 
    snap = create_snapshot(client, volume_name)
    volume.snapshotBackup(name=snap.name)
    wait_for_backup_completion(client, volume_name, snap.name)
    _, b = find_backup(client, volume_name, snap.name)

    # Check the backup label.
    status = loads(b.labels.get(KUBERNETES_STATUS_LABEL))
    assert status == ks

    # Check the backup volume label.
    for _ in range(RETRY_COUNTS):
        bv = client.by_id_backupVolume(volume_name)
        if bv is not None and bv.labels is not None:
            break
        time.sleep(RETRY_INTERVAL)
    assert bv is not None and bv.labels is not None
    status = loads(bv.labels.get(KUBERNETES_STATUS_LABEL))
    assert status == ks

    restore_name = generate_volume_name()
    client.create_volume(name=restore_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=b.url)
    wait_for_volume_restoration_completed(client, restore_name)
    wait_for_volume_detached(client, restore_name)

    snapshot_created = b.snapshotCreated
    ks = {
        'lastPodRefAt': b.snapshotCreated,
        'lastPVCRefAt': b.snapshotCreated,
        'namespace': 'default',
        'pvcName': pvc_name,
        # Restoration should not apply PersistentVolume data.
        'pvName': '',
        'pvStatus': '',
        'workloadsStatus': [{
            'podName': pod_name,
            'podStatus': 'Running',
            'workloadName': '',
            'workloadType': ''
        }]
    }
    wait_volume_kubernetes_status(client, restore_name, ks)
    restore = client.by_id_volume(restore_name)
    # We need to compare LastPodRefAt and LastPVCRefAt manually since
    # wait_volume_kubernetes_status only checks for empty or non-empty state.
    assert restore.kubernetesStatus.lastPodRefAt == ks["lastPodRefAt"]
    assert restore.kubernetesStatus.lastPVCRefAt == ks["lastPVCRefAt"]

    delete_backup(client, bv.name, b.name)
    client.delete(restore)
    wait_for_volume_delete(client, restore_name)
    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name)
    delete_and_wait_pv(core_api, pv_name)

    # With the Pod, PVC, and PV deleted, the volume should have both Ref
    # fields set. Check that a new Backup and Restore will use this instead
    # of manually populating the Ref fields.
    ks = {
        'lastPodRefAt': 'NOT NULL',
        'lastPVCRefAt': 'NOT NULL',
        'namespace': 'default',
        'pvcName': pvc_name,
        'pvName': '',
        'pvStatus': '',
        'workloadsStatus': [{
            'podName': pod_name,
            'podStatus': 'Running',
            'workloadName': '',
            'workloadType': ''
        }]
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    volume = wait_for_volume_detached(client, volume_name)
    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    snap = create_snapshot(client, volume_name)
    volume.snapshotBackup(name=snap.name)
    volume = wait_for_backup_completion(client, volume_name, snap.name)
    bv, b = find_backup(client, volume_name, snap.name)
    new_b = bv.backupGet(name=b.name)
    status = loads(new_b.labels.get(KUBERNETES_STATUS_LABEL))
    # Check each field manually; we have no idea what the LastPodRefAt or the
    # LastPVCRefAt will be. We just know it shouldn't be SnapshotCreated.
    assert status['lastPodRefAt'] != snapshot_created
    assert status['lastPVCRefAt'] != snapshot_created
    assert status['namespace'] == "default"
    assert status['pvcName'] == pvc_name
    assert status['pvName'] == ""
    assert status['pvStatus'] == ""
    assert status['workloadsStatus'] == [{
        'podName': pod_name,
        'podStatus': 'Running',
        'workloadName': '',
        'workloadType': ''
    }]

    restore_name = generate_volume_name()
    client.create_volume(name=restore_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=b.url)
    wait_for_volume_restoration_completed(client, restore_name)
    wait_for_volume_detached(client, restore_name)

    ks = {
        'lastPodRefAt': status['lastPodRefAt'],
        'lastPVCRefAt': status['lastPVCRefAt'],
        'namespace': 'default',
        'pvcName': pvc_name,
        'pvName': '',
        'pvStatus': '',
        'workloadsStatus': [{
            'podName': pod_name,
            'podStatus': 'Running',
            'workloadName': '',
            'workloadType': ''
        }]
    }
    wait_volume_kubernetes_status(client, restore_name, ks)
    restore = client.by_id_volume(restore_name)
    assert restore.kubernetesStatus.lastPodRefAt == ks["lastPodRefAt"]
    assert restore.kubernetesStatus.lastPVCRefAt == ks["lastPVCRefAt"]

    # cleanup
    backupstore_cleanup(client)
    client.delete(restore)
    cleanup_volume(client, volume)
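
# NOTE: hedged sketch, not part of the original suite. Both versions of
# test_backup_kubernetes_status repeat the same two Ref-field assertions on a
# restored volume, because wait_volume_kubernetes_status only distinguishes
# empty from non-empty values and cannot verify the exact timestamps. The
# hypothetical helper below captures that repeated pattern; its name and
# placement are assumptions.
def assert_restore_ref_fields(restore, ks):  # NOQA
    """Compare lastPodRefAt/lastPVCRefAt on a restored volume exactly."""
    assert restore.kubernetesStatus.lastPodRefAt == ks["lastPodRefAt"]
    assert restore.kubernetesStatus.lastPVCRefAt == ks["lastPVCRefAt"]
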
def test_backup_kubernetes_status(client, core_api, pod):  # NOQA
    """
    Test that Backups have KubernetesStatus stored properly when there is an
    associated PersistentVolumeClaim and Pod.
    """
    host_id = get_self_host_id()
    static_sc_name = "longhorn-static-test"
    setting = client.by_id_setting(SETTING_DEFAULT_LONGHORN_STATIC_SC)
    setting = client.update(setting, value=static_sc_name)
    assert setting["value"] == static_sc_name

    volume_name = "test-backup-kubernetes-status-pod"
    client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pod_name = "pod-" + volume_name
    pv_name = "pv-" + volume_name
    pvc_name = "pvc-" + volume_name
    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)
    ret = core_api.list_namespaced_persistent_volume_claim(
        namespace='default')
    pvc_found = False
    for item in ret.items:
        if item.metadata.name == pvc_name:
            pvc_found = item
            break
    assert pvc_found
    assert pvc_found.spec.storage_class_name == static_sc_name

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pvc_name,
        },
    }]
    create_and_wait_pod(core_api, pod)

    ks = {
        'lastPodRefAt': '',
        'lastPVCRefAt': '',
        'namespace': 'default',
        'pvcName': pvc_name,
        'pvName': pv_name,
        'pvStatus': 'Bound',
        'workloadsStatus': [{
            'podName': pod_name,
            'podStatus': 'Running',
            'workloadName': '',
            'workloadType': ''
        }]
    }
    wait_volume_kubernetes_status(client, volume_name, ks)
    volume = wait_for_volume_healthy(client, volume_name)

    # Create the backup manually instead of calling create_backup since
    # Kubernetes is not guaranteed to mount our volume to the test host.
    snap = volume.snapshotCreate()
    volume.snapshotBackup(name=snap["name"])
    bv, b = find_backup(client, volume_name, snap["name"])
    new_b = bv.backupGet(name=b["name"])
    status = loads(new_b["labels"].get(KUBERNETES_STATUS_LABEL))
    assert status == ks

    restore_name = generate_volume_name()
    client.create_volume(name=restore_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=b["url"])
    wait_for_volume_restoration_completed(client, restore_name)
    wait_for_volume_detached(client, restore_name)

    snapshot_created = b["snapshotCreated"]
    ks = {
        'lastPodRefAt': b["snapshotCreated"],
        'lastPVCRefAt': b["snapshotCreated"],
        'namespace': 'default',
        'pvcName': pvc_name,
        # Restoration should not apply PersistentVolume data.
        'pvName': '',
        'pvStatus': '',
        'workloadsStatus': [{
            'podName': pod_name,
            'podStatus': 'Running',
            'workloadName': '',
            'workloadType': ''
        }]
    }
    wait_volume_kubernetes_status(client, restore_name, ks)
    restore = client.by_id_volume(restore_name)
    # We need to compare LastPodRefAt and LastPVCRefAt manually since
    # wait_volume_kubernetes_status only checks for empty or non-empty state.
    assert restore["kubernetesStatus"]["lastPodRefAt"] == ks["lastPodRefAt"]
    assert restore["kubernetesStatus"]["lastPVCRefAt"] == ks["lastPVCRefAt"]

    bv.backupDelete(name=b["name"])
    client.delete(restore)
    wait_for_volume_delete(client, restore_name)
    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name)
    delete_and_wait_pv(core_api, pv_name)

    # With the Pod, PVC, and PV deleted, the volume should have both Ref
    # fields set. Check that a new Backup and Restore will use this instead
    # of manually populating the Ref fields.
    ks = {
        'lastPodRefAt': 'NOT NULL',
        'lastPVCRefAt': 'NOT NULL',
        'namespace': 'default',
        'pvcName': pvc_name,
        'pvName': '',
        'pvStatus': '',
        'workloadsStatus': [{
            'podName': pod_name,
            'podStatus': 'Running',
            'workloadName': '',
            'workloadType': ''
        }]
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    volume = wait_for_volume_detached(client, volume_name)
    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    snap = volume.snapshotCreate()
    volume.snapshotBackup(name=snap["name"])
    bv, b = find_backup(client, volume_name, snap["name"])
    new_b = bv.backupGet(name=b["name"])
    status = loads(new_b["labels"].get(KUBERNETES_STATUS_LABEL))
    # Check each field manually; we have no idea what the LastPodRefAt or the
    # LastPVCRefAt will be. We just know it shouldn't be SnapshotCreated.
    assert status["lastPodRefAt"] != snapshot_created
    assert status["lastPVCRefAt"] != snapshot_created
    assert status["namespace"] == "default"
    assert status["pvcName"] == pvc_name
    assert status["pvName"] == ""
    assert status["pvStatus"] == ""
    assert status["workloadsStatus"] == [{
        'podName': pod_name,
        'podStatus': 'Running',
        'workloadName': '',
        'workloadType': ''
    }]

    restore_name = generate_volume_name()
    client.create_volume(name=restore_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=b["url"])
    wait_for_volume_restoration_completed(client, restore_name)
    wait_for_volume_detached(client, restore_name)

    ks = {
        'lastPodRefAt': status["lastPodRefAt"],
        'lastPVCRefAt': status["lastPVCRefAt"],
        'namespace': 'default',
        'pvcName': pvc_name,
        'pvName': '',
        'pvStatus': '',
        'workloadsStatus': [{
            'podName': pod_name,
            'podStatus': 'Running',
            'workloadName': '',
            'workloadType': ''
        }]
    }
    wait_volume_kubernetes_status(client, restore_name, ks)
    restore = client.by_id_volume(restore_name)
    assert restore["kubernetesStatus"]["lastPodRefAt"] == ks["lastPodRefAt"]
    assert restore["kubernetesStatus"]["lastPVCRefAt"] == ks["lastPVCRefAt"]

    bv.backupDelete(name=b["name"])
    client.delete(restore)
    cleanup_volume(client, volume)
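
# NOTE: hedged sketch, not part of the original suite. The two versions of
# test_backup_kubernetes_status above read the same data through attribute
# style (b.labels, restore.kubernetesStatus.lastPodRefAt) and dict style
# (b["labels"], restore["kubernetesStatus"]["lastPodRefAt"]); in both cases
# the KUBERNETES_STATUS_LABEL value is a JSON-encoded KubernetesStatus. A
# minimal standalone illustration of that decoding, using only the standard
# library and an assumed example payload:
import json as _json  # local alias to avoid shadowing the module-level import

_example_label = ('{"pvName": "pv-test", "pvStatus": "Bound", '
                  '"namespace": "default", "pvcName": "pvc-test", '
                  '"lastPVCRefAt": "", "lastPodRefAt": "", '
                  '"workloadsStatus": null}')
_decoded = _json.loads(_example_label)
assert _decoded["pvStatus"] == "Bound"
assert _decoded["workloadsStatus"] is None
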