def pytest_collection_modifyitems(config, items):
    """Skip collected tests that the current environment cannot run.

    - Skips "recurring_job" tests when the skip-recurring-job option is set.
    - Skips "flexvolume" tests on CSI environments and "csi" tests on
      flexvolume environments (the two drivers are mutually exclusive).
    - Skips "baseimage"/"mountdisk" tests when any Longhorn node lacks the
      mount-propagation condition.

    BUG FIX: the condition scan used dict.iteritems(), which only exists in
    Python 2; replaced with dict.items().
    """
    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    k8sconfig.load_incluster_config()
    core_api = k8sclient.CoreV1Api()

    check_longhorn(core_api)

    if config.getoption(SKIP_RECURRING_JOB_OPT):
        skip_upgrade = pytest.mark.skip(reason="remove " +
                                               SKIP_RECURRING_JOB_OPT +
                                               " option to run")
        for item in items:
            if "recurring_job" in item.keywords:
                item.add_marker(skip_upgrade)

    using_csi = check_csi(core_api)
    if using_csi:
        skip_upgrade = pytest.mark.skip(reason="environment is not using " +
                                               "flexvolume")
        for item in items:
            if "flexvolume" in item.keywords:
                item.add_marker(skip_upgrade)
    else:
        skip_upgrade = pytest.mark.skip(reason="environment is not " +
                                               "using csi")
        for item in items:
            if "csi" in item.keywords:
                item.add_marker(skip_upgrade)

    # Every node must report the mount-propagation condition as true;
    # bail out of the scan as soon as one node fails.
    all_nodes_support_mount_propagation = True
    for node in get_longhorn_api_client().list_node():
        node = wait_for_node_mountpropagation_condition(
            get_longhorn_api_client(), node["name"])
        if "conditions" not in node:
            all_nodes_support_mount_propagation = False
        else:
            conditions = node["conditions"]
            # Python 3 fix: iteritems() -> items().
            for key, condition in conditions.items():
                if key == NODE_CONDITION_MOUNTPROPAGATION and \
                        condition["status"] != CONDITION_STATUS_TRUE:
                    all_nodes_support_mount_propagation = False
                    break
        if not all_nodes_support_mount_propagation:
            break

    if not all_nodes_support_mount_propagation:
        skip_upgrade = pytest.mark.skip(reason="environment does not " +
                                               "support base image")
        skip_node = pytest.mark.skip(reason="environment does not " +
                                            "support mount disk")
        for item in items:
            if "baseimage" in item.keywords:
                item.add_marker(skip_upgrade)
            elif "mountdisk" in item.keywords:
                item.add_marker(skip_node)
def test_statefulset_restore(
        client, core_api, storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Create a backup for each pod.
    4. Delete the StatefulSet, including the Longhorn volumes.
    5. Create volumes and PV/PVC using previous backups from each Pod.
        1. PVs will be created using the previous names.
        2. PVCs will be created using previous name + "-2" due to
           statefulset has a naming policy for what should be PVC name for
           them.
    6. Create a new StatefulSet using the previous name + "-2"
    7. Wait for pods to be up.
    8. Verify the pods contain the previous backed up data
    """
    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)

    # StatefulSet fixture already cleans these up, use the manifests instead
    # of the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    # This variant of the test only supports CSI environments.
    assert csi
    pv['spec']['csi'] = {
        'driver': 'driver.longhorn.io',
        'fsType': 'ext4',
        'volumeAttributes': {
            'numberOfReplicas':
                storage_class['parameters']['numberOfReplicas'],
            'staleReplicaTimeout':
                storage_class['parameters']['staleReplicaTimeout']
        },
        'volumeHandle': ''
    }

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for pod in pod_info:
        pod['pod_name'] = pod['pod_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pod['pvc_name'] = pod['pvc_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')

        pv['metadata']['name'] = pod['pvc_name']
        # Restore the backup into a detached Longhorn volume, then hand the
        # volume to Kubernetes via a statically provisioned PV/PVC pair.
        client.create_volume(
            name=pod['pvc_name'],
            size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
            numberOfReplicas=int(
                storage_class['parameters']['numberOfReplicas']),
            fromBackup=pod['backup_snapshot']['url'])
        wait_for_volume_detached(client, pod['pvc_name'])
        pv['spec']['csi']['volumeHandle'] = pod['pvc_name']

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = pod['pvc_name']
        pvc['spec']['volumeName'] = pod['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        resp = read_volume_data(core_api, pod['pod_name'])
        assert resp == pod['data']
def test_statefulset_restore(client, core_api, storage_class,  # NOQA
                             statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.
    """
    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)

    # StatefulSet fixture already cleans these up, use the manifests instead
    # of the fixtures to avoid issues during teardown.
    volume_size = size_to_string(DEFAULT_VOLUME_SIZE * Gi)
    replica_count = storage_class['parameters']['numberOfReplicas']
    stale_timeout = storage_class['parameters']['staleReplicaTimeout']

    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': volume_size
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': volume_size
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    # Attach the driver-specific volume source to the PV manifest.
    if csi:
        pv['spec']['csi'] = {
            'driver': 'io.rancher.longhorn',
            'fsType': 'ext4',
            'volumeAttributes': {
                'numberOfReplicas': replica_count,
                'staleReplicaTimeout': stale_timeout
            },
            'volumeHandle': ''
        }
    else:
        pv['spec']['flexVolume'] = {
            'driver': 'rancher.io/longhorn',
            'fsType': 'ext4',
            'options': {
                'numberOfReplicas': replica_count,
                'staleReplicaTimeout': stale_timeout,
                'fromBackup': '',
                'size': volume_size
            }
        }

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for info in pod_info:
        info['pod_name'] = info['pod_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        info['pvc_name'] = info['pvc_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')

        pv['metadata']['name'] = info['pvc_name']
        if csi:
            # CSI: restore the backup into a Longhorn volume up front, then
            # point the PV at it via volumeHandle.
            client.create_volume(
                name=info['pvc_name'],
                size=volume_size,
                numberOfReplicas=int(replica_count),
                fromBackup=info['backup_snapshot']['url'])
            wait_for_volume_detached(client, info['pvc_name'])
            pv['spec']['csi']['volumeHandle'] = info['pvc_name']
        else:
            # FlexVolume: the driver restores from backup on attach.
            pv['spec']['flexVolume']['options']['fromBackup'] = \
                info['backup_snapshot']['url']

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = info['pvc_name']
        pvc['spec']['volumeName'] = info['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for info in pod_info:
        resp = read_volume_data(core_api, info['pod_name'])
        assert resp == info['data']
def test_statefulset_restore(
        client, core_api, storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.
    """
    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)
    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)

    # StatefulSet fixture already cleans these up, use the manifests instead
    # of the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    params = storage_class['parameters']
    if csi:
        pv['spec']['csi'] = {
            'driver': 'io.rancher.longhorn',
            'fsType': 'ext4',
            'volumeAttributes': {
                'numberOfReplicas': params['numberOfReplicas'],
                'staleReplicaTimeout': params['staleReplicaTimeout']
            },
            'volumeHandle': ''
        }
    else:
        pv['spec']['flexVolume'] = {
            'driver': 'rancher.io/longhorn',
            'fsType': 'ext4',
            'options': {
                'numberOfReplicas': params['numberOfReplicas'],
                'staleReplicaTimeout': params['staleReplicaTimeout'],
                'fromBackup': '',
                'size': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            }
        }

    def _to_v2(name):
        # Rename so the restored StatefulSet does not collide with the
        # original one; this also verifies volumes work under new names.
        return name.replace('statefulset-restore-test',
                            'statefulset-restore-test-2')

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for pod in pod_info:
        pod['pod_name'] = _to_v2(pod['pod_name'])
        pod['pvc_name'] = _to_v2(pod['pvc_name'])
        vol_name = pod['pvc_name']
        backup_url = pod['backup_snapshot']['url']

        pv['metadata']['name'] = vol_name
        if not csi:
            # FlexVolume restores from the backup URL on attach.
            pv['spec']['flexVolume']['options']['fromBackup'] = backup_url
        else:
            # CSI needs the Longhorn volume restored ahead of time and
            # referenced through volumeHandle.
            client.create_volume(
                name=vol_name,
                size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
                numberOfReplicas=int(params['numberOfReplicas']),
                fromBackup=backup_url)
            wait_for_volume_detached(client, vol_name)
            pv['spec']['csi']['volumeHandle'] = vol_name

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = vol_name
        pvc['spec']['volumeName'] = vol_name
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        assert read_volume_data(core_api, pod['pod_name']) == pod['data']