Code example #1
def delete_and_wait_statefulset_only(api, ss):
    pod_data = get_statefulset_pod_info(api, ss)

    apps_api = get_apps_api_client()
    try:
        apps_api.delete_namespaced_stateful_set(
            name=ss['metadata']['name'],
            namespace='default', body=k8sclient.V1DeleteOptions())
    except ApiException as e:
        assert e.status == 404

    for i in range(RETRY_COUNTS):
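        # List the StatefulSets in the namespace and stop polling once ours is gone.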
        ret = apps_api.list_namespaced_stateful_set(namespace='default')
        found = False
        for item in ret.items:
            if item.metadata.name == ss['metadata']['name']:
                found = True
                break
        if not found:
            break
        time.sleep(RETRY_INTERVAL)
    assert not found

    for p in pod_data:
        wait_delete_pod(api, p['pod_name'])
Code example #2
def test_statefulset_mount(client, core_api, storage_class, statefulset):  # NOQA
    """
    Tests that volumes provisioned for a StatefulSet can be properly created,
    mounted, unmounted, and deleted on the Kubernetes cluster.
    """

    statefulset_name = 'statefulset-mount-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        # Workaround for checking volume name since they differ per pod.
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
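        # The loop variable `pod` still points at the matched entry; remove it
        # so each pod can match at most one volume.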
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    # Confirm that we've iterated through all the volumes.
    assert len(pod_info) == 0
Code example #3
def test_recurring_job_in_storageclass(client, core_api, storage_class,
                                       statefulset):  # NOQA
    """
    Test creating a volume with a StorageClass that contains recurring jobs

    1. Create a StorageClass with recurring jobs
    2. Create a StatefulSet with PVC template and StorageClass
    3. Verify the recurring jobs run correctly.
    """
    set_random_backupstore(client)
    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class["parameters"]["recurringJobs"] = json.dumps(create_jobs1())

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    # 5 minutes
    time.sleep(300)
    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        check_jobs1_result(volume)
Code example #4
def test_statefulset_mount(client, core_api, storage_class,
                           statefulset):  # NOQA
    """
    Tests that volumes provisioned for a StatefulSet can be properly created,
    mounted, unmounted, and deleted on the Kubernetes cluster.
    """

    statefulset_name = 'statefulset-mount-test'
    update_test_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        # Workaround for checking volume name since they differ per pod.
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    # Confirm that we've iterated through all the volumes.
    assert len(pod_info) == 0
Code example #5
def test_statefulset_backup(client, core_api, storage_class,
                            statefulset):  # NOQA
    """
    Test that backups on StatefulSet volumes work properly.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.

    Then create backups using the following steps for each pod:

    1. Create a snapshot
    2. Write some data into it
    3. Create another snapshot `backup_snapshot`
    4. Create a third snapshot
    5. Backup the snapshot `backup_snapshot`
    6. Wait for backup to show up.
        1. Verify the backup information
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)
Code example #6
def test_statefulset_recurring_backup(set_random_backupstore, client, core_api, storage_class, statefulset):  # NOQA
    """
    Scenario: test recurring backups on StatefulSets

    Given 1 default backup recurring job created.

    When a statefulset is created.
    And data is written to every statefulset pod.
    And we wait for 5 minutes.

    Then 2 snapshots are created for every statefulset pod.
    """

    # backup every minute
    recurring_jobs = {
        "backup": {
            "task": "backup",
            "groups": ["default"],
            "cron": "* * * * *",
            "retain": 2,
            "concurrency": 2,
            "labels": {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])

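    # Give the once-per-minute recurring backup job time to run against the
    # data written above.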
    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot.removed is False:
                count += 1

        # one backup + volume-head
        assert count == 2
Code example #7
def delete_and_wait_statefulset_only(api, ss):
    pod_data = get_statefulset_pod_info(api, ss)

    apps_api = get_apps_api_client()
    try:
        apps_api.delete_namespaced_stateful_set(
            name=ss['metadata']['name'],
            namespace='default',
            body=k8sclient.V1DeleteOptions())
    except ApiException as e:
        assert e.status == 404

    for i in range(RETRY_COUNTS):
        ret = apps_api.list_namespaced_stateful_set(namespace='default')
        found = False
        for item in ret.items:
            if item.metadata.name == ss['metadata']['name']:
                found = True
                break
        if not found:
            break
        time.sleep(RETRY_INTERVAL)
    assert not found

    for p in pod_data:
        wait_delete_pod(api, p['pod_uid'])
Code example #8
def test_statefulset_recurring_backup(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that recurring backups on StatefulSets work properly.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Write some data to every pod
    4. Schedule recurring jobs for volumes using Longhorn API
    5. Wait for 5 minutes
    6. Verify the snapshots created by the recurring jobs.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    # backup every minute
    job_backup = {
        "name": "backup",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

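    # Give the per-minute recurring backup job time to run.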
    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot.removed is False:
                count += 1

        # one backup + volume-head
        assert count == 2
Code example #9
def test_statefulset_backup(client, core_api, storage_class, statefulset):  # NOQA
    """
    Test that backups on StatefulSet volumes work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)
Code example #10
def test_statefulset_backup(client, core_api, storage_class,
                            statefulset):  # NOQA
    """
    Test that backups on StatefulSet volumes work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_test_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)
Code example #11
def test_recurring_job_in_storageclass(client, core_api, storage_class, statefulset):  # NOQA
    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class['parameters']['recurringJobs'] = json.dumps(create_jobs1())

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    # 5 minutes
    time.sleep(300)
    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        check_jobs1_result(volume)
Code example #12
def test_recurring_job_in_storageclass(client, core_api, storage_class,
                                       statefulset):  # NOQA
    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class['parameters']['recurringJobs'] = json.dumps(create_jobs1())

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    # 5 minutes
    time.sleep(300)
    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        check_jobs1_result(volume)
Code example #13
def test_statefulset_recurring_backup(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that recurring backups on StatefulSets work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_test_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    # backup every minute
    job_backup = {
        "name": "backup",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

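    # Wait long enough for the per-minute recurring backup job (retain: 2) to run.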
    time.sleep(300)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot['removed'] is False:
                count += 1

        # two backups + volume-head
        assert count == 3
Code example #14
def test_recurring_job_in_storageclass(set_random_backupstore, client,
                                       core_api, storage_class,
                                       statefulset):  # NOQA
    """
    Test creating a volume with a StorageClass that contains recurring jobs

    1. Create a StorageClass with recurring jobs
    2. Create a StatefulSet with PVC template and StorageClass
    3. Verify the recurring jobs run correctly.
    """
    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class["parameters"]["recurringJobs"] = json.dumps(create_jobs1())

    create_storage_class(storage_class)

    # wait until the beginning of an even minute
    wait_until_begin_of_an_even_minute()

    start_time = datetime.utcnow()
    create_and_wait_statefulset(statefulset)
    statefulset_creating_duration = datetime.utcnow() - start_time

    assert 150 > statefulset_creating_duration.seconds

    # We want to write data exactly at the 150th second since the start_time
    time.sleep(150 - statefulset_creating_duration.seconds)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]
    pod_names = [p['pod_name'] for p in pod_info]

    # write random data to volume to trigger recurring snapshot and backup job
    volume_data_path = "/data/test"
    for pod_name in pod_names:
        write_pod_volume_random_data(core_api, pod_name, volume_data_path, 2)

    time.sleep(150)  # 2.5 minutes

    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        check_jobs1_result(volume)
Code example #15
def test_statefulset_recurring_backup(client, core_api, storage_class,  # NOQA
                                      statefulset):  # NOQA
    """
    Test that recurring backups on StatefulSets work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    # backup every minute
    job_backup = {"name": "backup", "cron": "* * * * *",
                  "task": "backup", "retain": 2}
    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(300)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot['removed'] is False:
                count += 1

        # two backups + volume-head
        assert count == 3
Code example #16
def test_statefulset_restore(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.
    """
    statefulset_name = 'statefulset-restore-test'
    update_test_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = True
    try:
        core_api.read_namespaced_pod(name='csi-provisioner-0',
                                     namespace='longhorn-system')
    except ApiException as e:
        if (e.status == 404):
            csi = False

    # StatefulSet fixture already cleans these up, use the manifests instead of
    # the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

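    # Build the PV volume source for whichever driver is installed: CSI or FlexVolume.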
    if csi:
        pv['spec']['csi'] = {
            'driver': 'io.rancher.longhorn',
            'fsType': 'ext4',
            'volumeAttributes': {
                'numberOfReplicas':
                storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                storage_class['parameters']['staleReplicaTimeout']
            },
            'volumeHandle': ''
        }
    else:
        pv['spec']['flexVolume'] = {
            'driver': 'rancher.io/longhorn',
            'fsType': 'ext4',
            'options': {
                'numberOfReplicas':
                storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                storage_class['parameters']['staleReplicaTimeout'],
                'fromBackup':
                '',
                'size':
                size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            }
        }

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for pod in pod_info:
        pod['pod_name'] = pod['pod_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pod['pvc_name'] = pod['pvc_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')

        pv['metadata']['name'] = pod['pvc_name']
        if csi:
            client.create_volume(
                name=pod['pvc_name'],
                size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
                numberOfReplicas=int(
                    storage_class['parameters']['numberOfReplicas']),
                fromBackup=pod['backup_snapshot']['url'])
            wait_for_volume_detached(client, pod['pvc_name'])

            pv['spec']['csi']['volumeHandle'] = pod['pvc_name']
        else:
            pv['spec']['flexVolume']['options']['fromBackup'] = \
                pod['backup_snapshot']['url']

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = pod['pvc_name']
        pvc['spec']['volumeName'] = pod['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                           namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_test_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        resp = read_volume_data(core_api, pod['pod_name'])
        assert resp == pod['data']
Code example #17
def test_statefulset_restore(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Create a backup for each pod.
    4. Delete the StatefulSet, including the Longhorn volumes.
    5. Create volumes and PV/PVC using previous backups from each Pod.
        1. PVs will be created using the previous names.
        2. PVCs will be created using the previous name + "-2" because the
        StatefulSet has a naming policy for its PVC names.
    6. Create a new StatefulSet using the previous name + "-2"
    7. Wait for pods to be up.
        1. Verify the pods contain the previously backed up data
    """

    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)

    # StatefulSet fixture already cleans these up, use the manifests instead of
    # the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    assert csi

    pv['spec']['csi'] = {
        'driver': 'driver.longhorn.io',
        'fsType': 'ext4',
        'volumeAttributes': {
            'numberOfReplicas':
            storage_class['parameters']['numberOfReplicas'],
            'staleReplicaTimeout':
            storage_class['parameters']['staleReplicaTimeout']
        },
        'volumeHandle': ''
    }

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for pod in pod_info:
        pod['pod_name'] = pod['pod_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pod['pvc_name'] = pod['pvc_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pv['metadata']['name'] = pod['pvc_name']

        client.create_volume(
            name=pod['pvc_name'],
            size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
            numberOfReplicas=int(
                storage_class['parameters']['numberOfReplicas']),
            fromBackup=pod['backup_snapshot']['url'])
        wait_for_volume_detached(client, pod['pvc_name'])

        pv['spec']['csi']['volumeHandle'] = pod['pvc_name']

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = pod['pvc_name']
        pvc['spec']['volumeName'] = pod['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                           namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        resp = read_volume_data(core_api, pod['pod_name'])
        assert resp == pod['data']
Code example #18
def test_kubernetes_status(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset,
        csi_pv,
        pvc,
        pod):  # NOQA
    """
    Test Volume feature: Kubernetes Status

    1. Create StorageClass with `reclaimPolicy = Retain`
    2. Create a statefulset `kubernetes-status-test` with the StorageClass
        1. The statefulset has scale of 2.
    3. Get the volume name from the SECOND pod of the StatefulSet and
    create an `extra_pod` with the same volume on the same node
    4. Check the volumes used by the StatefulSet
        1. The volume used by the FIRST StatefulSet pod will have one workload
        2. The volume used by the SECOND StatefulSet pod will have two
        workloads
        3. Validate related status, e.g. pv/pod name/state, workload
        name/type
    5. Check the volumes again
        1. PV/PVC should still be bound
        2. The volume used by the FIRST pod should have history data
        3. The volume used by the SECOND and extra pod should have current data
        point to the extra pod
    6. Delete the extra pod
        1. Now all the volumes should only have history data (`lastPodRefAt`
        set)
    7. Delete the PVC
        1. The PV should be updated with status `Released` and the PVC becomes
        history data
    8. Delete PV
        1. All the Kubernetes status information should be cleaned up.
    9. Reuse the two Longhorn volumes to create new pods
        1. Since `reclaimPolicy == Retain`, the volumes won't be deleted by
        Longhorn
        2. Check that the Kubernetes status is now updated, with pod info but
        an empty workload
        3. The default Longhorn Static StorageClass will remove the PV along
        with the PVC, but leave the Longhorn volume
    """
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

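    # Create an extra pod on the same node that mounts the volume of the
    # SECOND StatefulSet pod.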
    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name':
        pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]  # NOQA
        volume = client.by_id_volume(volume_name)
        k_status = volume.kubernetesStatus
        workloads = k_status.workloadsStatus
        assert k_status.pvName == p['pv_name']
        assert k_status.pvStatus == 'Bound'
        assert k_status.namespace == 'default'
        assert k_status.pvcName == p['pvc_name']
        assert not k_status.lastPVCRefAt
        assert not k_status.lastPodRefAt
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0].podName == p['pod_name']
            assert workloads[0].workloadName == statefulset_name
            assert workloads[0].workloadType == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0].podStatus == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume.kubernetesStatus
                workloads = k_status.workloadsStatus
            assert workloads[0].podStatus == 'Running'
        if i == 1:
            assert len(k_status.workloadsStatus) == 2
            if workloads[0].podName == pod_info[i]['pod_name']:
                assert workloads[1].podName == extra_pod_name
                assert workloads[0].workloadName == statefulset_name
                assert workloads[0].workloadType == 'StatefulSet'
                assert not workloads[1].workloadName
                assert not workloads[1].workloadType
            else:
                assert workloads[1].podName == pod_info[i]['pod_name']
                assert workloads[0].podName == extra_pod_name
                assert not workloads[0].workloadName
                assert not workloads[0].workloadType
                assert workloads[1].workloadName == statefulset_name
                assert workloads[1].workloadType == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0].podStatus == 'Running' and \
                        workloads[1].podStatus == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume.kubernetesStatus
                workloads = k_status.workloadsStatus
                assert len(workloads) == 2
            assert workloads[0].podStatus == 'Running'
            assert workloads[1].podStatus == 'Running'

    ks_list = [{}, {}]
    delete_and_wait_statefulset_only(core_api, statefulset)
    # the extra pod is still using the 2nd volume
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['pvName'] = p['pv_name']
        ks_list[i]['pvStatus'] = 'Bound'
        ks_list[i]['namespace'] = 'default'
        ks_list[i]['pvcName'] = p['pvc_name']
        ks_list[i]['lastPVCRefAt'] = ''
        if i == 0:
            ks_list[i]['lastPodRefAt'] = 'not empty'
            ks_list[i]['workloadsStatus'] = [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': statefulset_name,
                    'workloadType': 'StatefulSet',
                },
            ]
        if i == 1:
            ks_list[i]['lastPodRefAt'] = ''
            ks_list[i]['workloadsStatus'] = [{
                'podName': extra_pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            }]
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted extra_pod, all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['lastPodRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pvc only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        ks_list[i]['pvStatus'] = 'Released'
        ks_list[i]['lastPVCRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pv only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        ks_list[i]['pvName'] = ''
        ks_list[i]['pvStatus'] = ''
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # reuse that volume
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        csi_pv['spec']['storageClassName'] = \
            DEFAULT_LONGHORN_STATIC_STORAGECLASS_NAME
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        pvc['spec']['storageClassName'] = \
            DEFAULT_LONGHORN_STATIC_STORAGECLASS_NAME
        core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                           namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name':
            pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        ks = {
            'pvName':
            p['pv_name'],
            'pvStatus':
            'Bound',
            'namespace':
            'default',
            'pvcName':
            p['pvc_name'],
            'lastPVCRefAt':
            '',
            'lastPodRefAt':
            '',
            'workloadsStatus': [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': '',
                    'workloadType': '',
                },
            ],
        }
        wait_volume_kubernetes_status(client, volume_name, ks)

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
Code example #19
def test_statefulset_scaling(client, core_api, storage_class,
                             statefulset):  # NOQA
    """
    Test that scaling up a StatefulSet successfully provisions new volumes.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Verify the properties of the volumes.
    4. Scale the StatefulSet to 3 replicas
    5. Wait for the new pod to become ready.
    6. Verify the new volume properties.
    """

    statefulset_name = 'statefulset-scaling-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        found = False
        for pod in pod_info:
            if v.name == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v.size == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v.numberOfReplicas == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v.state == 'attached'
    assert len(pod_info) == 0

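    # Scale the StatefulSet up to 3 replicas and wait until all of them report ready.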
    statefulset['spec']['replicas'] = replicas = 3
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})
    for i in range(DEFAULT_POD_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(name=statefulset_name,
                                                      namespace='default')
        if s_set.status.ready_replicas == replicas:
            break
        time.sleep(DEFAULT_POD_INTERVAL)
    assert s_set.status.ready_replicas == replicas

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == replicas
    for v in volumes:
        found = False
        for pod in pod_info:
            if v.name == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v.size == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v.numberOfReplicas == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v.state == 'attached'
    assert len(pod_info) == 0
Code example #20
def test_statefulset_scaling(client, core_api, storage_class, statefulset):  # NOQA
    """
    Test that scaling up a StatefulSet successfully provisions new volumes.
    """

    statefulset_name = 'statefulset-scaling-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    assert len(pod_info) == 0

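    # Scale the StatefulSet up to 3 replicas and wait until all of them report ready.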
    statefulset['spec']['replicas'] = replicas = 3
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={
            'spec': {
                'replicas': replicas
            }
        })
    for i in range(DEFAULT_POD_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(
            name=statefulset_name,
            namespace='default')
        if s_set.status.ready_replicas == replicas:
            break
        time.sleep(DEFAULT_POD_INTERVAL)
    assert s_set.status.ready_replicas == replicas

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == replicas
    for v in volumes:
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    assert len(pod_info) == 0
Code example #21
def test_upgrade(upgrade_image_tag, settings_reset, volume_name, pod_make, statefulset, storage_class):  # NOQA
    """
    Test Longhorn upgrade

    Prerequisite:
      - Disable Auto Salvage Setting

    1. Find the upgrade image tag
    2. Create a volume, generate and write data into the volume.
    3. Create a Pod using a volume, generate and write data
    4. Create a StatefulSet with 2 replicas,
       generate and write data to their volumes
    5. Keep all volumes attached
    6. Upgrade Longhorn system.
    7. Check that the Pod and StatefulSet didn't restart after the upgrade
    8. Check all volumes' data
    9. Write data to the StatefulSet pods and the attached volume
    10. Check the data written to the StatefulSet pods and the attached volume.
    11. Detach the volume, and delete the Pod and
        StatefulSet to detach their volumes
    12. Upgrade all volumes' engine images.
    13. Attach the volume, and recreate the Pod and StatefulSet
    14. Check all volumes' data
    """
    new_ei_name = "longhornio/longhorn-engine:" + upgrade_image_tag

    client = get_longhorn_api_client()
    core_api = get_core_api_client()
    host_id = get_self_host_id()
    pod_data_path = "/data/test"

    pod_volume_name = generate_volume_name()

    auto_salvage_setting = client.by_id_setting(SETTING_AUTO_SALVAGE)
    setting = client.update(auto_salvage_setting, value="false")

    assert setting.name == SETTING_AUTO_SALVAGE
    assert setting.value == "false"

    # Create Volume attached to a node.
    volume1 = create_and_check_volume(client,
                                      volume_name,
                                      size=SIZE)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)
    volume1_data = write_volume_random_data(volume1)

    # Create Volume used by Pod
    pod_name, pv_name, pvc_name, pod_md5sum = \
        prepare_pod_with_data_in_mb(client, core_api,
                                    pod_make, pod_volume_name,
                                    data_path=pod_data_path,
                                    add_liveness_prope=False)

    # Create multiple volumes used by StatefulSet
    statefulset_name = 'statefulset-upgrade-test'
    update_statefulset_manifests(statefulset,
                                 storage_class,
                                 statefulset_name)
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)
    statefulset_pod_info = get_statefulset_pod_info(core_api, statefulset)

    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api,
                              sspod_info['pod_name'],
                              sspod_info['data'])

    # upgrade Longhorn
    assert longhorn_upgrade(upgrade_image_tag)

    client = get_longhorn_api_client()

    # wait for 1 minute before checking pod restarts
    time.sleep(60)

    pod = core_api.read_namespaced_pod(name=pod_name,
                                       namespace='default')
    assert pod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        sspod = core_api.read_namespaced_pod(name=sspod_info['pod_name'],
                                             namespace='default')
        assert \
            sspod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)

    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api,
                              sspod_info['pod_name'],
                              sspod_info['data'])

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    volume1 = client.by_id_volume(volume_name)
    volume1_data = write_volume_random_data(volume1)
    check_volume_data(volume1, volume1_data)

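    # Scale the StatefulSet down to 0 and detach the remaining volumes so
    # their engines can be upgraded.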
    statefulset['spec']['replicas'] = replicas = 0
    apps_api = get_apps_api_client()

    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={
            'spec': {
                'replicas': replicas
            }
        })

    delete_and_wait_pod(core_api, pod_name)

    volume = client.by_id_volume(volume_name)
    volume.detach()

    volumes = client.list_volume()

    for v in volumes:
        wait_for_volume_detached(client, v.name)

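    # Find the engine image matching the upgrade tag and upgrade every volume to it.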
    engineimages = client.list_engine_image()

    for ei in engineimages:
        if ei.image == new_ei_name:
            new_ei = ei

    volumes = client.list_volume()

    for v in volumes:
        volume = client.by_id_volume(v.name)
        volume.engineUpgrade(image=new_ei.image)

    statefulset['spec']['replicas'] = replicas = 2
    apps_api = get_apps_api_client()

    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={
            'spec': {
                'replicas': replicas
            }
        })

    wait_statefulset(statefulset)

    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    volume1 = client.by_id_volume(volume_name)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)
Code example #22
def test_statefulset_restore(client, core_api, storage_class,  # NOQA
                             statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.
    """

    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)

    # StatefulSet fixture already cleans these up, use the manifests instead of
    # the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': [
                'ReadWriteOnce'
            ],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    if csi:
        pv['spec']['csi'] = {
            'driver': 'io.rancher.longhorn',
            'fsType': 'ext4',
            'volumeAttributes': {
                'numberOfReplicas':
                    storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                    storage_class['parameters']['staleReplicaTimeout']
            },
            'volumeHandle': ''
        }
    else:
        pv['spec']['flexVolume'] = {
            'driver': 'rancher.io/longhorn',
            'fsType': 'ext4',
            'options': {
                'numberOfReplicas':
                    storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                    storage_class['parameters']['staleReplicaTimeout'],
                'fromBackup': '',
                'size': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            }
        }

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for pod in pod_info:
        pod['pod_name'] = pod['pod_name'].replace('statefulset-restore-test',
                                                  'statefulset-restore-test-2')
        pod['pvc_name'] = pod['pvc_name'].replace('statefulset-restore-test',
                                                  'statefulset-restore-test-2')

        pv['metadata']['name'] = pod['pvc_name']
        if csi:
            client.create_volume(
                name=pod['pvc_name'],
                size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
                numberOfReplicas=int(
                    storage_class['parameters']['numberOfReplicas']),
                fromBackup=pod['backup_snapshot']['url'])
            wait_for_volume_detached(client, pod['pvc_name'])

            pv['spec']['csi']['volumeHandle'] = pod['pvc_name']
        else:
            pv['spec']['flexVolume']['options']['fromBackup'] = \
                pod['backup_snapshot']['url']

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = pod['pvc_name']
        pvc['spec']['volumeName'] = pod['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc,
            namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        resp = read_volume_data(core_api, pod['pod_name'])
        assert resp == pod['data']
Code example #23
def test_statefulset_backup(client, core_api, storage_class,
                            statefulset):  # NOQA
    """
    Test that backups on StatefulSet volumes work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_test_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        # Create backup.
        volume = client.by_id_volume(pod['pv_name'])
        volume.snapshotCreate()
        write_volume_data(core_api, pod['pod_name'], pod['data'])
        pod['backup_snapshot'] = volume.snapshotCreate()
        volume.snapshotCreate()
        volume.snapshotBackup(name=pod['backup_snapshot']['name'])

        # Wait for backup to appear.
        found = False
        for i in range(100):
            backup_volumes = client.list_backupVolume()
            for bv in backup_volumes:
                if bv['name'] == pod['pv_name']:
                    found = True
                    break
            if found:
                break
            time.sleep(DEFAULT_POD_INTERVAL)
        assert found

        found = False
        for i in range(20):
            backups = bv.backupList()
            for b in backups:
                if b['snapshotName'] == pod['backup_snapshot']['name']:
                    found = True
                    break
            if found:
                break
            time.sleep(DEFAULT_POD_INTERVAL)
        assert found

        # Make sure backup has the correct attributes.
        new_b = bv.backupGet(name=b["name"])
        assert new_b["name"] == b["name"]
        assert new_b["url"] == b["url"]
        assert new_b["snapshotName"] == b["snapshotName"]
        assert new_b["snapshotCreated"] == b["snapshotCreated"]
        assert new_b["created"] == b["created"]
        assert new_b["volumeName"] == b["volumeName"]
        assert new_b["volumeSize"] == b["volumeSize"]
        assert new_b["volumeCreated"] == b["volumeCreated"]
Code example #24
def test_kubernetes_status(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset,
        csi_pv,
        pvc,
        pod):  # NOQA
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

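    # Create an extra pod on the same node that mounts the volume of the
    # SECOND StatefulSet pod.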
    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name':
        pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert workloads[0]['podStatus'] == 'Running'
        if i == 1:
            assert len(k_status['workloadsStatus']) == 2
            if workloads[0]['podName'] == pod_info[i]['pod_name']:
                assert workloads[1]['podName'] == extra_pod_name
                assert workloads[0]['workloadName'] == statefulset_name
                assert workloads[0]['workloadType'] == 'StatefulSet'
                assert not workloads[1]['workloadName']
                assert not workloads[1]['workloadType']
            else:
                assert workloads[1]['podName'] == pod_info[i]['pod_name']
                assert workloads[0]['podName'] == extra_pod_name
                assert not workloads[0]['workloadName']
                assert not workloads[0]['workloadType']
                assert workloads[1]['workloadName'] == statefulset_name
                assert workloads[1]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running' and \
                        workloads[1]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
                assert len(workloads) == 2
            assert workloads[0]['podStatus'] == 'Running'
            assert workloads[1]['podStatus'] == 'Running'

    ks_list = [{}, {}]
    delete_and_wait_statefulset_only(core_api, statefulset)
    # the extra pod is still using the 2nd volume
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['pvName'] = p['pv_name']
        ks_list[i]['pvStatus'] = 'Bound'
        ks_list[i]['namespace'] = 'default'
        ks_list[i]['pvcName'] = p['pvc_name']
        ks_list[i]['lastPVCRefAt'] = ''
        if i == 0:
            ks_list[i]['lastPodRefAt'] = 'not empty'
            ks_list[i]['workloadsStatus'] = [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': statefulset_name,
                    'workloadType': 'StatefulSet',
                },
            ]
        if i == 1:
            ks_list[i]['lastPodRefAt'] = ''
            ks_list[i]['workloadsStatus'] = [{
                'podName': extra_pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            }]
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted extra_pod, all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['lastPodRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pvc only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        ks_list[i]['pvStatus'] = 'Released'
        ks_list[i]['lastPVCRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # delete only the PVs; the volumes lose their PV references
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        ks_list[i]['pvName'] = ''
        ks_list[i]['pvStatus'] = ''
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # reuse each volume by recreating a PV, PVC, and pod around it
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

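        # Point a new PV at the existing Longhorn volume through its CSI
        # volumeHandle, bind a new PVC to it, then mount it from a bare pod.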
        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                           namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        ks = {
            'pvName': p['pv_name'],
            'pvStatus': 'Bound',
            'namespace': 'default',
            'pvcName': p['pvc_name'],
            'lastPVCRefAt': '',
            'lastPodRefAt': '',
            'workloadsStatus': [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': '',
                    'workloadType': '',
                },
            ],
        }
        wait_volume_kubernetes_status(client, volume_name, ks)

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since the persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
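
The example above relies on a wait_volume_kubernetes_status helper to poll Longhorn until the reported kubernetesStatus matches the expected dict, but its implementation is not shown here. A minimal sketch of such a poller, assuming the RETRY_COUNTS/RETRY_INTERVAL constants and the 'not empty' placeholder convention the example uses, could look like this (an illustration, not the project's actual helper):

def wait_volume_kubernetes_status(client, volume_name, expected_ks):
    # Re-read the volume between checks so we never compare stale data.
    matched = False
    for _ in range(RETRY_COUNTS):
        k_status = client.by_id_volume(volume_name)["kubernetesStatus"]
        matched = True
        for field, expected in expected_ks.items():
            if field == 'workloadsStatus':
                # Workload entries need a per-entry comparison; omitted here.
                continue
            actual = k_status[field]
            if expected == 'not empty':
                matched = matched and bool(actual)
            else:
                matched = matched and actual == expected
        if matched:
            break
        time.sleep(RETRY_INTERVAL)
    assert matched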
Code example #25
def test_kubernetes_status(client, core_api, storage_class,  # NOQA
                           statefulset, csi_pv, pvc, pod):  # NOQA
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

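    # Create an extra bare pod on the same node as the second StatefulSet pod
    # and mount that pod's PVC directly, so the 2nd volume has two workloads.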
    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

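    # Check the kubernetesStatus Longhorn reports while both the StatefulSet
    # pods and the extra pod are running.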
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert workloads[0]['podStatus'] == 'Running'
        if i == 1:
            assert len(k_status['workloadsStatus']) == 2
            if workloads[0]['podName'] == pod_info[i]['pod_name']:
                assert workloads[1]['podName'] == extra_pod_name
                assert workloads[0]['workloadName'] == statefulset_name
                assert workloads[0]['workloadType'] == 'StatefulSet'
                assert not workloads[1]['workloadName']
                assert not workloads[1]['workloadType']
            else:
                assert workloads[1]['podName'] == pod_info[i]['pod_name']
                assert workloads[0]['podName'] == extra_pod_name
                assert not workloads[0]['workloadName']
                assert not workloads[0]['workloadType']
                assert workloads[1]['workloadName'] == statefulset_name
                assert workloads[1]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running' and \
                        workloads[1]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
                assert len(workloads) == 2
            assert workloads[0]['podStatus'] == 'Running'
            assert workloads[1]['podStatus'] == 'Running'

    # the extra pod is still using the 2nd volume
    delete_and_wait_statefulset_only(core_api, statefulset)
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            assert k_status['lastPodRefAt']
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']
            assert not k_status['lastPodRefAt']

    # delete the extra pod; afterwards no volume has a running workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert k_status['lastPodRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']

    # delete only the PVCs; the PVs become Released
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        for _ in range(RETRY_COUNTS):
            if k_status['pvStatus'] == 'Released':
                break
            time.sleep(RETRY_INTERVAL)
            volume = client.by_id_volume(volume_name)
            k_status = volume["kubernetesStatus"]
            workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Released'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert k_status['lastPVCRefAt']
        assert k_status['lastPodRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']

    # delete only the PVs; the volumes lose their PV references
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == ''
        assert k_status['pvStatus'] == ''
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert k_status['lastPVCRefAt']
        assert k_status['lastPodRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']

    # reuse each volume by recreating a PV, PVC, and pod around it
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

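        # The reused volume should be picked up again: PV bound, the bare pod
        # listed as its only workload, and lastPVCRefAt/lastPodRefAt cleared.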
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert len(workloads) == 1
        assert k_status['pvName'] == p['pv_name']
        for _ in range(RETRY_COUNTS):
            if k_status['pvStatus'] == 'Bound':
                break
            time.sleep(RETRY_INTERVAL)
            volume = client.by_id_volume(volume_name)
            k_status = volume["kubernetesStatus"]
            workloads = k_status['workloadsStatus']
            assert len(workloads) == 1
        assert k_status['pvStatus'] == 'Bound'
        for _ in range(RETRY_COUNTS):
            if workloads[0]['podStatus'] == 'Running':
                break
            time.sleep(RETRY_INTERVAL)
            volume = client.by_id_volume(volume_name)
            k_status = volume["kubernetesStatus"]
            workloads = k_status['workloadsStatus']
            assert len(workloads) == 1
        assert workloads[0]['podStatus'] == 'Running'
        assert workloads[0]['podName'] == p['pod_name']
        assert not workloads[0]['workloadName']
        assert not workloads[0]['workloadType']
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since the persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
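
Code example #25 repeats the same check/sleep/re-fetch pattern in several places. When adapting it, that loop can be pulled into a small helper so the re-fetch always happens between checks; the sketch below is one way to do it (the name wait_for is invented for illustration and is not part of the test suite):

def wait_for(fetch, predicate):
    # Generic poller: re-fetch the state, test it, and retry until it matches.
    for _ in range(RETRY_COUNTS):
        state = fetch()
        if predicate(state):
            return state
        time.sleep(RETRY_INTERVAL)
    raise AssertionError('condition not met within the retry budget')

# For instance, waiting for the reused volume's PV to become Bound:
# k_status = wait_for(
#     lambda: client.by_id_volume(volume_name)["kubernetesStatus"],
#     lambda ks: ks['pvStatus'] == 'Bound')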