Code Example #1
def test_statefulset_backup(client, core_api, storage_class,
                            statefulset):  # NOQA
    """
    Test that backups on StatefulSet volumes work properly.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.

    Then create a backup using the following steps for each pod:

    1. Create a snapshot
    2. Write some data into the volume
    3. Create another snapshot `backup_snapshot`
    4. Create a third snapshot
    5. Back up the snapshot `backup_snapshot`
    6. Wait for the backup to show up.
        1. Verify the backup information
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)
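
The helpers used throughout these examples come from the surrounding test suite and are not shown on this page. In particular, `update_statefulset_manifests` appears to rename the StatefulSet manifest and point its volume claims at the StorageClass under test. A minimal sketch of such a helper, inferred from how the fixtures are used in these examples rather than taken from the actual suite:

def update_statefulset_manifests(ss_manifest, sc_manifest, name):
    """Rename the StatefulSet and wire its claim templates to the StorageClass.

    Sketch only: the field layout mirrors how the fixtures are consumed in
    the examples on this page, not the upstream helper.
    """
    ss_manifest['metadata']['name'] = name
    ss_manifest['spec']['serviceName'] = name
    ss_manifest['spec']['selector']['matchLabels']['app'] = name
    ss_manifest['spec']['template']['metadata']['labels']['app'] = name
    # Point every claim template at the StorageClass under test.
    for claim in ss_manifest['spec'].get('volumeClaimTemplates', []):
        claim['spec']['storageClassName'] = sc_manifest['metadata']['name']
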
Code Example #2
def test_statefulset_pod_deletion(core_api, storage_class,
                                  statefulset):  # NOQA
    """
    Test that a StatefulSet can spin up a new Pod with the same data after a
    previous Pod has been deleted.

    This test will only work in a CSI environment. It will automatically be
    disabled in FlexVolume environments.
    """

    statefulset_name = 'statefulset-pod-deletion-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    test_pod_name = statefulset_name + '-' + \
        str(randrange(statefulset['spec']['replicas']))
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    write_pod_volume_data(core_api, test_pod_name, test_data)
    # Not using delete_and_wait_pod here because there is a small chance the
    # StatefulSet recreates the Pod quickly enough that the function won't
    # detect that the Pod was deleted, causing the wait to time out and
    # throw an error.
    core_api.delete_namespaced_pod(name=test_pod_name,
                                   namespace='default',
                                   body=k8sclient.V1DeleteOptions())
    wait_statefulset(statefulset)
    resp = read_volume_data(core_api, test_pod_name)

    assert resp == test_data
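
`wait_statefulset` presumably polls the StatefulSet until its ready replica count matches the spec, much like the inline wait loops in the scaling examples further down. A rough sketch of that loop, reusing the `get_apps_api_client` helper shown later on this page; the timeout and interval values are placeholders:

import time


def wait_statefulset(statefulset, timeout=300, interval=2):
    """Poll until every desired replica of the StatefulSet reports ready."""
    apps_api = get_apps_api_client()
    name = statefulset['metadata']['name']
    replicas = statefulset['spec']['replicas']
    for _ in range(timeout // interval):
        s_set = apps_api.read_namespaced_stateful_set(name=name,
                                                      namespace='default')
        if s_set.status.ready_replicas == replicas:
            return s_set
        time.sleep(interval)
    raise AssertionError("StatefulSet %s never became ready" % name)
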
Code Example #3
def test_statefulset_pod_deletion(core_api, storage_class,
                                  statefulset):  # NOQA
    """
    Test that a StatefulSet can spin up a new Pod with the same data after a
    previous Pod has been deleted.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Write some data to one of the pods.
    4. Delete that pod.
    5. Wait for the StatefulSet to recreate the pod.
    6. Verify the data in the pod.
    """

    statefulset_name = 'statefulset-pod-deletion-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    test_pod_name = statefulset_name + '-' + \
        str(randrange(statefulset['spec']['replicas']))
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    write_pod_volume_data(core_api, test_pod_name, test_data)
    # Not using delete_and_wait_pod here because there is a small chance the
    # StatefulSet recreates the Pod quickly enough that the function won't
    # detect that the Pod was deleted, causing the wait to time out and
    # throw an error.
    core_api.delete_namespaced_pod(name=test_pod_name,
                                   namespace='default',
                                   body=k8sclient.V1DeleteOptions())
    wait_statefulset(statefulset)
    resp = read_volume_data(core_api, test_pod_name)

    assert resp == test_data
Code Example #4
def test_statefulset_mount(client, core_api, storage_class, statefulset):  # NOQA
    """
    Tests that volumes provisioned for a StatefulSet can be properly created,
    mounted, unmounted, and deleted on the Kubernetes cluster.
    """

    statefulset_name = 'statefulset-mount-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        # Workaround for checking volume name since they differ per pod.
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    # Confirm that we've iterated through all the volumes.
    assert len(pod_info) == 0
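
`get_statefulset_pod_info` is assumed to return, for each replica, the pod name plus the PVC and PV backing it; the loop above matches Longhorn volumes against the `pv_name` field. One plausible implementation with the official Kubernetes client, relying on the usual `<claim-template>-<statefulset>-<ordinal>` claim naming convention:

def get_statefulset_pod_info(core_api, statefulset):
    """Collect pod, PVC, and PV names for every replica of the StatefulSet."""
    info = []
    ss_name = statefulset['metadata']['name']
    claim = statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']
    for i in range(statefulset['spec']['replicas']):
        pod_name = '%s-%d' % (ss_name, i)
        pvc_name = '%s-%s' % (claim, pod_name)
        # The PV name is whatever volume the claim ended up bound to.
        pvc = core_api.read_namespaced_persistent_volume_claim(
            name=pvc_name, namespace='default')
        info.append({'pod_name': pod_name,
                     'pvc_name': pvc_name,
                     'pv_name': pvc.spec.volume_name})
    return info
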
Code Example #5
def test_recurring_job_in_storageclass(client, core_api, storage_class,
                                       statefulset):  # NOQA
    """
    Test creating a volume with a StorageClass that contains recurring jobs

    1. Create a StorageClass with recurring jobs
    2. Create a StatefulSet with PVC template and StorageClass
    3. Verify the recurring jobs run correctly.
    """
    set_random_backupstore(client)
    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class["parameters"]["recurringJobs"] = json.dumps(create_jobs1())

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    # 5 minutes
    time.sleep(300)
    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        check_jobs1_result(volume)
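
The `recurringJobs` StorageClass parameter is a JSON-encoded list of job definitions. `create_jobs1()` is not shown on this page; based on the cron/retain fields used elsewhere in these tests it plausibly returns something along these lines (names, schedules, and retain counts are illustrative only):

def create_jobs1():
    """Illustrative recurring jobs: a snapshot every minute and a backup
    every two minutes, each retaining two copies."""
    return [
        {"name": "snap", "task": "snapshot", "cron": "* * * * *", "retain": 2},
        {"name": "backup", "task": "backup", "cron": "*/2 * * * *", "retain": 2},
    ]

# Serialized into the StorageClass exactly as in the example above:
#   storage_class["parameters"]["recurringJobs"] = json.dumps(create_jobs1())
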
Code Example #6
def test_statefulset_pod_deletion(core_api, storage_class, statefulset):  # NOQA
    """
    Test that a StatefulSet can spin up a new Pod with the same data after a
    previous Pod has been deleted.

    This test will only work in a CSI environment. It will automatically be
    disabled in FlexVolume environments.
    """

    statefulset_name = 'statefulset-pod-deletion-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    test_pod_name = statefulset_name + '-' + \
        str(randrange(statefulset['spec']['replicas']))
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    write_volume_data(core_api, test_pod_name, test_data)
    # Not using delete_and_wait_pod here because there is a small chance the
    # StatefulSet recreates the Pod quickly enough that the function won't
    # detect that the Pod was deleted, causing the wait to time out and
    # throw an error.
    core_api.delete_namespaced_pod(name=test_pod_name, namespace='default',
                                   body=k8sclient.V1DeleteOptions())
    wait_statefulset(statefulset)
    resp = read_volume_data(core_api, test_pod_name)

    assert resp == test_data
Code Example #7
def test_statefulset_recurring_backup(set_random_backupstore, client, core_api, storage_class, statefulset):  # NOQA
    """
    Scenario: test recurring backups on StatefulSets

    Given 1 default backup recurring job created.

    When a StatefulSet is created.
    And data is written to every StatefulSet pod.
    And 5 minutes have passed.

    Then 2 snapshots are created for every StatefulSet pod.
    """

    # backup every minute
    recurring_jobs = {
        "backup": {
            "task": "backup",
            "groups": ["default"],
            "cron": "* * * * *",
            "retain": 2,
            "concurrency": 2,
            "labels": {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot.removed is False:
                count += 1

        # one backup + volume-head
        assert count == 2
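
The fixed `time.sleep(150)` calls above make this test timing-sensitive. Where flakiness is a concern, the same check can be expressed as a poll on `snapshotList()` until the expected number of live snapshots appears; a sketch that only reuses calls already present in this example:

import time


def wait_for_snapshot_count(client, volume_name, expected,
                            timeout=300, interval=5):
    """Poll a volume until `expected` non-removed snapshots exist."""
    for _ in range(timeout // interval):
        volume = client.by_id_volume(volume_name)
        live = [s for s in volume.snapshotList() if s.removed is False]
        if len(live) == expected:
            return live
        time.sleep(interval)
    raise AssertionError("expected %d live snapshots on %s"
                         % (expected, volume_name))
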
Code Example #8
def test_statefulset_mount(client, core_api, storage_class,
                           statefulset):  # NOQA
    """
    Tests that volumes provisioned for a StatefulSet can be properly created,
    mounted, unmounted, and deleted on the Kubernetes cluster.
    """

    statefulset_name = 'statefulset-mount-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        # Workaround for checking volume name since they differ per pod.
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    # Confirm that we've iterated through all the volumes.
    assert len(pod_info) == 0
Code Example #9
def test_statefulset_recurring_backup(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that recurring backups on StatefulSets work properly.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Write some data to every pod.
    4. Schedule recurring jobs for the volumes using the Longhorn API.
    5. Wait for 5 minutes.
    6. Verify the snapshots created by the recurring jobs.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    # backup every minute
    job_backup = {
        "name": "backup",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot.removed is False:
                count += 1

        # one backup + volume-head
        assert count == 2
Code Example #10
def test_statefulset_backup(client, core_api, storage_class, statefulset):  # NOQA
    """
    Test that backups on StatefulSet volumes work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)
Code Example #11
def test_statefulset_backup(client, core_api, storage_class,
                            statefulset):  # NOQA
    """
    Test that backups on StatefulSet volumes work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)
Code Example #12
def test_recurring_job_in_storageclass(client, core_api, storage_class, statefulset):  # NOQA
    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class['parameters']['recurringJobs'] = json.dumps(create_jobs1())

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    # 5 minutes
    time.sleep(300)
    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        check_jobs1_result(volume)
Code Example #13
def test_recurring_job_in_storageclass(client, core_api, storage_class,
                                       statefulset):  # NOQA
    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class['parameters']['recurringJobs'] = json.dumps(create_jobs1())

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    # 5 minutes
    time.sleep(300)
    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        check_jobs1_result(volume)
Code Example #14
def test_statefulset_recurring_backup(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that recurring backups on StatefulSets work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    # backup every minute
    job_backup = {
        "name": "backup",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(300)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot['removed'] is False:
                count += 1

        # two backups + volume-head
        assert count == 3
Code Example #15
def test_recurring_job_in_storageclass(set_random_backupstore, client,
                                       core_api, storage_class,
                                       statefulset):  # NOQA
    """
    Test creating a volume with a StorageClass that contains recurring jobs

    1. Create a StorageClass with recurring jobs
    2. Create a StatefulSet with PVC template and StorageClass
    3. Verify the recurring jobs run correctly.
    """
    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    storage_class["parameters"]["recurringJobs"] = json.dumps(create_jobs1())

    create_storage_class(storage_class)

    # wait until the beginning of an even minute
    wait_until_begin_of_an_even_minute()

    start_time = datetime.utcnow()
    create_and_wait_statefulset(statefulset)
    statefulset_creating_duration = datetime.utcnow() - start_time

    assert 150 > statefulset_creating_duration.seconds

    # We want to write data exactly at the 150th second since the start_time
    time.sleep(150 - statefulset_creating_duration.seconds)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]
    pod_names = [p['pod_name'] for p in pod_info]

    # write random data to volume to trigger recurring snapshot and backup job
    volume_data_path = "/data/test"
    for pod_name in pod_names:
        write_pod_volume_random_data(core_api, pod_name, volume_data_path, 2)

    time.sleep(150)  # 2.5 minutes

    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        check_jobs1_result(volume)
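
`wait_until_begin_of_an_even_minute` aligns the test with the even-minute cron boundaries the recurring jobs fire on, so the data write at the 150th second lands between job runs. A small stdlib-only sketch of what it could look like:

import time
from datetime import datetime


def wait_until_begin_of_an_even_minute():
    """Block until the wall clock reaches second 0 of an even minute."""
    while True:
        now = datetime.utcnow()
        if now.second == 0 and now.minute % 2 == 0:
            return
        time.sleep(1)
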
Code Example #16
def test_statefulset_recurring_backup(client, core_api, storage_class,  # NOQA
                                      statefulset):  # NOQA
    """
    Test that recurring backups on StatefulSets work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    # backup every minute
    job_backup = {"name": "backup", "cron": "* * * * *",
                  "task": "backup", "retain": 2}
    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(300)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot['removed'] is False:
                count += 1

        # two backups + volume-head
        assert count == 3
Code Example #17
def test_statefulset_scaling(client, core_api, storage_class, statefulset):  # NOQA
    """
    Test that scaling up a StatefulSet successfully provisions new volumes.
    """

    statefulset_name = 'statefulset-scaling-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    assert len(pod_info) == 0

    statefulset['spec']['replicas'] = replicas = 3
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={
            'spec': {
                'replicas': replicas
            }
        })
    for i in range(DEFAULT_POD_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(
            name=statefulset_name,
            namespace='default')
        if s_set.status.ready_replicas == replicas:
            break
        time.sleep(DEFAULT_POD_INTERVAL)
    assert s_set.status.ready_replicas == replicas

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == replicas
    for v in volumes:
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    assert len(pod_info) == 0
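
The scaling tests obtain an AppsV1 client through `get_apps_api_client`. Assuming it simply wraps the standard configuration lookup of the official Kubernetes Python client, it might amount to:

from kubernetes import client, config


def get_apps_api_client():
    """Return an AppsV1Api client, preferring in-cluster config when present."""
    try:
        config.load_incluster_config()
    except config.ConfigException:
        config.load_kube_config()
    return client.AppsV1Api()
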
Code Example #18
def test_statefulset_scaling(client, core_api, storage_class,
                             statefulset):  # NOQA
    """
    Test that scaling up a StatefulSet successfully provisions new volumes.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Verify the properties of the volumes.
    4. Scale the StatefulSet to 3 replicas.
    5. Wait for the new pod to become ready.
    6. Verify the new volume properties.
    """

    statefulset_name = 'statefulset-scaling-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        found = False
        for pod in pod_info:
            if v.name == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v.size == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v.numberOfReplicas == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v.state == 'attached'
    assert len(pod_info) == 0

    statefulset['spec']['replicas'] = replicas = 3
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})
    for i in range(DEFAULT_POD_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(name=statefulset_name,
                                                      namespace='default')
        if s_set.status.ready_replicas == replicas:
            break
        time.sleep(DEFAULT_POD_INTERVAL)
    assert s_set.status.ready_replicas == replicas

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == replicas
    for v in volumes:
        found = False
        for pod in pod_info:
            if v.name == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v.size == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v.numberOfReplicas == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v.state == 'attached'
    assert len(pod_info) == 0
Code Example #19
def test_rwx_multi_statefulset_with_same_pvc(core_api, pvc, statefulset,
                                             pod):  # NOQA
    """
    Test writing of data into a volume from multiple pods using the same PVC

    1. Create a volume with 'accessMode' rwx.
    2. Create a PV and a PVC with access mode 'readwritemany' and attach to the
       volume.
    3. Deploy a StatefulSet of 2 pods with the PVC created above.
    4. Wait for both pods to come up.
    5. Create a pod with the PVC created above.
    6. Wait for the StatefulSet to come up healthy.
    7. Write data to all three pods and compute md5sum.
    8. Check the data md5sum in the share manager pod.
    """
    pvc_name = 'pvc-multi-pods-test'
    statefulset_name = 'statefulset-rwx-same-pvc-test'
    pod_name = 'pod-rwx-same-pvc-test'

    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = 'longhorn'
    pvc['spec']['accessModes'] = ['ReadWriteMany']

    core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                       namespace='default')

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['template']['spec']['volumes'] = \
        [create_pvc_spec(pvc_name)]
    del statefulset['spec']['volumeClaimTemplates']

    create_and_wait_statefulset(statefulset)

    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api,
                          statefulset_name + '-0',
                          test_data,
                          filename='test1')
    assert test_data == read_volume_data(core_api,
                                         statefulset_name + '-1',
                                         filename='test1')

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    assert test_data == read_volume_data(core_api, pod_name, filename='test1')

    test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data_2, filename='test2')

    command1 = 'cat /export' + '/' + pv_name + '/' + 'test1'
    command2 = 'cat /export' + '/' + pv_name + '/' + 'test2'

    assert test_data == exec_command_in_pod(core_api, command1,
                                            share_manager_name,
                                            LONGHORN_NAMESPACE)
    assert test_data_2 == exec_command_in_pod(core_api, command2,
                                              share_manager_name,
                                              LONGHORN_NAMESPACE)
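
`create_pvc_spec(pvc_name)` is assumed to build the pod-level volume entry that points at an existing claim, matching the inline dictionaries used in the kubernetes-status example later on. A sketch; the volume name 'pod-data' is an assumption and must match the container's volumeMounts entry:

def create_pvc_spec(pvc_name):
    """Build a pod spec volume entry backed by an existing PVC."""
    return {
        'name': 'pod-data',  # assumed to match the container's volumeMounts
        'persistentVolumeClaim': {
            'claimName': pvc_name,
        },
    }
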
Code Example #20
def test_rwx_delete_share_manager_pod(core_api, statefulset):  # NOQA
    """
    Test moving of Share manager pod from one node to another.

    1. Create a StatefulSet of 1 pod with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for StatefulSet to come up healthy.
    3. Write data and compute md5sum.
    4. Delete the share manager pod.
    5. Wait for a new pod to be created and volume getting attached.
    6. Check the data md5sum in the StatefulSet pod.
    7. Write more data to it and compute md5sum.
    8. Check the data md5sum in the share manager volume.
    """

    statefulset_name = 'statefulset-delete-share-manager-pods-test'

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['replicas'] = 1
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    pod_name = statefulset_name + '-' + '0'
    pvc_name = \
        statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name'] \
        + '-' + statefulset_name + '-0'
    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data, filename='test1')

    delete_and_wait_pod(core_api,
                        share_manager_name,
                        namespace=LONGHORN_NAMESPACE)

    target_pod = core_api.read_namespaced_pod(name=pod_name,
                                              namespace='default')
    wait_delete_pod(core_api, target_pod.metadata.uid)
    wait_for_pod_remount(core_api, pod_name)

    test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data_2, filename='test2')

    command1 = 'cat /export/' + pv_name + '/test1'
    share_manager_data_1 = exec_command_in_pod(core_api, command1,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)
    assert test_data == share_manager_data_1

    command2 = 'cat /export/' + pv_name + '/test2'
    share_manager_data_2 = exec_command_in_pod(core_api, command2,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)
    assert test_data_2 == share_manager_data_2
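
The RWX tests read files back out of the share manager pod with `exec_command_in_pod`. With the official Kubernetes client this is typically done through the WebSocket exec API; a sketch under that assumption:

from kubernetes.stream import stream


def exec_command_in_pod(core_api, command, pod_name, namespace):
    """Run a shell command inside a pod and return its stripped stdout."""
    exec_command = ['/bin/sh', '-c', command]
    output = stream(core_api.connect_get_namespaced_pod_exec,
                    pod_name,
                    namespace,
                    command=exec_command,
                    stderr=True, stdin=False,
                    stdout=True, tty=False)
    return output.rstrip()
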
Code Example #21
def test_rwx_with_statefulset_multi_pods(core_api, statefulset):  # NOQA
    """
    Test creation of share manager pod and rwx volumes from 2 pods.

    1. Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for both pods to come up running.
    3. Verify there are two share manager pods created in the longhorn
       namespace and that they have the directory with the PV name in the
       path `/export`.
    4. Write data in both pods and compute md5sum.
    5. Compare the md5sum of the data with the data written in the share
       manager.
    """

    statefulset_name = 'statefulset-rwx-multi-pods-test'
    share_manager_name = []
    volumes_name = []

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    for i in range(2):
        pvc_name = \
            statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']\
            + '-' + statefulset_name + '-' + str(i)
        pv_name = get_volume_name(core_api, pvc_name)

        assert pv_name is not None

        volumes_name.append(pv_name)
        share_manager_name.append('share-manager-' + pv_name)

        check_pod_existence(core_api,
                            share_manager_name[i],
                            namespace=LONGHORN_NAMESPACE)

    command = "ls /export | grep -i 'pvc' | wc -l"

    assert exec_command_in_pod(core_api, command, share_manager_name[0],
                               LONGHORN_NAMESPACE) == '1'
    assert exec_command_in_pod(core_api, command, share_manager_name[1],
                               LONGHORN_NAMESPACE) == '1'

    md5sum_pod = []
    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        test_data = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api, test_pod_name, test_data)
        md5sum_pod.append(test_data)

    for i in range(2):
        command = 'cat /export' + '/' + volumes_name[i] + '/' + 'test'
        pod_data = exec_command_in_pod(core_api, command,
                                       share_manager_name[i],
                                       LONGHORN_NAMESPACE)

        assert pod_data == md5sum_pod[i]
Code Example #22
def test_rwx_statefulset_scale_down_up(core_api, statefulset):  # NOQA
    """
    Test scaling up and down of pods attached to a rwx volume.

    1. Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for StatefulSet pods to come up healthy.
    3. Write data and compute md5sum in both pods.
    4. Delete the pods.
    5. Wait for the pods to be terminated.
    6. Verify the share manager pods are no longer available and the volume is
       detached.
    7. Recreate the pods.
    8. Wait for the new pods to come up.
    9. Check the data md5sum in the new pods.
    """

    statefulset_name = 'statefulset-rwx-scale-down-up-test'
    share_manager_name = []

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    for i in range(2):
        pvc_name = \
            statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']\
            + '-' + statefulset_name + '-' + str(i)
        pv_name = get_volume_name(core_api, pvc_name)

        assert pv_name is not None

        share_manager_name.append('share-manager-' + pv_name)

        check_pod_existence(core_api,
                            share_manager_name[i],
                            namespace=LONGHORN_NAMESPACE)

    md5sum_pod = []
    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        test_data = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api, test_pod_name, test_data)
        md5sum_pod.append(test_data)

    statefulset['spec']['replicas'] = replicas = 0
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})
    for i in range(DEFAULT_STATEFULSET_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(
            name=statefulset['metadata']['name'], namespace='default')
        if s_set.status.ready_replicas == replicas or \
                (replicas == 0 and not s_set.status.ready_replicas):
            break
        time.sleep(DEFAULT_STATEFULSET_INTERVAL)

    pods = core_api.list_namespaced_pod(namespace=LONGHORN_NAMESPACE)

    found = False
    for item in pods.items:
        if item.metadata.name == share_manager_name[0] or \
                item.metadata.name == share_manager_name[1]:
            found = True
            break

    assert not found

    statefulset['spec']['replicas'] = replicas = 2
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})
    wait_statefulset(statefulset)

    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        command = 'cat /data/test'
        pod_data = exec_command_in_pod(core_api, command, test_pod_name,
                                       'default')

        assert pod_data == md5sum_pod[i]
Code Example #23
def test_rwx_parallel_writing(core_api, statefulset, pod):  # NOQA
    """
    Test parallel writing of data

    1. Create a StatefulSet of 1 pod with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for StatefulSet to come up healthy.
    3. Create a pod that uses the same PVC created by the first StatefulSet.
    4. Wait for the pod to come up healthy.
    5. Start writing 800 MB of data to `file 1` in the StatefulSet pod and
       500 MB of data to `file 2` in the second pod, in parallel.
    6. Compute the md5sums.
    7. Check the data md5sums in the share manager pod volume.
    """

    statefulset_name = 'statefulset-rwx-parallel-writing-test'

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['replicas'] = 1
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)
    statefulset_pod_name = statefulset_name + '-0'

    pvc_name = \
        statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name'] \
        + '-' + statefulset_name + '-0'
    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name

    pod_name = 'pod-parallel-write-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    with Pool(2) as p:
        p.map(
            write_data_into_pod,
            [statefulset_pod_name + ':/data/test1', pod_name + ':/data/test2'])

    md5sum1 = get_pod_data_md5sum(core_api, statefulset_pod_name, 'data/test1')
    md5sum2 = get_pod_data_md5sum(core_api, pod_name, 'data/test2')

    command1 = 'md5sum /export' + '/' + pv_name + '/' + 'test1' + \
               " | awk '{print $1}'"
    share_manager_data1 = exec_command_in_pod(core_api, command1,
                                              share_manager_name,
                                              LONGHORN_NAMESPACE)
    assert md5sum1 == share_manager_data1

    command2 = 'md5sum /export' + '/' + pv_name + '/' + 'test2' + \
               " | awk '{print $1}'"
    share_manager_data2 = exec_command_in_pod(core_api, command2,
                                              share_manager_name,
                                              LONGHORN_NAMESPACE)
    assert md5sum2 == share_manager_data2
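
`write_data_into_pod` receives strings of the form `pod_name:/path`, which suggests it splits the argument and streams data into that file inside the pod. A rough sketch under that assumption, reusing the `exec_command_in_pod` sketch above; the 500 MiB default and the use of `dd` are illustrative, and each worker process is assumed to have cluster credentials already loaded:

from kubernetes import client as k8sclient


def write_data_into_pod(pod_name_and_path, size_in_mb=500):
    """Fill a file inside a pod with random data.

    Example argument: 'statefulset-rwx-parallel-writing-test-0:/data/test1'.
    """
    pod_name, path = pod_name_and_path.split(':', 1)
    # Each Pool worker builds its own API client.
    core_api = k8sclient.CoreV1Api()
    command = 'dd if=/dev/urandom of=%s bs=1M count=%d' % (path, size_in_mb)
    exec_command_in_pod(core_api, command, pod_name, 'default')
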
Code Example #24
def test_upgrade(upgrade_image_tag, settings_reset, volume_name, pod_make, statefulset, storage_class):  # NOQA
    """
    Test Longhorn upgrade

    Prerequisite:
      - Disable Auto Salvage Setting

    1. Find the upgrade image tag
    2. Create a volume, generate and write data into the volume.
    3. Create a Pod using a volume, generate and write data
    4. Create a StatefulSet with 2 replicas,
       generate and write data to their volumes
    5. Keep all volumes attached
    6. Upgrade Longhorn system.
    7. Check that the Pod and StatefulSet didn't restart after the upgrade
    8. Check all volumes' data
    9. Write data to the StatefulSet pods and the attached volume
    10. Check the data written to the StatefulSet pods and the attached volume.
    11. Detach the volume, and delete the Pod and StatefulSet to detach
        their volumes
    12. Upgrade all volumes' engine images.
    13. Attach the volume, and recreate the Pod and StatefulSet
    14. Check all volumes' data
    """
    new_ei_name = "longhornio/longhorn-engine:" + upgrade_image_tag

    client = get_longhorn_api_client()
    core_api = get_core_api_client()
    host_id = get_self_host_id()
    pod_data_path = "/data/test"

    pod_volume_name = generate_volume_name()

    auto_salvage_setting = client.by_id_setting(SETTING_AUTO_SALVAGE)
    setting = client.update(auto_salvage_setting, value="false")

    assert setting.name == SETTING_AUTO_SALVAGE
    assert setting.value == "false"

    # Create Volume attached to a node.
    volume1 = create_and_check_volume(client,
                                      volume_name,
                                      size=SIZE)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)
    volume1_data = write_volume_random_data(volume1)

    # Create Volume used by Pod
    pod_name, pv_name, pvc_name, pod_md5sum = \
        prepare_pod_with_data_in_mb(client, core_api,
                                    pod_make, pod_volume_name,
                                    data_path=pod_data_path,
                                    add_liveness_prope=False)

    # Create multiple volumes used by StatefulSet
    statefulset_name = 'statefulset-upgrade-test'
    update_statefulset_manifests(statefulset,
                                 storage_class,
                                 statefulset_name)
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)
    statefulset_pod_info = get_statefulset_pod_info(core_api, statefulset)

    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api,
                              sspod_info['pod_name'],
                              sspod_info['data'])

    # upgrade Longhorn
    assert longhorn_upgrade(upgrade_image_tag)

    client = get_longhorn_api_client()

    # wait for 1 minute before checking pod restarts
    time.sleep(60)

    pod = core_api.read_namespaced_pod(name=pod_name,
                                       namespace='default')
    assert pod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        sspod = core_api.read_namespaced_pod(name=sspod_info['pod_name'],
                                             namespace='default')
        assert \
            sspod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)

    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api,
                              sspod_info['pod_name'],
                              sspod_info['data'])

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    volume1 = client.by_id_volume(volume_name)
    volume1_data = write_volume_random_data(volume1)
    check_volume_data(volume1, volume1_data)

    statefulset['spec']['replicas'] = replicas = 0
    apps_api = get_apps_api_client()

    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={
            'spec': {
                'replicas': replicas
            }
        })

    delete_and_wait_pod(core_api, pod_name)

    volume = client.by_id_volume(volume_name)
    volume.detach()

    volumes = client.list_volume()

    for v in volumes:
        wait_for_volume_detached(client, v.name)

    engineimages = client.list_engine_image()

    for ei in engineimages:
        if ei.image == new_ei_name:
            new_ei = ei

    volumes = client.list_volume()

    for v in volumes:
        volume = client.by_id_volume(v.name)
        volume.engineUpgrade(image=new_ei.image)

    statefulset['spec']['replicas'] = replicas = 2
    apps_api = get_apps_api_client()

    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={
            'spec': {
                'replicas': replicas
            }
        })

    wait_statefulset(statefulset)

    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    volume1 = client.by_id_volume(volume_name)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)
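
`wait_for_volume_detached` (and the similar `wait_for_volume_healthy`) presumably poll the Longhorn API until the volume reports the expected state, matching the `v.state == 'attached'` checks used in the mount tests. A sketch against the same client object used throughout these examples; the timeout values are placeholders:

import time


def wait_for_volume_detached(client, volume_name, timeout=300, interval=2):
    """Poll the Longhorn API until the volume reports the detached state."""
    for _ in range(timeout // interval):
        volume = client.by_id_volume(volume_name)
        if volume.state == 'detached':
            return volume
        time.sleep(interval)
    raise AssertionError("volume %s never detached" % volume_name)
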
Code Example #25
def test_kubernetes_status(client, core_api, storage_class,  # NOQA
                           statefulset, csi_pv, pvc, pod):  # NOQA
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert workloads[0]['podStatus'] == 'Running'
        if i == 1:
            assert len(k_status['workloadsStatus']) == 2
            if workloads[0]['podName'] == pod_info[i]['pod_name']:
                assert workloads[1]['podName'] == extra_pod_name
                assert workloads[0]['workloadName'] == statefulset_name
                assert workloads[0]['workloadType'] == 'StatefulSet'
                assert not workloads[1]['workloadName']
                assert not workloads[1]['workloadType']
            else:
                assert workloads[1]['podName'] == pod_info[i]['pod_name']
                assert workloads[0]['podName'] == extra_pod_name
                assert not workloads[0]['workloadName']
                assert not workloads[0]['workloadType']
                assert workloads[1]['workloadName'] == statefulset_name
                assert workloads[1]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running' and \
                        workloads[1]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
                assert len(workloads) == 2
            assert workloads[0]['podStatus'] == 'Running'
            assert workloads[1]['podStatus'] == 'Running'

    # the extra pod is still using the 2nd volume
    delete_and_wait_statefulset_only(core_api, statefulset)
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            assert k_status['lastPodRefAt']
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']
            assert not k_status['lastPodRefAt']

    # deleted extra_pod, all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert k_status['lastPodRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']

    # deleted pvc only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        for _ in range(RETRY_COUNTS):
            if k_status['pvStatus'] == 'Released':
                break
            time.sleep(RETRY_INTERVAL)
            volume = client.by_id_volume(volume_name)
            k_status = volume["kubernetesStatus"]
            workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Released'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert k_status['lastPVCRefAt']
        assert k_status['lastPodRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']

    # deleted pv only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == ''
        assert k_status['pvStatus'] == ''
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert k_status['lastPVCRefAt']
        assert k_status['lastPodRefAt']
        assert len(workloads) == 1
        if i == 0:
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
        if i == 1:
            assert workloads[0]['podName'] == extra_pod_name
            assert not workloads[0]['workloadName']
            assert not workloads[0]['workloadType']

    # reuse that volume
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert len(workloads) == 1
        assert k_status['pvName'] == p['pv_name']
        for _ in range(RETRY_COUNTS):
            if k_status['pvStatus'] == 'Bound':
                break
            time.sleep(RETRY_INTERVAL)
            volume = client.by_id_volume(volume_name)
            k_status = volume["kubernetesStatus"]
            workloads = k_status['workloadsStatus']
            assert len(workloads) == 1
        assert k_status['pvStatus'] == 'Bound'
        for _ in range(RETRY_COUNTS):
            if workloads[0]['podStatus'] == 'Running':
                break
            time.sleep(RETRY_INTERVAL)
            volume = client.by_id_volume(volume_name)
            k_status = volume["kubernetesStatus"]
            workloads = k_status['workloadsStatus']
            assert len(workloads) == 1
        assert workloads[0]['podStatus'] == 'Running'
        assert workloads[0]['podName'] == p['pod_name']
        assert not workloads[0]['workloadName']
        assert not workloads[0]['workloadType']
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since the persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
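
`wait_delete_pv` at the end of the loop above is assumed to poll the PV until the API server reports it gone. A sketch with the official client; the 404 check is the usual way to detect completed deletion:

import time
from kubernetes.client.rest import ApiException


def wait_delete_pv(core_api, pv_name, timeout=300, interval=2):
    """Poll until the PersistentVolume is fully removed from the API server."""
    for _ in range(timeout // interval):
        try:
            core_api.read_persistent_volume(name=pv_name)
        except ApiException as e:
            if e.status == 404:
                return
            raise
        time.sleep(interval)
    raise AssertionError("PV %s was not deleted" % pv_name)
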
Code Example #26
def test_statefulset_restore(client, core_api, storage_class,  # NOQA
                             statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.
    """

    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)

    # The StatefulSet fixture already cleans these up; use the manifests
    # instead of the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': [
                'ReadWriteOnce'
            ],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    if csi:
        pv['spec']['csi'] = {
            'driver': 'io.rancher.longhorn',
            'fsType': 'ext4',
            'volumeAttributes': {
                'numberOfReplicas':
                    storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                    storage_class['parameters']['staleReplicaTimeout']
            },
            'volumeHandle': ''
        }
    else:
        pv['spec']['flexVolume'] = {
            'driver': 'rancher.io/longhorn',
            'fsType': 'ext4',
            'options': {
                'numberOfReplicas':
                    storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                    storage_class['parameters']['staleReplicaTimeout'],
                'fromBackup': '',
                'size': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            }
        }

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for pod in pod_info:
        pod['pod_name'] = pod['pod_name'].replace('statefulset-restore-test',
                                                  'statefulset-restore-test-2')
        pod['pvc_name'] = pod['pvc_name'].replace('statefulset-restore-test',
                                                  'statefulset-restore-test-2')

        pv['metadata']['name'] = pod['pvc_name']
        if csi:
            client.create_volume(
                name=pod['pvc_name'],
                size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
                numberOfReplicas=int(
                    storage_class['parameters']['numberOfReplicas']),
                fromBackup=pod['backup_snapshot']['url'])
            wait_for_volume_detached(client, pod['pvc_name'])

            pv['spec']['csi']['volumeHandle'] = pod['pvc_name']
        else:
            pv['spec']['flexVolume']['options']['fromBackup'] = \
                pod['backup_snapshot']['url']

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = pod['pvc_name']
        pvc['spec']['volumeName'] = pod['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc,
            namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        resp = read_volume_data(core_api, pod['pod_name'])
        assert resp == pod['data']
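
The example above builds the PersistentVolume manifest inline, once for CSI and once for FlexVolume. Below is a minimal sketch of factoring that into a helper; the function name and signature are illustrative assumptions and not part of the Longhorn test suite, while the driver names and fields are taken from the example itself.

def build_restore_pv(name, size_str, storage_class_name, parameters,
                     csi=True, from_backup=''):
    """Return a PersistentVolume manifest for a pre-created Longhorn volume."""
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {'name': name},
        'spec': {
            'capacity': {'storage': size_str},
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': storage_class_name,
        },
    }
    if csi:
        # CSI: the volume is restored out of band (client.create_volume with
        # fromBackup), so the PV only references it through volumeHandle.
        pv['spec']['csi'] = {
            'driver': 'io.rancher.longhorn',
            'fsType': 'ext4',
            'volumeAttributes': {
                'numberOfReplicas': parameters['numberOfReplicas'],
                'staleReplicaTimeout': parameters['staleReplicaTimeout'],
            },
            'volumeHandle': name,
        }
    else:
        # FlexVolume: the driver restores from the backup URL itself.
        pv['spec']['flexVolume'] = {
            'driver': 'rancher.io/longhorn',
            'fsType': 'ext4',
            'options': {
                'numberOfReplicas': parameters['numberOfReplicas'],
                'staleReplicaTimeout': parameters['staleReplicaTimeout'],
                'fromBackup': from_backup,
                'size': size_str,
            },
        }
    return pv

With such a helper, the per-pod loop would only need to pass pod['pvc_name'], the size string, DEFAULT_STORAGECLASS_NAME, and storage_class['parameters'].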
Code Example #27
def test_statefulset_restore(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.
    """

    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)

    # The StatefulSet fixture already cleans these up; use the manifests
    # instead of the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    if csi:
        pv['spec']['csi'] = {
            'driver': 'io.rancher.longhorn',
            'fsType': 'ext4',
            'volumeAttributes': {
                'numberOfReplicas':
                storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                storage_class['parameters']['staleReplicaTimeout']
            },
            'volumeHandle': ''
        }
    else:
        pv['spec']['flexVolume'] = {
            'driver': 'rancher.io/longhorn',
            'fsType': 'ext4',
            'options': {
                'numberOfReplicas':
                storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                storage_class['parameters']['staleReplicaTimeout'],
                'fromBackup': '',
                'size': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            }
        }

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for pod in pod_info:
        pod['pod_name'] = pod['pod_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pod['pvc_name'] = pod['pvc_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')

        pv['metadata']['name'] = pod['pvc_name']
        if csi:
            client.create_volume(
                name=pod['pvc_name'],
                size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
                numberOfReplicas=int(
                    storage_class['parameters']['numberOfReplicas']),
                fromBackup=pod['backup_snapshot']['url'])
            wait_for_volume_detached(client, pod['pvc_name'])

            pv['spec']['csi']['volumeHandle'] = pod['pvc_name']
        else:
            pv['spec']['flexVolume']['options']['fromBackup'] = \
                pod['backup_snapshot']['url']

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = pod['pvc_name']
        pvc['spec']['volumeName'] = pod['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                           namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        resp = read_volume_data(core_api, pod['pod_name'])
        assert resp == pod['data']
Code Example #28
def test_statefulset_restore(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Create a backup for each pod.
    4. Delete the StatefulSet, including the Longhorn volumes.
    5. Create volumes and PV/PVC using previous backups from each Pod.
        1. PVs will be created using the previous names.
        2. PVCs will be created using the previous name + "-2" because the
        StatefulSet naming policy determines the PVC names its pods claim.
    6. Create a new StatefulSet using the previous name + "-2"
    7. Wait for pods to be up.
        1. Verify the pods contain the previously backed-up data
    """

    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)

    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)

    # The StatefulSet fixture already cleans these up; use the manifests
    # instead of the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }

    assert csi

    pv['spec']['csi'] = {
        'driver': 'driver.longhorn.io',
        'fsType': 'ext4',
        'volumeAttributes': {
            'numberOfReplicas':
            storage_class['parameters']['numberOfReplicas'],
            'staleReplicaTimeout':
            storage_class['parameters']['staleReplicaTimeout']
        },
        'volumeHandle': ''
    }

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for pod in pod_info:
        pod['pod_name'] = pod['pod_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pod['pvc_name'] = pod['pvc_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pv['metadata']['name'] = pod['pvc_name']

        client.create_volume(
            name=pod['pvc_name'],
            size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
            numberOfReplicas=int(
                storage_class['parameters']['numberOfReplicas']),
            fromBackup=pod['backup_snapshot']['url'])
        wait_for_volume_detached(client, pod['pvc_name'])

        pv['spec']['csi']['volumeHandle'] = pod['pvc_name']

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = pod['pvc_name']
        pvc['spec']['volumeName'] = pod['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                           namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        resp = read_volume_data(core_api, pod['pod_name'])
        assert resp == pod['data']
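
The restore trick in this example relies on StatefulSet PVC naming being deterministic: PVCs pre-created with the names the new StatefulSet expects are adopted instead of being provisioned from scratch. The sketch below only illustrates the Kubernetes naming rule; the claim-template name 'pod-data' is an assumed placeholder, not taken from this document.

def expected_statefulset_pvc_names(claim_template_name, statefulset_name,
                                   replicas):
    """Kubernetes names StatefulSet PVCs as
    <claim-template-name>-<statefulset-name>-<ordinal>."""
    return ['{}-{}-{}'.format(claim_template_name, statefulset_name, ordinal)
            for ordinal in range(replicas)]


# For example (with the assumed template name 'pod-data'):
# expected_statefulset_pvc_names('pod-data', 'statefulset-restore-test-2', 2)
# -> ['pod-data-statefulset-restore-test-2-0',
#     'pod-data-statefulset-restore-test-2-1']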
Code Example #29
def test_kubernetes_status(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset,
        csi_pv,
        pvc,
        pod):  # NOQA
    """
    Test Volume feature: Kubernetes Status

    1. Create StorageClass with `reclaimPolicy = Retain`
    2. Create a statefulset `kubernetes-status-test` with the StorageClass
        1. The statefulset has scale of 2.
    3. Get the volume name from the SECOND pod of the StatefulSet and
    create an `extra_pod` using the same volume on the same node
    4. Check the volumes used by the StatefulSet
        1. The volume used by the FIRST StatefulSet pod will have one workload
        2. The volume used by the SECOND StatefulSet pod will have two
        workloads
        3. Validate related status, e.g. pv/pod name/state, workload
        name/type
    5. Delete the StatefulSet (keeping the volumes) and check the volumes
    again
        1. PV/PVC should still be bound
        2. The volume used by the FIRST pod should only have history data
        3. The volume used by the SECOND pod and the extra pod should have
        current workload data pointing to the extra pod
    6. Delete the extra pod
        1. Now all the volumes should only have history data (`lastPodRefAt`
        set)
    7. Delete the PVCs
        1. The PV status should become `Released` and the PVC reference
        should become history data (`lastPVCRefAt` set)
    8. Delete PV
        1. All the Kubernetes status information should be cleaned up.
    9. Reuse the two Longhorn volumes to create new pods
        1. Since `reclaimPolicy == Retain`, the volumes won't be deleted by
        Longhorn
        2. Check that the Kubernetes status is now updated with the pod info
        but an empty workload
        3. The default Longhorn Static StorageClass will remove the PV along
        with the PVC, but leave the Longhorn volume
    """
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name':
        pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]  # NOQA
        volume = client.by_id_volume(volume_name)
        k_status = volume.kubernetesStatus
        workloads = k_status.workloadsStatus
        assert k_status.pvName == p['pv_name']
        assert k_status.pvStatus == 'Bound'
        assert k_status.namespace == 'default'
        assert k_status.pvcName == p['pvc_name']
        assert not k_status.lastPVCRefAt
        assert not k_status.lastPodRefAt
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0].podName == p['pod_name']
            assert workloads[0].workloadName == statefulset_name
            assert workloads[0].workloadType == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0].podStatus == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume.kubernetesStatus
                workloads = k_status.workloadsStatus
            assert workloads[0].podStatus == 'Running'
        if i == 1:
            assert len(k_status.workloadsStatus) == 2
            if workloads[0].podName == pod_info[i]['pod_name']:
                assert workloads[1].podName == extra_pod_name
                assert workloads[0].workloadName == statefulset_name
                assert workloads[0].workloadType == 'StatefulSet'
                assert not workloads[1].workloadName
                assert not workloads[1].workloadType
            else:
                assert workloads[1].podName == pod_info[i]['pod_name']
                assert workloads[0].podName == extra_pod_name
                assert not workloads[0].workloadName
                assert not workloads[0].workloadType
                assert workloads[1].workloadName == statefulset_name
                assert workloads[1].workloadType == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0].podStatus == 'Running' and \
                        workloads[1].podStatus == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume.kubernetesStatus
                workloads = k_status.workloadsStatus
                assert len(workloads) == 2
            assert workloads[0].podStatus == 'Running'
            assert workloads[1].podStatus == 'Running'

    ks_list = [{}, {}]
    delete_and_wait_statefulset_only(core_api, statefulset)
    # the extra pod is still using the 2nd volume
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['pvName'] = p['pv_name']
        ks_list[i]['pvStatus'] = 'Bound'
        ks_list[i]['namespace'] = 'default'
        ks_list[i]['pvcName'] = p['pvc_name']
        ks_list[i]['lastPVCRefAt'] = ''
        if i == 0:
            ks_list[i]['lastPodRefAt'] = 'not empty'
            ks_list[i]['workloadsStatus'] = [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': statefulset_name,
                    'workloadType': 'StatefulSet',
                },
            ]
        if i == 1:
            ks_list[i]['lastPodRefAt'] = ''
            ks_list[i]['workloadsStatus'] = [{
                'podName': extra_pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            }]
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted extra_pod, all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['lastPodRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pvc only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        ks_list[i]['pvStatus'] = 'Released'
        ks_list[i]['lastPVCRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pv only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        ks_list[i]['pvName'] = ''
        ks_list[i]['pvStatus'] = ''
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # reuse that volume
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        csi_pv['spec']['storageClassName'] = \
            DEFAULT_LONGHORN_STATIC_STORAGECLASS_NAME
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        pvc['spec']['storageClassName'] = \
            DEFAULT_LONGHORN_STATIC_STORAGECLASS_NAME
        core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                           namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name':
            pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        ks = {
            'pvName': p['pv_name'],
            'pvStatus': 'Bound',
            'namespace': 'default',
            'pvcName': p['pvc_name'],
            'lastPVCRefAt': '',
            'lastPodRefAt': '',
            'workloadsStatus': [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': '',
                    'workloadType': '',
                },
            ],
        }
        wait_volume_kubernetes_status(client, volume_name, ks)

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
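
The status checks in this test repeatedly re-read the volume from the API until a condition holds, sleeping between attempts. A minimal sketch of that poll-and-refresh pattern as a generic helper follows; the helper itself is an illustration, not part of the test suite's common module.

import time


def poll_until(predicate, refresh, retry_counts, retry_interval):
    """Poll until predicate(state) is true, refreshing state between tries."""
    state = refresh()
    for _ in range(retry_counts):
        if predicate(state):
            break
        time.sleep(retry_interval)
        # Refresh inside the loop; otherwise the poll never observes changes.
        state = refresh()
    return state


# Example usage against the names used above:
# volume = poll_until(
#     lambda v: v.kubernetesStatus.workloadsStatus[0].podStatus == 'Running',
#     lambda: client.by_id_volume(volume_name),
#     RETRY_COUNTS, RETRY_INTERVAL)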
Code Example #30
def test_kubernetes_status(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset,
        csi_pv,
        pvc,
        pod):  # NOQA
    statefulset_name = 'kubernetes-status-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    storage_class['reclaimPolicy'] = 'Retain'
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]

    extra_pod_name = 'extra-pod-using-' + volume_info[1]
    pod['metadata']['name'] = extra_pod_name
    p2 = core_api.read_namespaced_pod(name=pod_info[1]['pod_name'],
                                      namespace='default')
    pod['spec']['nodeName'] = p2.spec.node_name
    pod['spec']['volumes'] = [{
        'name':
        pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pod_info[1]['pvc_name'],
        },
    }]
    create_and_wait_pod(core_api, pod)

    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert k_status['pvName'] == p['pv_name']
        assert k_status['pvStatus'] == 'Bound'
        assert k_status['namespace'] == 'default'
        assert k_status['pvcName'] == p['pvc_name']
        assert not k_status['lastPVCRefAt']
        assert not k_status['lastPodRefAt']
        if i == 0:
            assert len(workloads) == 1
            assert workloads[0]['podName'] == p['pod_name']
            assert workloads[0]['workloadName'] == statefulset_name
            assert workloads[0]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
            assert workloads[0]['podStatus'] == 'Running'
        if i == 1:
            assert len(k_status['workloadsStatus']) == 2
            if workloads[0]['podName'] == pod_info[i]['pod_name']:
                assert workloads[1]['podName'] == extra_pod_name
                assert workloads[0]['workloadName'] == statefulset_name
                assert workloads[0]['workloadType'] == 'StatefulSet'
                assert not workloads[1]['workloadName']
                assert not workloads[1]['workloadType']
            else:
                assert workloads[1]['podName'] == pod_info[i]['pod_name']
                assert workloads[0]['podName'] == extra_pod_name
                assert not workloads[0]['workloadName']
                assert not workloads[0]['workloadType']
                assert workloads[1]['workloadName'] == statefulset_name
                assert workloads[1]['workloadType'] == 'StatefulSet'
            for _ in range(RETRY_COUNTS):
                if workloads[0]['podStatus'] == 'Running' and \
                        workloads[1]['podStatus'] == 'Running':
                    break
                time.sleep(RETRY_INTERVAL)
                volume = client.by_id_volume(volume_name)
                k_status = volume["kubernetesStatus"]
                workloads = k_status['workloadsStatus']
                assert len(workloads) == 2
            assert workloads[0]['podStatus'] == 'Running'
            assert workloads[1]['podStatus'] == 'Running'

    ks_list = [{}, {}]
    delete_and_wait_statefulset_only(core_api, statefulset)
    # the extra pod is still using the 2nd volume
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['pvName'] = p['pv_name']
        ks_list[i]['pvStatus'] = 'Bound'
        ks_list[i]['namespace'] = 'default'
        ks_list[i]['pvcName'] = p['pvc_name']
        ks_list[i]['lastPVCRefAt'] = ''
        if i == 0:
            ks_list[i]['lastPodRefAt'] = 'not empty'
            ks_list[i]['workloadsStatus'] = [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': statefulset_name,
                    'workloadType': 'StatefulSet',
                },
            ]
        if i == 1:
            ks_list[i]['lastPodRefAt'] = ''
            ks_list[i]['workloadsStatus'] = [{
                'podName': extra_pod_name,
                'podStatus': 'Running',
                'workloadName': '',
                'workloadType': '',
            }]
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted extra_pod, all volumes have no workload
    delete_and_wait_pod(core_api, pod['metadata']['name'])
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        ks_list[i]['lastPodRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pvc only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pvc(core_api, p['pvc_name'])
        ks_list[i]['pvStatus'] = 'Released'
        ks_list[i]['lastPVCRefAt'] = 'not empty'
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # deleted pv only.
    for i in range(len(volume_info)):
        p, volume_name = pod_info[i], volume_info[i]
        delete_and_wait_pv(core_api, p['pv_name'])
        ks_list[i]['pvName'] = ''
        ks_list[i]['pvStatus'] = ''
        wait_volume_kubernetes_status(client, volume_name, ks_list[i])

    # reuse that volume
    for p, volume_name in zip(pod_info, volume_info):
        p['pod_name'] = p['pod_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pvc_name'] = p['pvc_name'].replace('kubernetes-status-test',
                                              'kubernetes-status-test-reuse')
        p['pv_name'] = p['pvc_name']

        csi_pv['metadata']['name'] = p['pv_name']
        csi_pv['spec']['csi']['volumeHandle'] = volume_name
        core_api.create_persistent_volume(csi_pv)

        pvc['metadata']['name'] = p['pvc_name']
        pvc['spec']['volumeName'] = p['pv_name']
        core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                           namespace='default')

        pod['metadata']['name'] = p['pod_name']
        pod['spec']['volumes'] = [{
            'name':
            pod['spec']['containers'][0]['volumeMounts'][0]['name'],
            'persistentVolumeClaim': {
                'claimName': p['pvc_name'],
            },
        }]
        create_and_wait_pod(core_api, pod)

        ks = {
            'pvName': p['pv_name'],
            'pvStatus': 'Bound',
            'namespace': 'default',
            'pvcName': p['pvc_name'],
            'lastPVCRefAt': '',
            'lastPodRefAt': '',
            'workloadsStatus': [
                {
                    'podName': p['pod_name'],
                    'podStatus': 'Running',
                    'workloadName': '',
                    'workloadType': '',
                },
            ],
        }
        wait_volume_kubernetes_status(client, volume_name, ks)

        delete_and_wait_pod(core_api, p['pod_name'])
        # Since persistentVolumeReclaimPolicy of csi_pv is `Delete`,
        # we don't need to delete the bound PV manually
        delete_and_wait_pvc(core_api, p['pvc_name'])
        wait_delete_pv(core_api, p['pv_name'])
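
The `ks` and `ks_list` dictionaries above encode expected kubernetesStatus fields, with the sentinel value 'not empty' meaning "any non-empty value". The sketch below shows one way such an expectation could be matched against a status; it is an assumption about the comparison wait_volume_kubernetes_status performs, not its actual implementation.

def kubernetes_status_matches(k_status, expected):
    """Check expected fields, treating 'not empty' as a non-empty wildcard."""
    for field, want in expected.items():
        if field == 'workloadsStatus':
            # Workload entries are compared separately in the tests above.
            continue
        got = k_status[field]
        if want == 'not empty':
            if not got:
                return False
        elif got != want:
            return False
    return True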