Example #1
def test_snapshot_rm_basic(dev):  # NOQA
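    """
    Remove the middle snapshot of a three-snapshot chain, purge it, verify
    the remaining snapshots still hold the expected data, then revert to the
    first snapshot and confirm the later writes are gone.
    """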
    existings = {}

    snap1 = Snapshot(dev, generate_random_data(existings))
    snap2 = Snapshot(dev, generate_random_data(existings))
    snap3 = Snapshot(dev, generate_random_data(existings))

    info = cmd.snapshot_info()
    assert len(info) == 4
    assert VOLUME_HEAD in info
    assert snap1.name in info
    assert snap2.name in info
    assert snap3.name in info

    cmd.snapshot_rm(snap2.name)
    cmd.snapshot_purge()

    info = cmd.snapshot_info()
    assert len(info) == 3
    assert snap1.name in info
    assert snap3.name in info

    snap3.verify_checksum()
    snap2.verify_data()
    snap1.verify_data()

    cmd.snapshot_revert(snap1.name)
    snap3.refute_data()
    snap2.refute_data()
    snap1.verify_checksum()
Example #2
def test_provisioner_io(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that input and output on a StorageClass provisioned
    PersistentVolumeClaim works as expected.

    Fixtures are torn down here in the reverse order in which they are
    specified as parameters. Take caution when reordering test fixtures.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])
    write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)

    common.wait_for_volume_detached(client, pvc_volume_name)

    pod_name = 'flexvolume-provisioner-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)
    resp = read_volume_data(core_api, pod_name)

    assert resp == test_data
Example #3
def test_statefulset_pod_deletion(core_api, storage_class,
                                  statefulset):  # NOQA
    """
    Test that a StatefulSet can spin up a new Pod with the same data after a
    previous Pod has been deleted.

    This test will only work in a CSI environment. It will automatically be
    disabled in FlexVolume environments.
    """

    statefulset_name = 'statefulset-pod-deletion-test'
    update_test_manifests(statefulset, storage_class, statefulset_name)
    test_pod_name = statefulset_name + '-' + \
        str(randrange(statefulset['spec']['replicas']))
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    write_volume_data(core_api, test_pod_name, test_data)
    # Not using delete_and_wait_pod here because there is a small chance the
    # StatefulSet recreates the Pod quickly enough that the function won't
    # detect that the Pod was deleted, which would time out and throw an error.
    core_api.delete_namespaced_pod(name=test_pod_name,
                                   namespace='default',
                                   body=k8sclient.V1DeleteOptions())
    wait_statefulset(statefulset)
    resp = read_volume_data(core_api, test_pod_name)

    assert resp == test_data
Example #4
def csi_io_test(client, core_api, csi_pv, pvc, pod_make, base_image=""):  # NOQA
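    """
    Write data through a pod backed by a statically created CSI volume,
    detach the volume, then reuse the same PV/PVC from a second pod and
    verify the data survived.
    """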
    pv_name = generate_volume_name()
    pod_name = 'csi-io-test'
    create_and_wait_csi_pod_named_pv(pv_name, pod_name, client, core_api,
                                     csi_pv, pvc, pod_make, base_image, "")

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    common.wait_for_volume_detached(client, csi_pv['metadata']['name'])

    pod_name = 'csi-io-test-2'
    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [
        create_pvc_spec(pv_name)
    ]
    csi_pv['metadata']['name'] = pv_name
    csi_pv['spec']['csi']['volumeHandle'] = pv_name
    pvc['metadata']['name'] = pv_name
    pvc['spec']['volumeName'] = pv_name
    update_storageclass_references(CSI_PV_TEST_STORAGE_NAME, csi_pv, pvc)

    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
Example #5
def csi_backup_test(client, core_api, csi_pv, pvc, pod_make, base_image=""):  # NOQA
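    """
    Run the backup store test for a CSI volume against every configured
    backup target, updating the backup target setting (and the credential
    secret for S3) before each run.
    """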
    pod_name = 'csi-backup-test'
    create_and_wait_csi_pod(pod_name, client, core_api, csi_pv, pvc, pod_make,
                            base_image, "")
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    # test backupTarget for multiple settings
    backupstores = common.get_backupstore_url()
    i = 1
    for backupstore in backupstores:
        if common.is_backupTarget_s3(backupstore):
            backupsettings = backupstore.split("$")
            setting = client.update(setting, value=backupsettings[0])
            assert setting["value"] == backupsettings[0]

            credential = client.by_id_setting(
                    common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value=backupsettings[1])
            assert credential["value"] == backupsettings[1]
        else:
            setting = client.update(setting, value=backupstore)
            assert setting["value"] == backupstore
            credential = client.by_id_setting(
                    common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value="")
            assert credential["value"] == ""

        backupstore_test(client, core_api, csi_pv, pvc, pod_make, pod_name,
                         base_image, test_data, i)
        i += 1
Example #6
def test_flexvolume_io(client, core_api, flexvolume, pod):  # NOQA
    """
    Test that input and output on a statically defined volume works as
    expected.

    Fixtures are torn down here in the reverse order in which they are
    specified as parameters. Take caution when reordering test fixtures.
    """

    pod_name = 'flexvolume-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['containers'][0]['volumeMounts'][0]['name'] = \
        flexvolume['name']
    pod['spec']['volumes'] = [flexvolume]
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_and_wait_pod(core_api, pod)

    common.write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    wait_for_volume_detached(client, flexvolume["name"])

    pod_name = 'volume-driver-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)

    assert resp == test_data
Example #7
def test_statefulset_recurring_backup(set_random_backupstore, client, core_api, storage_class, statefulset):  # NOQA
    """
    Scenario: test recurring backups on StatefulSets

    Given 1 default backup recurring job created.

    When a statefulset is created.
    And data is written to every statefulset pod.
    And 5 minutes have passed.

    Then 2 snapshots are created for every statefulset pod.
    """

    # backup every minute
    recurring_jobs = {
        "backup": {
            "task": "backup",
            "groups": ["default"],
            "cron": "* * * * *",
            "retain": 2,
            "concurrency": 2,
            "labels": {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot.removed is False:
                count += 1

        # one backup + volume-head
        assert count == 2
Example #8
def test_provisioner_io(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that input and output on a StorageClass provisioned
    PersistentVolumeClaim works as expected.

    Fixtures are torn down here in the reverse order in which they are
    specified as parameters. Take caution when reordering test fixtures.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [
        create_pvc_spec(pvc['metadata']['name'])
    ]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])
    write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)

    common.wait_for_volume_detached(client, pvc_volume_name)

    pod_name = 'flexvolume-provisioner-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)
    resp = read_volume_data(core_api, pod_name)

    assert resp == test_data
Example #9
def csi_io_test(client, core_api, csi_pv, pvc, pod_make, base_image=""):  # NOQA
    pv_name = generate_volume_name()
    pod_name = 'csi-io-test'
    create_and_wait_csi_pod_named_pv(pv_name, pod_name, client, core_api,
                                     csi_pv, pvc, pod_make, base_image, "")

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    common.wait_for_volume_detached(client, csi_pv['metadata']['name'])

    pod_name = 'csi-io-test-2'
    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [
        create_pvc_spec(pv_name)
    ]
    csi_pv['metadata']['name'] = pv_name
    csi_pv['spec']['csi']['volumeHandle'] = pv_name
    pvc['metadata']['name'] = pv_name
    pvc['spec']['volumeName'] = pv_name
    update_storageclass_references(CSI_PV_TEST_STORAGE_NAME, csi_pv, pvc)

    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
Example #10
def test_statefulset_pod_deletion(core_api, storage_class,
                                  statefulset):  # NOQA
    """
    Test that a StatefulSet can spin up a new Pod with the same data after a
    previous Pod has been deleted.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Write some data to one of the pods.
    4. Delete that pod.
    5. Wait for the StatefulSet to recreate the pod.
    6. Verify the data in the pod.
    """

    statefulset_name = 'statefulset-pod-deletion-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    test_pod_name = statefulset_name + '-' + \
        str(randrange(statefulset['spec']['replicas']))
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    write_pod_volume_data(core_api, test_pod_name, test_data)
    # Not using delete_and_wait_pod here because there is a small chance the
    # StatefulSet recreates the Pod quickly enough that the function won't
    # detect that the Pod was deleted, which would time out and throw an error.
    core_api.delete_namespaced_pod(name=test_pod_name,
                                   namespace='default',
                                   body=k8sclient.V1DeleteOptions())
    wait_statefulset(statefulset)
    resp = read_volume_data(core_api, test_pod_name)

    assert resp == test_data
Example #11
def csi_backup_test(client, core_api, csi_pv, pvc, pod_make, base_image=""):  # NOQA
    pod_name = 'csi-backup-test'
    create_and_wait_csi_pod(pod_name, client, core_api, csi_pv, pvc, pod_make,
                            base_image, "")
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    # test backupTarget for multiple settings
    backupstores = common.get_backupstore_url()
    i = 1
    for backupstore in backupstores:
        if common.is_backupTarget_s3(backupstore):
            backupsettings = backupstore.split("$")
            setting = client.update(setting, value=backupsettings[0])
            assert setting.value == backupsettings[0]

            credential = client.by_id_setting(
                    common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value=backupsettings[1])
            assert credential.value == backupsettings[1]
        else:
            setting = client.update(setting, value=backupstore)
            assert setting.value == backupstore
            credential = client.by_id_setting(
                    common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value="")
            assert credential.value == ""

        backupstore_test(client, core_api, csi_pv, pvc, pod_make, pod_name,
                         base_image, test_data, i)
        i += 1
Example #12
def test_statefulset_pod_deletion(core_api, storage_class, statefulset):  # NOQA
    """
    Test that a StatefulSet can spin up a new Pod with the same data after a
    previous Pod has been deleted.

    This test will only work in a CSI environment. It will automatically be
    disabled in FlexVolume environments.
    """

    statefulset_name = 'statefulset-pod-deletion-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    test_pod_name = statefulset_name + '-' + \
        str(randrange(statefulset['spec']['replicas']))
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    write_volume_data(core_api, test_pod_name, test_data)
    # Not using delete_and_wait_pod here because there is a small chance the
    # StatefulSet recreates the Pod quickly enough that the function won't
    # detect that the Pod was deleted, which would time out and throw an error.
    core_api.delete_namespaced_pod(name=test_pod_name, namespace='default',
                                   body=k8sclient.V1DeleteOptions())
    wait_statefulset(statefulset)
    resp = read_volume_data(core_api, test_pod_name)

    assert resp == test_data
Example #13
def test_csi_io(client, core_api, csi_pv, pvc, pod):  # NOQA
    """
    Test that input and output on a statically defined CSI volume works as
    expected.

    Fixtures are torn down here in the reverse order in which they are
    specified as parameters. Take caution when reordering test fixtures.
    """
    pod_name = 'csi-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['volumeName'] = csi_pv['metadata']['name']
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_pv_storage(core_api, client, csi_pv, pvc)
    create_and_wait_pod(core_api, pod)

    write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    common.wait_for_volume_detached(client, csi_pv['metadata']['name'])

    pod_name = 'csi-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)

    assert resp == test_data
Example #14
def volume_rw_test(dev):
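    """
    Write a random block of data at a random offset on the device and read
    it back to confirm basic volume I/O works.
    """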
    assert volume_valid(dev)
    w_data = generate_random_data(VOLUME_RWTEST_SIZE)
    l_data = len(w_data)
    spos_data = generate_random_pos(VOLUME_RWTEST_SIZE)
    volume_write(dev, spos_data, w_data)
    r_data = volume_read(dev, spos_data, l_data)
    assert r_data == w_data
Example #15
def test_statefulset_recurring_backup(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that recurring backups on StatefulSets work properly.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Write some data to every pod.
    4. Schedule recurring jobs for the volumes using the Longhorn API.
    5. Wait for 5 minutes.
    6. Verify the snapshots created by the recurring jobs.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    # backup every minute
    job_backup = {
        "name": "backup",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_pod_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(150)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot.removed is False:
                count += 1

        # one backup + volume-head
        assert count == 2
Example #16
def snapshot_revert_test(dev, address, engine_name):  # NOQA
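    """
    Create three snapshots, then revert to the second and the first in turn
    via the frontend, verifying after each revert that later writes are gone
    and earlier data is intact.
    """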
    existings = {}

    snap1 = Snapshot(dev, generate_random_data(existings), address)
    snap2 = Snapshot(dev, generate_random_data(existings), address)
    snap3 = Snapshot(dev, generate_random_data(existings), address)

    snapList = cmd.snapshot_ls(address)
    assert snap1.name in snapList
    assert snap2.name in snapList
    assert snap3.name in snapList

    snapshot_revert_with_frontend(address, engine_name, snap2.name)
    snap3.refute_data()
    snap2.verify_checksum()
    snap1.verify_data()

    snapshot_revert_with_frontend(address, engine_name, snap1.name)
    snap3.refute_data()
    snap2.refute_data()
    snap1.verify_checksum()
Example #17
def test_snapshot_revert(dev):  # NOQA
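    """
    Revert a volume to the second and then the first of three snapshots,
    checking after each revert that only the expected data remains.
    """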
    existings = {}

    snap1 = Snapshot(dev, generate_random_data(existings))
    snap2 = Snapshot(dev, generate_random_data(existings))
    snap3 = Snapshot(dev, generate_random_data(existings))

    snapList = cmd.snapshot_ls()
    assert snap1.name in snapList
    assert snap2.name in snapList
    assert snap3.name in snapList

    cmd.snapshot_revert(snap2.name)
    snap3.refute_data()
    snap2.verify_checksum()
    snap1.verify_data()

    cmd.snapshot_revert(snap1.name)
    snap3.refute_data()
    snap2.refute_data()
    snap1.verify_checksum()
Example #18
def csi_backup_test(client, core_api, csi_pv, pvc, pod_make, backing_image=""):  # NOQA
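    """
    Create a pod on a CSI volume, run the generic backup store test against
    it, then clean up the pod, PVC, and PV.
    """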
    pod_name = 'csi-backup-test'
    vol_name = create_and_wait_csi_pod(
        pod_name, client, core_api, csi_pv, pvc, pod_make, backing_image, "")
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    backupstore_test(client, core_api, csi_pv, pvc, pod_make, pod_name,
                     vol_name, backing_image, test_data)

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, vol_name)
    delete_and_wait_pv(core_api, vol_name)
Example #19
def test_snapshot_rm_basic(
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2):  # NOQA
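    """
    gRPC variant of the basic snapshot removal test: remove the middle
    snapshot, purge it, verify the surviving data, then revert to the first
    snapshot through the frontend.
    """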
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    existings = {}

    snap1 = Snapshot(dev, generate_random_data(existings), address)
    snap2 = Snapshot(dev, generate_random_data(existings), address)
    snap3 = Snapshot(dev, generate_random_data(existings), address)

    info = cmd.snapshot_info(address)
    assert len(info) == 4
    assert VOLUME_HEAD in info
    assert snap1.name in info
    assert snap2.name in info
    assert snap3.name in info

    cmd.snapshot_rm(address, snap2.name)
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    info = cmd.snapshot_info(address)
    assert len(info) == 3
    assert snap1.name in info
    assert snap3.name in info

    snap3.verify_checksum()
    snap2.verify_data()
    snap1.verify_data()

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap1.name)
    snap3.refute_data()
    snap2.refute_data()
    snap1.verify_checksum()
Example #20
def delete_data(k8s_api_client, pod_name):
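    """
    Overwrite the pod's data file with an empty payload and verify the file
    reads back empty.
    """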
    file_name = 'data-' + pod_name + '.bin'
    test_data = generate_random_data(0)

    write_pod_volume_data(k8s_api_client,
                          pod_name,
                          test_data,
                          filename=file_name)

    volume_data = read_volume_data(k8s_api_client,
                                   pod_name,
                                   filename=file_name)

    assert volume_data == ""
Example #21
def test_statefulset_recurring_backup(
        client,
        core_api,
        storage_class,  # NOQA
        statefulset):  # NOQA
    """
    Test that recurring backups on StatefulSets work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_test_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    # backup every minute
    job_backup = {
        "name": "backup",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(300)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot['removed'] is False:
                count += 1

        # two backups + volume-head
        assert count == 3
Example #22
def flexvolume_io_test(client, core_api, flexvolume, pod):  # NOQA
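    """
    Write data through a pod using a FlexVolume, wait for the volume to
    detach, then mount it in a second pod and verify the data is intact.
    """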
    pod_name = 'flexvolume-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['containers'][0]['volumeMounts'][0]['name'] = \
        flexvolume['name']
    pod['spec']['volumes'] = [flexvolume]
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_and_wait_pod(core_api, pod)

    common.write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    wait_for_volume_detached(client, flexvolume["name"])

    pod_name = 'volume-driver-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)

    assert resp == test_data
Example #23
def flexvolume_io_test(client, core_api, flexvolume, pod):  # NOQA
    pod_name = 'flexvolume-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['containers'][0]['volumeMounts'][0]['name'] = \
        flexvolume['name']
    pod['spec']['volumes'] = [
        flexvolume
    ]
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_and_wait_pod(core_api, pod)

    common.write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    wait_for_volume_detached(client, flexvolume["name"])

    pod_name = 'volume-driver-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)

    assert resp == test_data
Example #24
def test_statefulset_recurring_backup(client, core_api, storage_class,  # NOQA
                                      statefulset):  # NOQA
    """
    Test that recurring backups on StatefulSets work properly.
    """

    statefulset_name = 'statefulset-backup-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    # backup every minute
    job_backup = {"name": "backup", "cron": "* * * * *",
                  "task": "backup", "retain": 2}
    pod_data = get_statefulset_pod_info(core_api, statefulset)
    for pod in pod_data:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        write_volume_data(core_api, pod['pod_name'], pod['data'])
        volume.recurringUpdate(jobs=[job_backup])

    time.sleep(300)

    for pod in pod_data:
        volume = client.by_id_volume(pod['pv_name'])
        snapshots = volume.snapshotList()
        count = 0
        for snapshot in snapshots:
            if snapshot['removed'] is False:
                count += 1

        # two backups + volume-head
        assert count == 3
Example #25
def test_xfs_pv(client, core_api, pod_manifest): # NOQA
    """
    Test create PV with new XFS filesystem

    1. Create a volume
    2. Create a PV for the existing volume, specify `xfs` as filesystem
    3. Create PVC and Pod
    4. Make sure Pod is running.
    5. Write data into the pod and read back for validation.

    Note: The volume will be formatted to XFS filesystem by Kubernetes in this
    case.
    """
    volume_name = generate_volume_name()

    volume = create_and_check_volume(client, volume_name)

    create_pv_for_volume(client, core_api, volume, volume_name, "xfs")

    create_pvc_for_volume(client, core_api, volume, volume_name)

    pod_manifest['spec']['volumes'] = [{
        "name": "pod-data",
        "persistentVolumeClaim": {
            "claimName": volume_name
        }
    }]

    pod_name = pod_manifest['metadata']['name']

    create_and_wait_pod(core_api, pod_manifest)

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)
    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
Example #26
def create_and_test_backups(api, cli, pod_info):
    """
    Create backups for all Pods in a StatefulSet and test that all the backups
    have the correct attributes.

    Args:
        api: An instance of CoreV1Api.
        cli: A Longhorn client instance.
        pod_info: A List of Pods with names and volume information. This List
            can be generated using the get_statefulset_pod_info function
            located in common.py.
    """
    for pod in pod_info:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

        # Create backup.
        volume = cli.by_id_volume(pod['pv_name'])
        volume.snapshotCreate()
        write_volume_data(api, pod['pod_name'], pod['data'])
        pod['backup_snapshot'] = volume.snapshotCreate()
        volume.snapshotCreate()
        volume.snapshotBackup(name=pod['backup_snapshot']['name'])

        # Wait for backup to appear.
        found = False
        for i in range(DEFAULT_BACKUP_TIMEOUT):
            backup_volumes = cli.list_backupVolume()
            for bv in backup_volumes:
                if bv['name'] == pod['pv_name']:
                    found = True
                    break
            if found:
                break
            time.sleep(DEFAULT_POD_INTERVAL)
        assert found

        found = False
        for i in range(DEFAULT_BACKUP_TIMEOUT):
            backups = bv.backupList()
            for b in backups:
                if b['snapshotName'] == pod['backup_snapshot']['name']:
                    found = True
                    break
            if found:
                break
            time.sleep(DEFAULT_POD_INTERVAL)
        assert found

        # Make sure backup has the correct attributes.
        new_b = bv.backupGet(name=b["name"])
        assert new_b["name"] == b["name"]
        assert new_b["url"] == b["url"]
        assert new_b["snapshotName"] == b["snapshotName"]
        assert new_b["snapshotCreated"] == b["snapshotCreated"]
        assert new_b["created"] == b["created"]
        assert new_b["volumeName"] == b["volumeName"]
        assert new_b["volumeSize"] == b["volumeSize"]
        assert new_b["volumeCreated"] == b["volumeCreated"]

        # This backup has the url attribute we need to restore from backup.
        pod['backup_snapshot'] = b
Example #27
def create_and_test_backups(api, cli, pod_info):
    """
    Create backups for all Pods in a StatefulSet and test that all the backups
    have the correct attributes.

    Args:
        api: An instance of CoreV1Api.
        cli: A Longhorn client instance.
        pod_info: A List of Pods with names and volume information. This List
            can be generated using the get_statefulset_pod_info function
            located in common.py.
    """
    for pod in pod_info:
        pod['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        pod['backup_snapshot'] = ''

        # Create backup.
        volume_name = pod['pv_name']
        volume = cli.by_id_volume(pod['pv_name'])
        create_snapshot(cli, volume_name)
        write_pod_volume_data(api, pod['pod_name'], pod['data'])
        pod['backup_snapshot'] = create_snapshot(cli, volume_name)
        create_snapshot(cli, volume_name)
        volume.snapshotBackup(name=pod['backup_snapshot']['name'])

        # Wait for backup to appear.
        found = False
        for i in range(DEFAULT_BACKUP_TIMEOUT):
            backup_volumes = cli.list_backupVolume()
            for bv in backup_volumes:
                if bv.name == pod['pv_name']:
                    found = True
                    break
            if found:
                break
            time.sleep(DEFAULT_POD_INTERVAL)
        assert found

        found = False
        for i in range(DEFAULT_BACKUP_TIMEOUT):
            backups = bv.backupList().data
            for b in backups:
                if b['snapshotName'] == pod['backup_snapshot']['name']:
                    found = True
                    break
            if found:
                break
            time.sleep(DEFAULT_POD_INTERVAL)
        assert found

        # Make sure backup has the correct attributes.
        new_b = bv.backupGet(name=b.name)
        assert new_b.name == b.name
        assert new_b.url == b.url
        assert new_b.snapshotName == b.snapshotName
        assert new_b.snapshotCreated == b.snapshotCreated
        assert new_b.created == b.created
        assert new_b.volumeName == b.volumeName
        assert new_b.volumeSize == b.volumeSize
        assert new_b.volumeCreated == b.volumeCreated

        # This backup has the url attribute we need to restore from backup.
        pod['backup_snapshot'] = b
Example #28
def test_rwx_multi_statefulset_with_same_pvc(core_api, pvc, statefulset,
                                             pod):  # NOQA
    """
    Test writing data into a volume from multiple pods using the same PVC

    1. Create a volume with 'accessMode' rwx.
    2. Create a PV and a PVC with access mode 'readwritemany' and attach to the
       volume.
    3. Deploy a StatefulSet of 2 pods using the PVC created above.
    4. Wait for both pods to come up.
    5. Create a pod using the same PVC.
    6. Wait for the StatefulSet to come up healthy.
    7. Write data to all three pods and compute md5sum.
    8. Check the data md5sum in the share manager pod.
    """
    pvc_name = 'pvc-multi-pods-test'
    statefulset_name = 'statefulset-rwx-same-pvc-test'
    pod_name = 'pod-rwx-same-pvc-test'

    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = 'longhorn'
    pvc['spec']['accessModes'] = ['ReadWriteMany']

    core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                       namespace='default')

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['template']['spec']['volumes'] = \
        [create_pvc_spec(pvc_name)]
    del statefulset['spec']['volumeClaimTemplates']

    create_and_wait_statefulset(statefulset)

    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api,
                          statefulset_name + '-0',
                          test_data,
                          filename='test1')
    assert test_data == read_volume_data(core_api,
                                         statefulset_name + '-1',
                                         filename='test1')

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    assert test_data == read_volume_data(core_api, pod_name, filename='test1')

    test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data_2, filename='test2')

    command1 = 'cat /export' + '/' + pv_name + '/' + 'test1'
    command2 = 'cat /export' + '/' + pv_name + '/' + 'test2'

    assert test_data == exec_command_in_pod(core_api, command1,
                                            share_manager_name,
                                            LONGHORN_NAMESPACE)
    assert test_data_2 == exec_command_in_pod(core_api, command2,
                                              share_manager_name,
                                              LONGHORN_NAMESPACE)
Example #29
def test_rwx_deployment_with_multi_pods(core_api, pvc,
                                        make_deployment_with_pvc):  # NOQA
    """
    Test a deployment of 2 pods with the same PVC.

    1. Create a volume with 'accessMode' rwx.
    2. Create a PV and a PVC with access mode 'readwritemany' and attach to the
       volume.
    3. Create a deployment of 2 pods with the PVC created above.
    4. Wait for 2 pods to come up healthy.
    5. Write data in both pods and compute md5sum.
    6. Check the data md5sum in the share manager pod.
    """

    pvc_name = 'pvc-deployment-multi-pods-test'
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = 'longhorn'
    pvc['spec']['accessModes'] = ['ReadWriteMany']

    core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                       namespace='default')

    deployment = make_deployment_with_pvc('deployment-multi-pods-test',
                                          pvc_name,
                                          replicas=2)
    apps_api = get_apps_api_client()
    create_and_wait_deployment(apps_api, deployment)

    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name
    deployment_label_selector = "name=" + \
                                deployment["metadata"]["labels"]["name"]

    deployment_pod_list = \
        core_api.list_namespaced_pod(namespace="default",
                                     label_selector=deployment_label_selector)

    assert deployment_pod_list.items.__len__() == 2

    pod_name_1 = deployment_pod_list.items[0].metadata.name
    test_data_1 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name_1, test_data_1, filename='test1')

    pod_name_2 = deployment_pod_list.items[1].metadata.name
    command = 'cat /data/test1'
    pod_data_2 = exec_command_in_pod(core_api, command, pod_name_2, 'default')

    assert test_data_1 == pod_data_2

    test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name_2, test_data_2, filename='test2')

    command = 'cat /export' + '/' + pv_name + '/' + 'test1'
    share_manager_data_1 = exec_command_in_pod(core_api, command,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)
    assert test_data_1 == share_manager_data_1

    command = 'cat /export' + '/' + pv_name + '/' + 'test2'
    share_manager_data_2 = exec_command_in_pod(core_api, command,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)

    assert test_data_2 == share_manager_data_2
Example #30
def test_rwx_delete_share_manager_pod(core_api, statefulset):  # NOQA
    """
    Test moving the share manager pod from one node to another.

    1. Create a StatefulSet of 1 pod with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for StatefulSet to come up healthy.
    3. Write data and compute md5sum.
    4. Delete the share manager pod.
    5. Wait for a new pod to be created and the volume to get attached.
    6. Check the data md5sum in the StatefulSet.
    7. Write more data to it and compute md5sum.
    8. Check the data md5sum in share manager volume.
    """

    statefulset_name = 'statefulset-delete-share-manager-pods-test'

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['replicas'] = 1
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    pod_name = statefulset_name + '-' + '0'
    pvc_name = \
        statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name'] \
        + '-' + statefulset_name + '-0'
    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data, filename='test1')

    delete_and_wait_pod(core_api,
                        share_manager_name,
                        namespace=LONGHORN_NAMESPACE)

    target_pod = core_api.read_namespaced_pod(name=pod_name,
                                              namespace='default')
    wait_delete_pod(core_api, target_pod.metadata.uid)
    wait_for_pod_remount(core_api, pod_name)

    test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data_2, filename='test2')

    command1 = 'cat /export/' + pv_name + '/test1'
    share_manager_data_1 = exec_command_in_pod(core_api, command1,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)
    assert test_data == share_manager_data_1

    command2 = 'cat /export/' + pv_name + '/test2'
    share_manager_data_2 = exec_command_in_pod(core_api, command2,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)
    assert test_data_2 == share_manager_data_2
Example #31
def test_rwx_with_statefulset_multi_pods(core_api, statefulset):  # NOQA
    """
    Test creation of share manager pods and RWX volumes for 2 pods.

    1. Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for both pods to come up running.
    3. Verify there are two share manager pods created in the longhorn
       namespace and that they have the directory with the PV name in the
       path `/export`.
    4. Write data in both pods and compute md5sum.
    5. Compare the md5sum of the data with the data written in the share
       manager.
    """

    statefulset_name = 'statefulset-rwx-multi-pods-test'
    share_manager_name = []
    volumes_name = []

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    for i in range(2):
        pvc_name = \
            statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']\
            + '-' + statefulset_name + '-' + str(i)
        pv_name = get_volume_name(core_api, pvc_name)

        assert pv_name is not None

        volumes_name.append(pv_name)
        share_manager_name.append('share-manager-' + pv_name)

        check_pod_existence(core_api,
                            share_manager_name[i],
                            namespace=LONGHORN_NAMESPACE)

    command = "ls /export | grep -i 'pvc' | wc -l"

    assert exec_command_in_pod(core_api, command, share_manager_name[0],
                               LONGHORN_NAMESPACE) == '1'
    assert exec_command_in_pod(core_api, command, share_manager_name[1],
                               LONGHORN_NAMESPACE) == '1'

    md5sum_pod = []
    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        test_data = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api, test_pod_name, test_data)
        md5sum_pod.append(test_data)

    for i in range(2):
        command = 'cat /export' + '/' + volumes_name[i] + '/' + 'test'
        pod_data = exec_command_in_pod(core_api, command,
                                       share_manager_name[i],
                                       LONGHORN_NAMESPACE)

        assert pod_data == md5sum_pod[i]
Example #32
def test_rwx_statefulset_scale_down_up(core_api, statefulset):  # NOQA
    """
    Test scaling up and down of pods attached to an RWX volume.

    1. Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for StatefulSet pods to come up healthy.
    3. Write data and compute md5sum in the both pods.
    4. Delete the pods.
    5. Wait for the pods to be terminated.
    6. Verify the share manager pods are no longer available and the volume is
       detached.
    7. Recreate the pods.
    8. Wait for the new pods to come up.
    9. Check the data md5sum in the new pods.
    """

    statefulset_name = 'statefulset-rwx-scale-down-up-test'
    share_manager_name = []

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    for i in range(2):
        pvc_name = \
            statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']\
            + '-' + statefulset_name + '-' + str(i)
        pv_name = get_volume_name(core_api, pvc_name)

        assert pv_name is not None

        share_manager_name.append('share-manager-' + pv_name)

        check_pod_existence(core_api,
                            share_manager_name[i],
                            namespace=LONGHORN_NAMESPACE)

    md5sum_pod = []
    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        test_data = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api, test_pod_name, test_data)
        md5sum_pod.append(test_data)

    statefulset['spec']['replicas'] = replicas = 0
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})
    for i in range(DEFAULT_STATEFULSET_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(
            name=statefulset['metadata']['name'], namespace='default')
        if s_set.status.ready_replicas == replicas or \
                (replicas == 0 and not s_set.status.ready_replicas):
            break
        time.sleep(DEFAULT_STATEFULSET_INTERVAL)

    pods = core_api.list_namespaced_pod(namespace=LONGHORN_NAMESPACE)

    found = False
    for item in pods.items:
        if item.metadata.name == share_manager_name[0] or \
                item.metadata.name == share_manager_name[1]:
            found = True
            break

    assert not found

    statefulset['spec']['replicas'] = replicas = 2
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})
    wait_statefulset(statefulset)

    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        command = 'cat /data/test'
        pod_data = exec_command_in_pod(core_api, command, test_pod_name,
                                       'default')

        assert pod_data == md5sum_pod[i]
Example #33
def test_csi_block_volume(client, core_api, storage_class, pvc, pod_manifest):  # NOQA
    """
    Test CSI feature: raw block volume

    1. Create a PVC with `volumeMode = Block`
    2. Create a pod using the PVC to dynamically provision a volume
    3. Verify the pod creation
    4. Generate `test_data` and write to the block volume directly in the pod
    5. Read the data back for validation
    6. Delete the pod and create `pod2` to use the same volume
    7. Validate the data in `pod2` is consistent with `test_data`
    """
    pod_name = 'csi-block-volume-test'
    pvc_name = pod_name + "-pvc"
    device_path = "/dev/longhorn/longhorn-test-blk"

    storage_class['reclaimPolicy'] = 'Retain'
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['volumeMode'] = 'Block'
    pvc['spec']['storageClassName'] = storage_class['metadata']['name']
    pvc['spec']['resources'] = {
        'requests': {
            'storage': size_to_string(1 * Gi)
        }
    }
    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name': 'longhorn-blk',
        'persistentVolumeClaim': {
            'claimName': pvc_name,
        },
    }]
    pod_manifest['spec']['containers'][0]['volumeMounts'] = []
    pod_manifest['spec']['containers'][0]['volumeDevices'] = [
        {'name': 'longhorn-blk', 'devicePath': device_path}
    ]

    create_storage_class(storage_class)
    create_pvc(pvc)
    pv_name = wait_and_get_pv_for_pvc(core_api, pvc_name).metadata.name
    create_and_wait_pod(core_api, pod_manifest)

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    test_offset = random.randint(0, VOLUME_RWTEST_SIZE)
    write_pod_block_volume_data(
        core_api, pod_name, test_data, test_offset, device_path)
    returned_data = read_pod_block_volume_data(
        core_api, pod_name, len(test_data), test_offset, device_path
    )
    assert test_data == returned_data
    md5_sum = get_pod_data_md5sum(
        core_api, pod_name, device_path)

    delete_and_wait_pod(core_api, pod_name)
    common.wait_for_volume_detached(client, pv_name)

    pod_name_2 = 'csi-block-volume-test-reuse'
    pod_manifest['metadata']['name'] = pod_name_2
    create_and_wait_pod(core_api, pod_manifest)

    returned_data = read_pod_block_volume_data(
        core_api, pod_name_2, len(test_data), test_offset, device_path
    )
    assert test_data == returned_data
    md5_sum_2 = get_pod_data_md5sum(
        core_api, pod_name_2, device_path)
    assert md5_sum == md5_sum_2

    delete_and_wait_pod(core_api, pod_name_2)
    delete_and_wait_pvc(core_api, pvc_name)
    delete_and_wait_pv(core_api, pv_name)
Example #34
def test_csi_offline_expansion(client, core_api, storage_class, pvc, pod_manifest):  # NOQA
    """
    Test CSI feature: offline expansion

    1. Create a new `storage_class` with `allowVolumeExpansion` set
    2. Create PVC and Pod with dynamic provisioned volume from the StorageClass
    3. Generate `test_data` and write to the pod
    4. Delete the pod
    5. Update pvc.spec.resources to expand the volume
    6. Verify the volume expansion done using Longhorn API
    7. Create a new pod and validate the volume content
    """
    create_storage_class(storage_class)

    pod_name = 'csi-offline-expand-volume-test'
    pvc_name = pod_name + "-pvc"
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = storage_class['metadata']['name']
    create_pvc(pvc)

    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name':
            pod_manifest['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {'claimName': pvc_name},
    }]
    create_and_wait_pod(core_api, pod_manifest)
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)

    pv = wait_and_get_pv_for_pvc(core_api, pvc_name)
    assert pv.status.phase == "Bound"
    volume_name = pv.spec.csi.volume_handle
    wait_for_volume_detached(client, volume_name)

    pvc['spec']['resources'] = {
        'requests': {
            'storage': size_to_string(EXPANDED_VOLUME_SIZE*Gi)
        }
    }
    expand_and_wait_for_pvc(core_api, pvc)
    wait_for_volume_expansion(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume.state == "detached"
    assert volume.size == str(EXPANDED_VOLUME_SIZE*Gi)

    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name':
            pod_manifest['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {'claimName': pvc_name},
    }]
    create_and_wait_pod(core_api, pod_manifest)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data

    volume = client.by_id_volume(volume_name)
    engine = get_volume_engine(volume)
    assert volume.size == str(EXPANDED_VOLUME_SIZE*Gi)
    assert volume.size == engine.size
Example #35
def test_offline_node_with_attached_volume_and_pod(
        client, core_api, volume_name, make_deployment_with_pvc,
        reset_cluster_ready_status):  # NOQA
    """
    Test offline node with attached volume and pod

    1. Create PV/PVC/Deployment manifest.
    2. Update the deployment's tolerations to 20 seconds to speed up the test
    3. Update deployment's node affinity rule to avoid the current node
    4. Create volume, PV/PVC and deployment.
    5. Find the pod in the deployment and write `test_data` into it
    6. Shut down the node the pod is running on
    7. Wait for the deployment to delete the pod
        1. The deployment cannot delete the pod here because the kubelet
        doesn't respond
    8. Force delete the terminating pod
    9. Wait for the new pod to be created and the volume attached
    10. Check `test_data` in the new pod
    """
    toleration_seconds = 20

    apps_api = get_apps_api_client()
    cloudprovider = detect_cloudprovider()

    volume_name = generate_volume_name()
    pv_name = volume_name + "-pv"
    pvc_name = volume_name + "-pvc"
    deployment_name = volume_name + "-dep"

    longhorn_test_node_name = get_self_host_id()

    deployment_manifest = make_deployment_with_pvc(deployment_name, pvc_name)

    unreachable_toleration = {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": toleration_seconds
    }

    not_ready_toleration = {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": toleration_seconds
    }

    deployment_manifest["spec"]["template"]["spec"]["tolerations"] =\
        [unreachable_toleration, not_ready_toleration]

    node_affinity_roles = {
        "nodeAffinity": {
            "requiredDuringSchedulingIgnoredDuringExecution": {
                "nodeSelectorTerms": [{
                    "matchExpressions": [{
                        "key": "kubernetes.io/hostname",
                        "operator": "NotIn",
                        "values": [longhorn_test_node_name]
                    }]
                }]
            }
        }
    }

    deployment_manifest["spec"]["template"]["spec"]["affinity"] =\
        node_affinity_roles

    longhorn_volume = create_and_check_volume(client, volume_name, size=SIZE)

    wait_for_volume_detached(client, volume_name)

    create_pv_for_volume(client, core_api, longhorn_volume, pv_name)

    create_pvc_for_volume(client, core_api, longhorn_volume, pvc_name)

    create_and_wait_deployment(apps_api, deployment_manifest)

    deployment_label_selector =\
        "name=" + deployment_manifest["metadata"]["labels"]["name"]

    deployment_pod_list =\
        core_api.list_namespaced_pod(namespace="default",
                                     label_selector=deployment_label_selector)

    assert deployment_pod_list.items.__len__() == 1

    pod_name = deployment_pod_list.items[0].metadata.name

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    write_pod_volume_data(core_api, pod_name, test_data)

    node_name = deployment_pod_list.items[0].spec.node_name
    node = cloudprovider.node_id(node_name)

    cloudprovider.node_shutdown(node)

    k8s_node_down = wait_for_node_down_k8s(node_name, core_api)

    assert k8s_node_down

    client = get_longhorn_api_client()

    longhorn_node_down = wait_for_node_down_longhorn(node_name, client)
    assert longhorn_node_down

    time.sleep(toleration_seconds + 5)

    for i in range(TERMINATING_POD_RETRYS):
        deployment_pod_list =\
            core_api.list_namespaced_pod(
                namespace="default",
                label_selector=deployment_label_selector
            )

        terminating_pod_name = None
        for pod in deployment_pod_list.items:
            if pod.metadata.__getattribute__("deletion_timestamp") is not None:
                terminating_pod_name = pod.metadata.name
                break

        if terminating_pod_name is not None:
            break
        else:
            time.sleep(TERMINATING_POD_INTERVAL)

    assert terminating_pod_name is not None

    core_api.delete_namespaced_pod(namespace="default",
                                   name=terminating_pod_name,
                                   grace_period_seconds=0)

    delete_and_wait_pod(core_api, terminating_pod_name)

    deployment_pod_list =\
        core_api.list_namespaced_pod(
            namespace="default",
            label_selector=deployment_label_selector
        )

    assert deployment_pod_list.items.__len__() == 1

    wait_for_volume_detached(client, volume_name)
    wait_for_volume_healthy(client, volume_name)

    deployment_pod_list =\
        core_api.list_namespaced_pod(
            namespace="default",
            label_selector=deployment_label_selector
        )

    assert deployment_pod_list.items.__len__() == 1

    new_pod_name = deployment_pod_list.items[0].metadata.name

    wait_pod(new_pod_name)

    resp_data = read_volume_data(core_api, new_pod_name)

    assert test_data == resp_data
Example #36
def test_csi_expansion_with_replica_failure(client, core_api, storage_class, pvc, pod_manifest):  # NOQA
    """
    Test expansion success but with one replica expansion failure

    1. Create a new `storage_class` with `allowVolumeExpansion` set
    2. Create PVC and Pod with dynamic provisioned volume from the StorageClass
    3. Create an empty directory at the expansion snapshot tmp meta file path
       for one replica so that the replica expansion will fail
    4. Generate `test_data` and write to the pod
    5. Delete the pod and wait for volume detachment
    6. Update pvc.spec.resources to expand the volume
    7. Check the expansion result using the Longhorn API. There will be an
       expansion error caused by the failed replica, but overall the expansion
       should succeed.
    8. Create a new pod and
       check if the volume will rebuild the failed replica
    9. Validate the volume content, then check if data writing looks fine
    """
    create_storage_class(storage_class)

    pod_name = 'csi-expansion-with-replica-failure-test'
    pvc_name = pod_name + "-pvc"
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = storage_class['metadata']['name']
    create_pvc(pvc)

    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name':
            pod_manifest['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {'claimName': pvc_name},
    }]
    create_and_wait_pod(core_api, pod_manifest)

    expand_size = str(EXPANDED_VOLUME_SIZE*Gi)
    pv = wait_and_get_pv_for_pvc(core_api, pvc_name)
    assert pv.status.phase == "Bound"
    volume_name = pv.spec.csi.volume_handle
    volume = client.by_id_volume(volume_name)
    failed_replica = volume.replicas[0]
    fail_replica_expansion(client, core_api,
                           volume_name, expand_size, [failed_replica])

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)

    delete_and_wait_pod(core_api, pod_name)
    wait_for_volume_detached(client, volume_name)

    # There will be replica expansion error info
    # but the expansion should succeed.
    pvc['spec']['resources'] = {
        'requests': {
            'storage': size_to_string(EXPANDED_VOLUME_SIZE*Gi)
        }
    }
    expand_and_wait_for_pvc(core_api, pvc)
    wait_for_expansion_failure(client, volume_name)
    wait_for_volume_expansion(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume.state == "detached"
    assert volume.size == expand_size
    for r in volume.replicas:
        if r.name == failed_replica.name:
            assert r.failedAt != ""
        else:
            assert r.failedAt == ""

    # Check if the replica will be rebuilt
    # and if the volume still works fine.
    create_and_wait_pod(core_api, pod_manifest)
    volume = wait_for_volume_healthy(client, volume_name)
    for r in volume.replicas:
        if r.name == failed_replica.name:
            assert r.mode == ""
        else:
            assert r.mode == "RW"
    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)
    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
Example #37
def backup_test(client, host_id, volname):
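    """
    Back up a snapshot of the volume, verify the backup's attributes, restore
    it into a new volume and check the data, then delete the backup and
    confirm it no longer appears in the backup list.
    """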
    volume = client.by_id_volume(volname)
    volume.snapshotCreate()
    w_data = generate_random_data(VOLUME_RWTEST_SIZE)
    start_pos = generate_random_pos(VOLUME_RWTEST_SIZE)
    l_data = volume_write(volume["endpoint"], start_pos, w_data)
    snap2 = volume.snapshotCreate()
    volume.snapshotCreate()

    volume.snapshotBackup(name=snap2["name"])

    found = False
    for i in range(100):
        bvs = client.list_backupVolume()
        for bv in bvs:
            if bv["name"] == volname:
                found = True
                break
        if found:
            break
        time.sleep(1)
    assert found

    found = False
    for i in range(20):
        backups = bv.backupList()
        for b in backups:
            if b["snapshotName"] == snap2["name"]:
                found = True
                break
        if found:
            break
        time.sleep(1)
    assert found

    new_b = bv.backupGet(name=b["name"])
    assert new_b["name"] == b["name"]
    assert new_b["url"] == b["url"]
    assert new_b["snapshotName"] == b["snapshotName"]
    assert new_b["snapshotCreated"] == b["snapshotCreated"]
    assert new_b["created"] == b["created"]
    assert new_b["volumeName"] == b["volumeName"]
    assert new_b["volumeSize"] == b["volumeSize"]
    assert new_b["volumeCreated"] == b["volumeCreated"]

    # test restore
    restoreName = generate_volume_name()
    volume = client.create_volume(name=restoreName, size=SIZE,
                                  numberOfReplicas=2,
                                  fromBackup=b["url"])
    volume = common.wait_for_volume_detached(client, restoreName)
    assert volume["name"] == restoreName
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, restoreName)
    r_data = volume_read(volume["endpoint"], start_pos, l_data)
    assert r_data == w_data
    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, restoreName)
    client.delete(volume)

    volume = wait_for_volume_delete(client, restoreName)

    bv.backupDelete(name=b["name"])

    backups = bv.backupList()
    found = False
    for b in backups:
        if b["snapshotName"] == snap2["name"]:
            found = True
            break
    assert not found