Example 1
def test_provisioner_mount(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that a StorageClass provisioned volume can be created, mounted,
    unmounted, and deleted properly on the Kubernetes cluster.

    Fixtures are torn down here in the reverse order in which they are
    specified as parameters. Take caution when reordering test fixtures.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-mount-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [
        create_pvc_spec(pvc['metadata']['name'])
    ]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == pvc_volume_name
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes[0]["state"] == "attached"
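For reference, the `create_pvc_spec` helper used throughout these examples builds the pod `spec.volumes` entry that points at the PVC. A minimal sketch, assuming the conventional `pod-data` volume name used by such tests (not necessarily the exact helper):

def create_pvc_spec(name):
    # Build a pod spec.volumes entry backed by the named PVC.
    # The 'pod-data' volume name and the readOnly flag are assumptions
    # for illustration.
    return {
        'name': 'pod-data',
        'persistentVolumeClaim': {
            'claimName': name,
            'readOnly': False
        }
    }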
Example 2
def test_provisioner_params(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that substituting different StorageClass parameters is reflected in
    the resulting PersistentVolumeClaim.

    Fixtures are torn down here in the reverse order in which they are
    specified as parameters. Take caution when reordering test fixtures.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-params-test'
    volume_size = 2 * Gi
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['resources']['requests']['storage'] = \
        size_to_string(volume_size)
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    storage_class['parameters'] = {
        'numberOfReplicas': '2',
        'staleReplicaTimeout': '20'
    }

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == pvc_volume_name
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes[0]["state"] == "attached"
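The `storage_class` fixture patched above corresponds to a Kubernetes StorageClass whose `parameters` are plain strings passed through to the provisioned Longhorn volume, which is why the assertion casts `numberOfReplicas` back to `int`. A sketch of roughly what the fixture dict holds (the provisioner string is an assumption and differs between Longhorn releases):

storage_class = {
    'apiVersion': 'storage.k8s.io/v1',
    'kind': 'StorageClass',
    'metadata': {'name': DEFAULT_STORAGECLASS_NAME},
    # Assumed provisioner: older releases used 'rancher.io/longhorn',
    # CSI-based releases use 'driver.longhorn.io'.
    'provisioner': 'driver.longhorn.io',
    'parameters': {
        'numberOfReplicas': '2',        # StorageClass parameters are strings
        'staleReplicaTimeout': '20'
    }
}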
Example 3
def test_provisioner_io(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that input and output on a StorageClass provisioned
    PersistentVolumeClaim works as expected.

    Fixtures are torn down here in the reverse order in which they are
    specified as parameters. Take caution when reordering test fixtures.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])
    write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)

    common.wait_for_volume_detached(client, pvc_volume_name)

    pod_name = 'flexvolume-provisioner-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)
    resp = read_volume_data(core_api, pod_name)

    assert resp == test_data
Example 4
def test_provisioner_tags(client, core_api, node_default_tags, storage_class,
                          pvc, pod):  # NOQA
    """
    Test that a StorageClass can properly provision a volume with requested
    Tags.

    Test prerequisite:
      - set Replica Node Level Soft Anti-Affinity enabled

    1. Use `node_default_tags` to add default tags to nodes.
    2. Create a StorageClass with disk and node tag set.
    3. Create PVC and Pod.
    4. Verify the volume has the correct parameters and tags.
    """

    replica_node_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    client.update(replica_node_soft_anti_affinity_setting, value="true")

    # Prepare pod and volume specs.
    pod_name = 'provisioner-tags-test'
    tag_spec = {
        "disk": ["ssd", "nvme"],
        "expected": 1,
        "node": ["storage", "main"]
    }

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    storage_class['parameters']['diskSelector'] = 'ssd,nvme'
    storage_class['parameters']['nodeSelector'] = 'storage,main'
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes.data[0].name == pvc_volume_name
    assert volumes.data[0].size == str(volume_size)
    assert volumes.data[0].numberOfReplicas == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes.data[0].state == "attached"
    check_volume_replicas(volumes.data[0], tag_spec, node_default_tags)
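`check_volume_replicas` verifies replica placement against the requested tags. A simplified sketch of that kind of check, not the actual helper, assuming `tag_mapping` maps each node ID to its `node` and `disk` tag lists as set up by `node_default_tags`:

def check_volume_replicas_sketch(volume, spec, tag_mapping):
    # Every replica must sit on a node whose node tags and disk tags
    # include the selectors requested through the StorageClass.
    # spec['expected'] is the number of nodes expected to satisfy the tags.
    for replica in volume.replicas:
        tags = tag_mapping[replica.hostId]
        assert set(spec['node']).issubset(set(tags['node']))
        assert set(spec['disk']).issubset(set(tags['disk']))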
Example 5
def test_provisioner_tags(client, core_api, node_default_tags, storage_class,
                          pvc, pod):  # NOQA
    """
    Test that a StorageClass can properly provision a volume with requested
    Tags.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-tags-test'
    tag_spec = {
        "disk": ["ssd", "nvme"],
        "expected": 1,
        "node": ["storage", "main"]
    }

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    storage_class['parameters']['diskSelector'] = 'ssd,nvme'
    storage_class['parameters']['nodeSelector'] = 'storage,main'
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == pvc_volume_name
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes[0]["state"] == "attached"
    check_volume_replicas(volumes[0], tag_spec, node_default_tags)
Example 6
def test_rwx_multi_statefulset_with_same_pvc(core_api, pvc, statefulset,
                                             pod):  # NOQA
    """
    Test writing of data into a volume from multiple pods using the same PVC

    1. Create a volume with 'accessMode' rwx.
    2. Create a PV and a PVC with access mode 'readwritemany' and attach to the
       volume.
    3. Deploy a StatefulSet of 2 pods using the existing PVC created above.
    4. Wait for both pods to come up.
    5. Create a pod using the same PVC created above.
    6. Wait for the pod to come up healthy.
    7. Write data in all three pods and compute md5sum.
    8. Check the data md5sum in the share manager pod.
    """
    pvc_name = 'pvc-multi-pods-test'
    statefulset_name = 'statefulset-rwx-same-pvc-test'
    pod_name = 'pod-rwx-same-pvc-test'

    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = 'longhorn'
    pvc['spec']['accessModes'] = ['ReadWriteMany']

    core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                       namespace='default')

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['template']['spec']['volumes'] = \
        [create_pvc_spec(pvc_name)]
    del statefulset['spec']['volumeClaimTemplates']

    create_and_wait_statefulset(statefulset)

    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api,
                          statefulset_name + '-0',
                          test_data,
                          filename='test1')
    assert test_data == read_volume_data(core_api,
                                         statefulset_name + '-1',
                                         filename='test1')

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    assert test_data == read_volume_data(core_api, pod_name, filename='test1')

    test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data_2, filename='test2')

    command1 = 'cat /export' + '/' + pv_name + '/' + 'test1'
    command2 = 'cat /export' + '/' + pv_name + '/' + 'test2'

    assert test_data == exec_command_in_pod(core_api, command1,
                                            share_manager_name,
                                            LONGHORN_NAMESPACE)
    assert test_data_2 == exec_command_in_pod(core_api, command2,
                                              share_manager_name,
                                              LONGHORN_NAMESPACE)
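Several examples below read files straight out of the share manager pod with `exec_command_in_pod`. A sketch of how such a helper is typically built on the Kubernetes Python client (container selection and output stripping are assumptions):

from kubernetes.stream import stream

def exec_command_in_pod_sketch(api, command, pod_name, namespace):
    # Run a shell command in the pod's first container and return its stdout.
    exec_command = ['/bin/sh', '-c', command]
    output = stream(api.connect_get_namespaced_pod_exec,
                    pod_name, namespace,
                    command=exec_command,
                    stderr=True, stdin=False,
                    stdout=True, tty=False)
    return output.strip()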
Example 7
def test_rwx_deployment_with_multi_pods(core_api, pvc,
                                        make_deployment_with_pvc):  # NOQA
    """
    Test deployment of 2 pods with same PVC.

    1. Create a volume with 'accessMode' rwx.
    2. Create a PV and a PVC with access mode 'readwritemany' and attach to the
       volume.
    3. Create a deployment of 2 pods with the PVC created above.
    4. Wait for 2 pods to come up healthy.
    5. Write data in both pods and compute md5sum.
    6. Check the data md5sum in the share manager pod.
    """

    pvc_name = 'pvc-deployment-multi-pods-test'
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = 'longhorn'
    pvc['spec']['accessModes'] = ['ReadWriteMany']

    core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                       namespace='default')

    deployment = make_deployment_with_pvc('deployment-multi-pods-test',
                                          pvc_name,
                                          replicas=2)
    apps_api = get_apps_api_client()
    create_and_wait_deployment(apps_api, deployment)

    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name
    deployment_label_selector = "name=" + \
                                deployment["metadata"]["labels"]["name"]

    deployment_pod_list = \
        core_api.list_namespaced_pod(namespace="default",
                                     label_selector=deployment_label_selector)

    assert len(deployment_pod_list.items) == 2

    pod_name_1 = deployment_pod_list.items[0].metadata.name
    test_data_1 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name_1, test_data_1, filename='test1')

    pod_name_2 = deployment_pod_list.items[1].metadata.name
    command = 'cat /data/test1'
    pod_data_2 = exec_command_in_pod(core_api, command, pod_name_2, 'default')

    assert test_data_1 == pod_data_2

    test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name_2, test_data_2, filename='test2')

    command = 'cat /export' + '/' + pv_name + '/' + 'test1'
    share_manager_data_1 = exec_command_in_pod(core_api, command,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)
    assert test_data_1 == share_manager_data_1

    command = 'cat /export' + '/' + pv_name + '/' + 'test2'
    share_manager_data_2 = exec_command_in_pod(core_api, command,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)

    assert test_data_2 == share_manager_data_2
Example 8
def test_rwx_delete_share_manager_pod(core_api, statefulset):  # NOQA
    """
    Test moving of Share manager pod from one node to another.

    1. Create a StatefulSet of 1 pod with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for StatefulSet to come up healthy.
    3. Write data and compute md5sum.
    4. Delete the share manager pod.
    5. Wait for a new pod to be created and the volume to be reattached.
    6. Check the data md5sum in the StatefulSet pod.
    7. Write more data to it and compute md5sum.
    8. Check the data md5sum in the share manager volume.
    """

    statefulset_name = 'statefulset-delete-share-manager-pods-test'

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['replicas'] = 1
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    pod_name = statefulset_name + '-' + '0'
    pvc_name = \
        statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name'] \
        + '-' + statefulset_name + '-0'
    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data, filename='test1')

    delete_and_wait_pod(core_api,
                        share_manager_name,
                        namespace=LONGHORN_NAMESPACE)

    target_pod = core_api.read_namespaced_pod(name=pod_name,
                                              namespace='default')
    wait_delete_pod(core_api, target_pod.metadata.uid)
    wait_for_pod_remount(core_api, pod_name)

    test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data_2, filename='test2')

    command1 = 'cat /export/' + pv_name + '/test1'
    share_manager_data_1 = exec_command_in_pod(core_api, command1,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)
    assert test_data == share_manager_data_1

    command2 = 'cat /export/' + pv_name + '/test2'
    share_manager_data_2 = exec_command_in_pod(core_api, command2,
                                               share_manager_name,
                                               LONGHORN_NAMESPACE)
    assert test_data_2 == share_manager_data_2
Example 9
def test_rwx_with_statefulset_multi_pods(core_api, statefulset):  # NOQA
    """
    Test creation of share manager pod and rwx volumes from 2 pods.

    1. Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for both pods to come up running.
    3. Verify there are two share manager pods created in the Longhorn
       namespace and that each has a directory with the PV name under
       `/export`.
    4. Write data in both pods and compute md5sum.
    5. Compare the md5sum of the data with the data written to the share
       manager.
    """

    statefulset_name = 'statefulset-rwx-multi-pods-test'
    share_manager_name = []
    volumes_name = []

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    for i in range(2):
        pvc_name = \
            statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']\
            + '-' + statefulset_name + '-' + str(i)
        pv_name = get_volume_name(core_api, pvc_name)

        assert pv_name is not None

        volumes_name.append(pv_name)
        share_manager_name.append('share-manager-' + pv_name)

        check_pod_existence(core_api,
                            share_manager_name[i],
                            namespace=LONGHORN_NAMESPACE)

    command = "ls /export | grep -i 'pvc' | wc -l"

    assert exec_command_in_pod(core_api, command, share_manager_name[0],
                               LONGHORN_NAMESPACE) == '1'
    assert exec_command_in_pod(core_api, command, share_manager_name[1],
                               LONGHORN_NAMESPACE) == '1'

    md5sum_pod = []
    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        test_data = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api, test_pod_name, test_data)
        md5sum_pod.append(test_data)

    for i in range(2):
        command = 'cat /export' + '/' + volumes_name[i] + '/' + 'test'
        pod_data = exec_command_in_pod(core_api, command,
                                       share_manager_name[i],
                                       LONGHORN_NAMESPACE)

        assert pod_data == md5sum_pod[i]
Example 10
def test_rwx_statefulset_scale_down_up(core_api, statefulset):  # NOQA
    """
    Test Scaling up and down of pods attached to rwx volume.

    1. Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for StatefulSet pods to come up healthy.
    3. Write data and compute md5sum in both pods.
    4. Delete the pods.
    5. Wait for the pods to be terminated.
    6. Verify the share manager pods are no longer available and the volume is
       detached.
    7. Recreate the pods.
    8. Wait for new pods to come up.
    9. Check the data md5sum in new pods.
    """

    statefulset_name = 'statefulset-rwx-scale-down-up-test'
    share_manager_name = []

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)

    for i in range(2):
        pvc_name = \
            statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']\
            + '-' + statefulset_name + '-' + str(i)
        pv_name = get_volume_name(core_api, pvc_name)

        assert pv_name is not None

        share_manager_name.append('share-manager-' + pv_name)

        check_pod_existence(core_api,
                            share_manager_name[i],
                            namespace=LONGHORN_NAMESPACE)

    md5sum_pod = []
    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        test_data = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api, test_pod_name, test_data)
        md5sum_pod.append(test_data)

    statefulset['spec']['replicas'] = replicas = 0
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})
    for i in range(DEFAULT_STATEFULSET_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(
            name=statefulset['metadata']['name'], namespace='default')
        if s_set.status.ready_replicas == replicas or \
                (replicas == 0 and not s_set.status.ready_replicas):
            break
        time.sleep(DEFAULT_STATEFULSET_INTERVAL)

    pods = core_api.list_namespaced_pod(namespace=LONGHORN_NAMESPACE)

    found = False
    for item in pods.items:
        if item.metadata.name == share_manager_name[0] or \
                item.metadata.name == share_manager_name[1]:
            found = True
            break

    assert not found

    statefulset['spec']['replicas'] = replicas = 2
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})
    wait_statefulset(statefulset)

    for i in range(2):
        test_pod_name = statefulset_name + '-' + str(i)
        command = 'cat /data/test'
        pod_data = exec_command_in_pod(core_api, command, test_pod_name,
                                       'default')

        assert pod_data == md5sum_pod[i]
Example 11
def test_cloning_interrupted(client, core_api, pvc, pod, clone_pvc, clone_pod):  # NOQA
    """
    1. Create a PVC:
        ```yaml
        apiVersion: v1
        kind: PersistentVolumeClaim
        metadata:
          name: source-pvc
        spec:
          storageClassName: longhorn
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 3Gi
        ```
    2. Specify the `source-pvc` in a pod yaml and start the pod
    3. Wait for the pod to be running, write 500MB of data to the mount
       path of the volume
    4. Clone a volume by creating the PVC:
        ```yaml
        apiVersion: v1
        kind: PersistentVolumeClaim
        metadata:
          name: cloned-pvc
        spec:
          storageClassName: longhorn
          dataSource:
            name: source-pvc
            kind: PersistentVolumeClaim
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 3Gi
        ```
    5. Wait for the `CloneStatus.State` in `cloned-pvc` to be `initiated`
    6. Kill all replica processes of the `source-pvc` volume
    7. Wait for the `CloneStatus.State` in `cloned-pvc` to be `failed`
    8. Clean up `clone-pvc`
    9. Redeploy `cloned-pvc` and clone pod
    10. In a 3-min retry loop, verify the cloned pod becomes running
    11. Verify `cloned-pvc` has the same data as `source-pvc`
    12. In a 2-min retry loop, verify the volume of the `clone-pvc`
        eventually becomes healthy.
    """
    # Step-1
    source_pvc_name = 'source-pvc' + generate_random_suffix()
    pvc['metadata']['name'] = source_pvc_name
    pvc['spec']['storageClassName'] = 'longhorn'
    core_api.create_namespaced_persistent_volume_claim(
        body=pvc, namespace='default')
    wait_for_pvc_phase(core_api, source_pvc_name, "Bound")

    # Step-2
    pod_name = 'source-pod' + generate_random_suffix()
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(source_pvc_name)]
    create_and_wait_pod(core_api, pod)

    # Step-3
    write_pod_volume_random_data(core_api, pod_name,
                                 '/data/test', DATA_SIZE_IN_MB_3)
    source_data = get_pod_data_md5sum(core_api, pod_name, '/data/test')

    source_volume_name = get_volume_name(core_api, source_pvc_name)

    # Step-4
    clone_pvc_name = 'clone-pvc' + generate_random_suffix()
    clone_pvc['metadata']['name'] = clone_pvc_name
    clone_pvc['spec']['storageClassName'] = 'longhorn'
    clone_pvc['spec']['dataSource'] = {
        'name': source_pvc_name,
        'kind': 'PersistentVolumeClaim'
    }
    core_api.create_namespaced_persistent_volume_claim(
        body=clone_pvc, namespace='default')

    # Step-5
    clone_volume_name = get_clone_volume_name(client, source_volume_name)
    wait_for_volume_clone_status(client, clone_volume_name, VOLUME_FIELD_STATE,
                                 'initiated')

    # Step-6
    crash_replica_processes(client, core_api, source_volume_name)

    # Step-7
    wait_for_volume_faulted(client, source_volume_name)
    wait_for_volume_clone_status(client, clone_volume_name, VOLUME_FIELD_STATE,
                                 'failed')

    # Step-8
    delete_and_wait_pvc(core_api, clone_pvc_name)

    # Step-9
    clone_pvc_name = 'clone-pvc-2' + generate_random_suffix()
    clone_pvc['metadata']['name'] = clone_pvc_name
    clone_pvc['spec']['storageClassName'] = 'longhorn'
    clone_pvc['spec']['dataSource'] = {
        'name': source_pvc_name,
        'kind': 'PersistentVolumeClaim'
    }
    core_api.create_namespaced_persistent_volume_claim(
        body=clone_pvc, namespace='default')
    wait_for_pvc_phase(core_api, clone_pvc_name, "Bound")

    # Step-9
    clone_pod_name = 'clone-pod' + generate_random_suffix()
    clone_pod['metadata']['name'] = clone_pod_name
    clone_pod['spec']['volumes'] = [create_pvc_spec(clone_pvc_name)]
    create_and_wait_pod(core_api, clone_pod)

    # Step-10
    clone_volume_name = get_volume_name(core_api, clone_pvc_name)
    wait_for_volume_clone_status(client, clone_volume_name, VOLUME_FIELD_STATE,
                                 VOLUME_FIELD_CLONE_COMPLETED)

    # Step-11
    clone_data = get_pod_data_md5sum(core_api, clone_pod_name, '/data/test')
    assert source_data == clone_data

    # Step-12
    wait_for_volume_healthy(client, clone_volume_name)
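`get_clone_volume_name` is called above before the cloned PVC is bound, so it cannot rely on the PV name. A plausible sketch, assuming the Longhorn volume object exposes a `cloneStatus.sourceVolume` field (the field names and lookup are assumptions, not the actual helper):

def get_clone_volume_name_sketch(client, source_volume_name):
    # Find the volume whose clone status points back at the source volume.
    for volume in client.list_volume():
        if volume.cloneStatus.sourceVolume == source_volume_name:
            return volume.name
    return None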
Example 12
def test_recurring_jobs_when_volume_detached_unexpectedly(
        settings_reset, set_random_backupstore, client, core_api, apps_api,
        pvc, make_deployment_with_pvc):  # NOQA
    """
    Test recurring jobs when volume detached unexpectedly

    Context:

    If the volume is automatically attached by the recurring backup job,
    make sure that the workload pod is eventually able to use the volume
    when the volume is detached unexpectedly during the backup process.

    Steps:

    1. Create a volume, attach to a pod of a deployment,
       write 500MB to the volume.
    2. Scale down the deployment. The volume is detached.
    3. Turn on `Allow Recurring Job While Volume Is Detached` setting.
    4. Create a recurring backup job that runs every 2 mins.
    5. Wait until the recurring backup job starts and the backup progress
       is > 50%, then kill the engine process of the volume.
    6. Verify volume automatically reattached and is healthy again.
    7. Wait until the backup finishes.
    8. Wait for the volume to be in detached state with
       `frontendDisabled=false`
    9. Scale up the deployment.
       Verify that we can read the file `lost+found` from the workload pod
    10. Turn off the `Allow Recurring Job While Volume Is Detached` setting.
        Clean up backups and volumes.
    """

    recurring_job_setting = \
        client.by_id_setting(SETTING_RECURRING_JOB_WHILE_VOLUME_DETACHED)
    client.update(recurring_job_setting, value="true")

    pvc_name = 'pvc-volume-detached-unexpectedly-test'
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = 'longhorn'

    core_api.create_namespaced_persistent_volume_claim(body=pvc,
                                                       namespace='default')

    deployment = make_deployment_with_pvc(
        'deployment-volume-detached-unexpectedly-test', pvc_name)
    create_and_wait_deployment(apps_api, deployment)
    pod_names = common.get_deployment_pod_names(core_api, deployment)
    vol_name = get_volume_name(core_api, pvc_name)

    write_pod_volume_random_data(core_api, pod_names[0], "/data/test",
                                 DATA_SIZE_IN_MB_3)

    data = read_volume_data(core_api, pod_names[0], 'default')
    deployment['spec']['replicas'] = 0
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment["metadata"]["name"])
    vol = wait_for_volume_detached(client, vol_name)

    jobs = [{
        "name": RECURRING_JOB_NAME,
        "cron": "*/2 * * * *",
        "task": "backup",
        "retain": 1
    }]
    vol.recurringUpdate(jobs=jobs)
    time.sleep(60)
    wait_for_recurring_backup_to_start(client,
                                       core_api,
                                       vol_name,
                                       expected_snapshot_count=1,
                                       minimum_progress=50)

    crash_engine_process_with_sigkill(client, core_api, vol_name)
    # Check if the volume is reattached after recurring backup is interrupted
    time.sleep(10)
    wait_for_volume_healthy_no_frontend(client, vol_name)

    # Since the backup state is removed after the backup completes, and that
    # can happen quickly, catching both the in-progress and complete states
    # is unreliable, so we only check for the complete state.
    def backup_complete_predicate(b):
        return b.state == "complete" and b.error == ""

    common.wait_for_backup_state(client, vol_name, backup_complete_predicate)

    wait_for_volume_detached(client, vol_name)

    deployment['spec']['replicas'] = 1
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment["metadata"]["name"])
    wait_deployment_replica_ready(apps_api, deployment["metadata"]["name"], 1)
    pod_names = common.get_deployment_pod_names(core_api, deployment)

    assert read_volume_data(core_api, pod_names[0], 'default') == data

    # Use the fixture to clean up the backupstore. Since we crashed the
    # engine that initiated the backup, its backupstore lock will still be
    # present, so we need to wait until the lock expires before the backups
    # can be deleted.
    vol.recurringUpdate(jobs=[])
    backupstore.backupstore_wait_for_lock_expiration()
Example 13
def test_cloning_basic(client, core_api, pvc, pod, clone_pvc, clone_pod, storage_class_name='longhorn'):  # NOQA
    """
    1. Create a PVC:
        ```yaml
        apiVersion: v1
        kind: PersistentVolumeClaim
        metadata:
          name: source-pvc
        spec:
          storageClassName: longhorn
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 3Gi
        ```
    2. Specify the `source-pvc` in a pod yaml and start the pod
    3. Wait for the pod to be running, write some data to the mount
       path of the volume
    4. Clone a volume by creating the PVC:
        ```yaml
        apiVersion: v1
        kind: PersistentVolumeClaim
        metadata:
          name: cloned-pvc
        spec:
          storageClassName: longhorn
          dataSource:
            name: source-pvc
            kind: PersistentVolumeClaim
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 3Gi
        ```
    5. Wait for the `CloneStatus.State` in `cloned-pvc` to be `completed`
    6. The cloned volume should be detached after cloning completes; wait
       for it.
    7. Specify the `cloned-pvc` in a cloned pod yaml and deploy the cloned pod
    8. In a 3-min retry loop, wait for the cloned pod to be running
    9. Verify the data in `cloned-pvc` is the same as in `source-pvc`
    10. In a 2-min retry loop, verify the volume of the `clone-pvc` eventually
        becomes healthy
    """
    # Step-1
    source_pvc_name = 'source-pvc' + generate_random_suffix()
    pvc['metadata']['name'] = source_pvc_name
    pvc['spec']['storageClassName'] = storage_class_name
    core_api.create_namespaced_persistent_volume_claim(
        body=pvc, namespace='default')
    wait_for_pvc_phase(core_api, source_pvc_name, "Bound")

    # Step-2
    pod_name = 'source-pod' + generate_random_suffix()
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(source_pvc_name)]
    create_and_wait_pod(core_api, pod)

    # Step-3
    write_pod_volume_random_data(core_api, pod_name,
                                 '/data/test', DATA_SIZE_IN_MB_2)
    source_data = get_pod_data_md5sum(core_api, pod_name, '/data/test')

    # Step-4
    clone_pvc_name = 'clone-pvc' + generate_random_suffix()
    clone_pvc['metadata']['name'] = clone_pvc_name
    clone_pvc['spec']['storageClassName'] = storage_class_name
    clone_pvc['spec']['dataSource'] = {
        'name': source_pvc_name,
        'kind': 'PersistentVolumeClaim'
    }
    core_api.create_namespaced_persistent_volume_claim(
        body=clone_pvc, namespace='default')
    wait_for_pvc_phase(core_api, clone_pvc_name, "Bound")

    # Step-5
    clone_volume_name = get_volume_name(core_api, clone_pvc_name)
    wait_for_volume_clone_status(client, clone_volume_name, VOLUME_FIELD_STATE,
                                 VOLUME_FIELD_CLONE_COMPLETED)

    # Step-6
    wait_for_volume_detached(client, clone_volume_name)

    # Step-7,8
    clone_pod_name = 'clone-pod' + generate_random_suffix()
    clone_pod['metadata']['name'] = clone_pod_name
    clone_pod['spec']['volumes'] = [create_pvc_spec(clone_pvc_name)]
    create_and_wait_pod(core_api, clone_pod)
    clone_data = get_pod_data_md5sum(core_api, clone_pod_name, '/data/test')

    # Step-9
    assert source_data == clone_data

    # Step-10
    wait_for_volume_healthy(client, clone_volume_name)
Example 14
def test_rwx_parallel_writing(core_api, statefulset, pod):  # NOQA
    """
    Test parallel writing of data

    1. Create a StatefulSet of 1 pod with VolumeClaimTemplate where accessMode
       is 'RWX'.
    2. Wait for StatefulSet to come up healthy.
    3. Create a pod that uses the same PVC created by the StatefulSet.
    4. Wait for the pod to come up healthy.
    5. Start writing 800 MB of data to `file 1` in the StatefulSet pod and
       500 MB of data to `file 2` in the standalone pod.
    6. Compute md5sum.
    7. Check the data md5sum in the share manager pod volume.
    """

    statefulset_name = 'statefulset-rwx-parallel-writing-test'

    statefulset['metadata']['name'] = \
        statefulset['spec']['selector']['matchLabels']['app'] = \
        statefulset['spec']['serviceName'] = \
        statefulset['spec']['template']['metadata']['labels']['app'] = \
        statefulset_name
    statefulset['spec']['replicas'] = 1
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\
        = 'longhorn'
    statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \
        = ['ReadWriteMany']

    create_and_wait_statefulset(statefulset)
    statefulset_pod_name = statefulset_name + '-0'

    pvc_name = \
        statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name'] \
        + '-' + statefulset_name + '-0'
    pv_name = get_volume_name(core_api, pvc_name)
    share_manager_name = 'share-manager-' + pv_name

    pod_name = 'pod-parallel-write-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    with Pool(2) as p:
        p.map(
            write_data_into_pod,
            [statefulset_pod_name + ':/data/test1', pod_name + ':/data/test2'])

    md5sum1 = get_pod_data_md5sum(core_api, statefulset_pod_name, 'data/test1')
    md5sum2 = get_pod_data_md5sum(core_api, pod_name, 'data/test2')

    command1 = 'md5sum /export' + '/' + pv_name + '/' + 'test1' + \
               " | awk '{print $1}'"
    share_manager_data1 = exec_command_in_pod(core_api, command1,
                                              share_manager_name,
                                              LONGHORN_NAMESPACE)
    assert md5sum1 == share_manager_data1

    command2 = 'md5sum /export' + '/' + pv_name + '/' + 'test2' + \
               " | awk '{print $1}'"
    share_manager_data2 = exec_command_in_pod(core_api, command2,
                                              share_manager_name,
                                              LONGHORN_NAMESPACE)
    assert md5sum2 == share_manager_data2
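`Pool(2).map` above fans the writes out to a helper that receives a single `'<pod>:<path>'` string. A plausible sketch of such a helper (the client factory and the data size constant are assumptions for illustration):

def write_data_into_pod(pod_name_and_path):
    # Split the combined argument back into the pod name and file path,
    # then write random data to that path inside the pod.
    pod_name, path = pod_name_and_path.split(':', 1)
    core_api = get_core_api_client()  # assumed helper returning a CoreV1Api
    write_pod_volume_random_data(core_api, pod_name, path, DATA_SIZE_IN_MB_3)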
Example 15
def test_cloning_with_detached_source_volume(client, core_api, pvc, clone_pvc):  # NOQA
    """
        1. Create a PVC:
            ```yaml
            apiVersion: v1
            kind: PersistentVolumeClaim
            metadata:
              name: source-pvc
            spec:
              storageClassName: longhorn
              accessModes:
                - ReadWriteOnce
              resources:
                requests:
                  storage: 10Gi
            ```
        2. Wait for volume to be created and attach it to a node.
        3. Write some data to the mount path of the volume
        4. Detach the volume and wait for the volume to be in detached state.
        5. Clone a volume by creating the PVC:
            ```yaml
            apiVersion: v1
            kind: PersistentVolumeClaim
            metadata:
              name: cloned-pvc
            spec:
              storageClassName: longhorn
              dataSource:
                name: source-pvc
                kind: PersistentVolumeClaim
              accessModes:
                - ReadWriteOnce
              resources:
                requests:
                  storage: 10Gi
            ```
        6. Wait for `source-pvc` to be attached
        7. Wait for a new snapshot to be created in the `source-pvc` volume
        8. Wait for the `CloneStatus.State` in `cloned-pvc` to be `completed`
        9. Wait for `source-pvc` to be detached
        10. Attach the cloned volume to a node
        11. Verify the data in `cloned-pvc` is the same as in `source-pvc`.
        12. In 2-min retry loop, verify the volume of the `clone-pvc`
            eventually becomes healthy.
    """
    # Step-1
    source_pvc_name = 'source-pvc' + generate_random_suffix()
    pvc['metadata']['name'] = source_pvc_name
    pvc['spec']['storageClassName'] = 'longhorn'
    core_api.create_namespaced_persistent_volume_claim(
        body=pvc, namespace='default')
    wait_for_pvc_phase(core_api, source_pvc_name, "Bound")

    # Step-2
    source_volume_name = get_volume_name(core_api, source_pvc_name)
    lht_host_id = get_self_host_id()
    source_volume = client.by_id_volume(source_volume_name)
    source_volume.attach(hostId=lht_host_id)
    source_volume = wait_for_volume_healthy(client, source_volume_name)

    # Step-3
    data = write_volume_random_data(source_volume)

    # Steps-4
    source_volume.detach(hostId=lht_host_id)
    wait_for_volume_detached(client, source_volume_name)

    # Step-5
    clone_pvc_name = 'clone-pvc' + generate_random_suffix()
    clone_pvc['metadata']['name'] = clone_pvc_name
    clone_pvc['spec']['storageClassName'] = 'longhorn'
    clone_pvc['spec']['dataSource'] = {
        'name': source_pvc_name,
        'kind': 'PersistentVolumeClaim'
    }
    core_api.create_namespaced_persistent_volume_claim(
        body=clone_pvc, namespace='default')
    wait_for_pvc_phase(core_api, clone_pvc_name, "Bound")

    # Step-6
    source_volume = wait_for_volume_attached(client, source_volume_name)

    # Step-7
    wait_for_snapshot_count(source_volume, 2)

    # Step-8
    clone_volume_name = get_volume_name(core_api, clone_pvc_name)
    wait_for_volume_clone_status(client, clone_volume_name, VOLUME_FIELD_STATE,
                                 VOLUME_FIELD_CLONE_COMPLETED)
    wait_for_volume_detached(client, clone_volume_name)

    # Step-9
    wait_for_volume_detached(client, source_volume_name)

    # Step-10
    clone_volume = client.by_id_volume(clone_volume_name)
    clone_volume.attach(hostId=lht_host_id)
    wait_for_volume_attached(client, clone_volume_name)
    clone_volume = wait_for_volume_endpoint(client, clone_volume_name)

    # Step-11
    check_volume_data(clone_volume, data)

    # Step-12
    wait_for_volume_healthy(client, clone_volume_name)