Example #1
def csi_mount_test(
        client,
        core_api,
        csi_pv,
        pvc,
        pod,  # NOQA
        volume_size,
        base_image=""):  # NOQA
    pod_name = 'csi-mount-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['volumeName'] = csi_pv['metadata']['name']
    update_storageclass_references(CSI_PV_TEST_STORAGE_NAME, csi_pv, pvc)

    create_pv_storage(core_api, client, csi_pv, pvc, base_image)
    create_and_wait_pod(core_api, pod)

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == csi_pv['metadata']['name']
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(csi_pv['spec']['csi']['volumeAttributes']["numberOfReplicas"])
    assert volumes[0]["state"] == "attached"
    assert volumes[0]["baseImage"] == base_image
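
The CSI examples above and below lean on a create_pvc_spec helper from the test suite's common module. As a rough, hedged sketch (the dict shape and the 'pod-data' volume name are assumptions, not the suite's actual implementation), it could look like this:

def create_pvc_spec(pvc_name):
    # Build a pod .spec.volumes entry that points at an existing PVC.
    # The volume name is assumed to match the pod's volumeMounts entry.
    return {
        'name': 'pod-data',
        'persistentVolumeClaim': {
            'claimName': pvc_name,
            'readOnly': False,
        },
    }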
Example #2
def test_flexvolume_mount(client, core_api, flexvolume, pod):  # NOQA
    """
    Test that a statically defined volume can be created, mounted, unmounted,
    and deleted properly on the Kubernetes cluster.

    Fixtures are torn down here in the reverse order that they are specified
    as parameters. Take caution when reordering test fixtures.
    """

    pod_name = 'flexvolume-mount-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['containers'][0]['volumeMounts'][0]['name'] = \
        flexvolume['name']
    pod['spec']['volumes'] = [flexvolume]
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    create_and_wait_pod(core_api, pod)

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == flexvolume['name']
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == int(
        flexvolume["flexVolume"]["options"]["numberOfReplicas"])
    assert volumes[0]["state"] == "attached"
Example #3
def ha_simple_recovery_test(client, volume_name, size, base_image=""):  # NOQA
    volume = client.create_volume(name=volume_name, size=size,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == size
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""
    assert volume["baseImage"] == base_image

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    ha_rebuild_replica_test(client, volume_name)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
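
ha_rebuild_replica_test is assumed by this helper. A condensed, hypothetical sketch of what it might do, reusing the rebuild-wait loop shown later in test_ha_simple_recovery (RETRY_COUNTS, RETRY_ITERVAL and common come from the suite's common module):

def ha_rebuild_replica_test(client, volname):  # hypothetical sketch
    volume = client.by_id_volume(volname)
    old_names = {r["name"] for r in volume["replicas"]}

    # Drop one replica and wait until a replacement replica appears.
    volume.replicaRemove(name=volume["replicas"][0]["name"])
    rebuilt = False
    for _ in range(RETRY_COUNTS):
        volume = client.by_id_volume(volname)
        if any(r["name"] not in old_names for r in volume["replicas"]):
            rebuilt = True
            break
        time.sleep(RETRY_ITERVAL)
    assert rebuilt

    # The volume should return to the healthy state once rebuilding finishes.
    common.wait_for_volume_healthy(client, volname)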
Example #4
def test_ha_prohibit_deleting_last_replica(client, volume_name):  # NOQA
    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=1)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 1
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert len(volume["replicas"]) == 1
    replica0 = volume["replicas"][0]

    with pytest.raises(Exception) as e:
        volume.replicaRemove(name=replica0["name"])
    assert "no other healthy replica available" in str(e.value)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #5
def test_csi_mount(client, core_api, csi_pv, pvc, pod):  # NOQA
    """
    Test that a statically defined CSI volume can be created, mounted,
    unmounted, and deleted properly on the Kubernetes cluster.

    Fixtures are torn down here in the reverse order that they are specified
    as parameters. Take caution when reordering test fixtures.
    """

    pod_name = 'csi-mount-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['volumeName'] = csi_pv['metadata']['name']
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    create_pv_storage(core_api, client, csi_pv, pvc)
    create_and_wait_pod(core_api, pod)

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == csi_pv['metadata']['name']
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(csi_pv['spec']['csi']['volumeAttributes']["numberOfReplicas"])
    assert volumes[0]["state"] == "attached"
Example #6
def test_statefulset_mount(client, core_api, storage_class,
                           statefulset):  # NOQA
    """
    Tests that volumes provisioned for a StatefulSet can be properly created,
    mounted, unmounted, and deleted on the Kubernetes cluster.
    """

    statefulset_name = 'statefulset-mount-test'
    update_test_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        # Workaround for checking volume name since they differ per pod.
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    # Confirm that we've iterated through all the volumes.
    assert len(pod_info) == 0
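
get_statefulset_pod_info is assumed here; a hedged sketch of how per-pod PV names could be collected with the Kubernetes Python client (pod and PVC naming follows the StatefulSet convention, but the real helper may differ):

def get_statefulset_pod_info(core_api, statefulset):
    # StatefulSet pods are named <name>-<ordinal>; each volumeClaimTemplate
    # produces a PVC named <template>-<pod-name>, whose bound PV backs the
    # Longhorn volume.
    info = []
    ss_name = statefulset['metadata']['name']
    claim = statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']
    for i in range(statefulset['spec']['replicas']):
        pod_name = '%s-%d' % (ss_name, i)
        pvc_name = '%s-%s' % (claim, pod_name)
        pvc = core_api.read_namespaced_persistent_volume_claim(
            name=pvc_name, namespace='default')
        info.append({'pod_name': pod_name,
                     'pvc_name': pvc_name,
                     'pv_name': pvc.spec.volume_name})
    return info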
Example #7
def test_provisioner_mount(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that a StorageClass provisioned volume can be created, mounted,
    unmounted, and deleted properly on the Kubernetes cluster.

    Fixtures are torn down here in the reverse order that they are specified
    as parameters. Take caution when reordering test fixtures.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-mount-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == pvc_volume_name
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes[0]["state"] == "attached"
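
get_volume_name is another assumed helper; a minimal sketch, reading the bound PV name off the PVC (the real helper may also retry until binding completes):

def get_volume_name(core_api, pvc_name):
    # The provisioned PV name is recorded on the bound PVC, and Longhorn
    # names the backing volume after that PV.
    pvc = core_api.read_namespaced_persistent_volume_claim(
        name=pvc_name, namespace='default')
    return pvc.spec.volume_name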
Example #8
def test_flexvolume_tags(client, core_api, node_default_tags, flexvolume,
                         pod):  # NOQA
    """
    Test that the FlexVolume provisioner is able to handle diskSelectors and
    nodeSelectors properly.
    """
    pod_name = 'flexvolume-tag-test'
    tag_spec = {
        "disk": ["ssd", "nvme"],
        "expected": 1,
        "node": ["storage", "main"]
    }
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    flexvolume["flexvolume"]["options"]["diskSelector"] = "ssd,nvme"
    flexvolume["flexvolume"]["options"]["nodeSelector"] = "storage,main"
    pod['metadata']['name'] = pod_name
    pod['spec']['containers'][0]['volumeMounts'][0]['name'] = \
        flexvolume['name']
    pod['spec']['volumes'] = [flexvolume]
    create_and_wait_pod(core_api, pod)

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == flexvolume['name']
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == int(
        flexvolume["flexVolume"]["options"]["numberOfReplicas"])
    assert volumes[0]["state"] == "attached"
    check_volume_replicas(volumes[0], tag_spec, node_default_tags)
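
check_volume_replicas is assumed above. A partial, hypothetical sketch of the node-tag half of such a check, assuming node_tags maps a node ID to its list of tags (the real helper also validates disk tags and may differ in shape):

def check_volume_replicas(volume, tag_spec, node_tags):
    # Every replica should land on a node carrying all requested node tags,
    # and at least `expected` distinct nodes should be used.
    scheduled_nodes = set()
    for replica in volume["replicas"]:
        node_id = replica["hostId"]
        assert set(tag_spec["node"]).issubset(set(node_tags[node_id]))
        scheduled_nodes.add(node_id)
    assert len(scheduled_nodes) >= tag_spec["expected"]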
Example #9
def test_provisioner_params(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that substituting different StorageClass parameters is reflected in
    the resulting PersistentVolumeClaim.

    Fixtures are torn down here in the reverse order that they are specified
    as parameters. Take caution when reordering test fixtures.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-params-test'
    volume_size = 2 * Gi
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['resources']['requests']['storage'] = \
        size_to_string(volume_size)
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    storage_class['parameters'] = {
        'numberOfReplicas': '2',
        'staleReplicaTimeout': '20'
    }

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == pvc_volume_name
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes[0]["state"] == "attached"
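
size_to_string converts the byte count into the Kubernetes quantity string used in the PVC request. A minimal sketch, assuming Gi-aligned sizes and the suite's Gi constant:

def size_to_string(volume_size):
    # e.g. 2 * Gi -> "2Gi", the form expected in
    # pvc['spec']['resources']['requests']['storage'].
    assert volume_size % Gi == 0
    return '%dGi' % (volume_size // Gi)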
Example #10
def test_recurring_job_in_volume_creation(clients, volume_name):  # NOQA
    for host_id, client in clients.iteritems():  # NOQA
        break

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name, size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2, recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
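
create_jobs1 and check_jobs1_result come from the suite's common module. Based on the recurring-job docstrings further down (snapshot every minute retaining 2, backup every two minutes retaining 1), create_jobs1 plausibly returns something like the following; the job names and cron strings are assumptions:

def create_jobs1():
    return [
        {"name": "snap", "cron": "* * * * *",
         "task": "snapshot", "retain": 2},
        {"name": "backup", "cron": "*/2 * * * *",
         "task": "backup", "retain": 1},
    ]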
Example #11
def test_provisioner_mount(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that a StorageClass provisioned volume can be created, mounted,
    unmounted, and deleted properly on the Kubernetes cluster.

    Fixtures are torn down here in the reverse order that they are specified
    as parameters. Take caution when reordering test fixtures.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-mount-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [
        create_pvc_spec(pvc['metadata']['name'])
    ]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == pvc_volume_name
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes[0]["state"] == "attached"
Example #12
def test_statefulset_mount(client, core_api, storage_class, statefulset):  # NOQA
    """
    Tests that volumes provisioned for a StatefulSet can be properly created,
    mounted, unmounted, and deleted on the Kubernetes cluster.
    """

    statefulset_name = 'statefulset-mount-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        # Workaround for checking volume name since they differ per pod.
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    # Confirm that we've iterated through all the volumes.
    assert len(pod_info) == 0
Example #13
def test_recurring_job_in_volume_creation(clients, volume_name):  # NOQA
    for host_id, client in clients.iteritems():  # NOQA
        break

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name,
                             size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name,
                         size=SIZE,
                         numberOfReplicas=2,
                         recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #14
def test_tag_scheduling_failure(client, node_default_tags):  # NOQA
    """
    Test that scheduling fails if no Nodes/Disks with the requested Tags are
    available.

    Case 1:
    Validate that the API call fails if nonexistent tags are specified for
    the volume.

    Case 2:

    1. Specify tags that exist but that no node or disk can satisfy.
    2. Validate that the volume fails to be scheduled.
    """
    invalid_tag_cases = [
        # Only one Disk Tag exists.
        {
            "disk": ["doesnotexist", "ssd"],
            "node": []
        },
        # Only one Node Tag exists.
        {
            "disk": [],
            "node": ["doesnotexist", "main"]
        }
    ]
    for tags in invalid_tag_cases:
        volume_name = generate_volume_name()  # NOQA
        with pytest.raises(Exception) as e:
            client.create_volume(name=volume_name,
                                 size=SIZE,
                                 numberOfReplicas=3,
                                 diskSelector=tags["disk"],
                                 nodeSelector=tags["node"])
        assert "does not exist" in str(e.value)

    unsatisfied_tag_cases = [{
        "disk": [],
        "node": ["main", "fallback"]
    }, {
        "disk": ["ssd", "m2"],
        "node": []
    }]
    for tags in unsatisfied_tag_cases:
        volume_name = generate_volume_name()
        client.create_volume(name=volume_name,
                             size=SIZE,
                             numberOfReplicas=3,
                             diskSelector=tags["disk"],
                             nodeSelector=tags["node"])
        volume = wait_for_volume_detached(client, volume_name)
        assert volume.diskSelector == tags["disk"]
        assert volume.nodeSelector == tags["node"]
        wait_scheduling_failure(client, volume_name)

        client.delete(volume)
        wait_for_volume_delete(client, volume.name)
        volumes = client.list_volume()
        assert len(volumes) == 0
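
generate_volume_name is assumed; a simple sketch that produces a unique, DNS-safe name per case (the prefix and suffix length are arbitrary choices here):

import random
import string


def generate_volume_name():
    suffix = ''.join(random.choice(string.ascii_lowercase + string.digits)
                     for _ in range(6))
    return 'longhorn-testvol-' + suffix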
Example #15
def csi_mount_test(client, core_api, csi_pv, pvc, pod_make,  # NOQA
                   volume_size, base_image=""): # NOQA
    create_and_wait_csi_pod('csi-mount-test', client, core_api, csi_pv, pvc,
                            pod_make, base_image, "")

    volumes = client.list_volume().data
    assert len(volumes) == 1
    assert volumes[0].name == csi_pv['metadata']['name']
    assert volumes[0].size == str(volume_size)
    assert volumes[0].numberOfReplicas == \
        int(csi_pv['spec']['csi']['volumeAttributes']["numberOfReplicas"])
    assert volumes[0].state == "attached"
    assert volumes[0].baseImage == base_image
Example #16
def csi_mount_test(client, core_api, csi_pv, pvc, pod_make,  # NOQA
                   volume_size, base_image=""): # NOQA
    create_and_wait_csi_pod('csi-mount-test', client, core_api, csi_pv, pvc,
                            pod_make, base_image, "")

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == csi_pv['metadata']['name']
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(csi_pv['spec']['csi']['volumeAttributes']["numberOfReplicas"])
    assert volumes[0]["state"] == "attached"
    assert volumes[0]["baseImage"] == base_image
Example #17
def test_ha_salvage(client, volume_name):  # NOQA
    # get a random client

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert len(volume["replicas"]) == 2
    replica0_name = volume["replicas"][0]["name"]
    replica1_name = volume["replicas"][1]["name"]

    data = write_random_data(volume["endpoint"])

    common.k8s_delete_replica_pods_for_volume(volume_name)

    volume = common.wait_for_volume_faulted(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] != ""
    assert volume["replicas"][1]["failedAt"] != ""

    volume.salvage(names=[replica0_name, replica1_name])

    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] == ""
    assert volume["replicas"][1]["failedAt"] == ""

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    check_data(volume["endpoint"], data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
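
write_random_data and check_data operate directly on the attached block device at volume["endpoint"]. A hedged sketch of the pair (the offset alignment, payload size, and returned dict shape are assumptions):

import os
import random


def write_random_data(dev_path, length=128):
    # Write a random hex payload at a random 512-byte-aligned offset and
    # return enough information to verify it later.
    data = {
        'content': os.urandom(length).hex(),
        'pos': random.randrange(0, 1024 * 1024, 512),
    }
    with open(dev_path, 'r+b') as f:
        f.seek(data['pos'])
        f.write(data['content'].encode())
        f.flush()
    return data


def check_data(dev_path, data):
    # Read the payload back from the device and compare byte-for-byte.
    with open(dev_path, 'rb') as f:
        f.seek(data['pos'])
        assert f.read(len(data['content'])) == data['content'].encode()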
Example #18
def test_recurring_job_in_volume_creation(set_random_backupstore, client,
                                          volume_name):  # NOQA
    """
    Test create volume with recurring jobs

    1. Create a volume with recurring jobs through the Longhorn API
    2. Verify the recurring jobs run correctly
    """
    host_id = get_self_host_id()

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name,
                             size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name,
                         size=SIZE,
                         numberOfReplicas=2,
                         recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)

    # wait until the beginning of an even minute
    wait_until_begin_of_an_even_minute()
    # wait until the 10th second of an even minute
    # to avoid writing data at the same time a backup is being taken
    time.sleep(10)

    write_volume_random_data(volume)
    time.sleep(150)  # 2.5 minutes
    write_volume_random_data(volume)
    time.sleep(150)  # 2.5 minutes

    check_jobs1_result(volume)

    volume = volume.detach(hostId="")
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
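
wait_until_begin_of_an_even_minute simply aligns the test with the cron schedule. A minimal sketch:

import datetime
import time


def wait_until_begin_of_an_even_minute():
    # Block until second 0 of an even minute so the timed writes line up
    # with the every-minute / every-two-minutes recurring jobs.
    while True:
        now = datetime.datetime.now()
        if now.second == 0 and now.minute % 2 == 0:
            break
        # Poll twice a second so the 1-second-wide window is not missed.
        time.sleep(0.5)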
Example #19
def test_provisioner_tags(client, core_api, node_default_tags, storage_class,
                          pvc, pod):  # NOQA
    """
    Test that a StorageClass can properly provision a volume with requested
    Tags.

    Test prerequisite:
      - set Replica Node Level Soft Anti-Affinity enabled

    1. Use `node_default_tags` to add default tags to nodes.
    2. Create a StorageClass with disk and node tag set.
    3. Create PVC and Pod.
    4. Verify the volume has the correct parameters and tags.
    """

    replica_node_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    client.update(replica_node_soft_anti_affinity_setting, value="true")

    # Prepare pod and volume specs.
    pod_name = 'provisioner-tags-test'
    tag_spec = {
        "disk": ["ssd", "nvme"],
        "expected": 1,
        "node": ["storage", "main"]
    }

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    storage_class['parameters']['diskSelector'] = 'ssd,nvme'
    storage_class['parameters']['nodeSelector'] = 'storage,main'
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes.data[0].name == pvc_volume_name
    assert volumes.data[0].size == str(volume_size)
    assert volumes.data[0].numberOfReplicas == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes.data[0].state == "attached"
    check_volume_replicas(volumes.data[0], tag_spec, node_default_tags)
Example #20
def ha_salvage_test(client, volume_name, base_image=""):  # NOQA
    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""
    assert volume["baseImage"] == base_image

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert len(volume["replicas"]) == 2
    replica0_name = volume["replicas"][0]["name"]
    replica1_name = volume["replicas"][1]["name"]

    data = write_volume_random_data(volume)

    common.k8s_delete_replica_pods_for_volume(volume_name)

    volume = common.wait_for_volume_faulted(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] != ""
    assert volume["replicas"][1]["failedAt"] != ""

    volume.salvage(names=[replica0_name, replica1_name])

    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] == ""
    assert volume["replicas"][1]["failedAt"] == ""

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    check_volume_data(volume, data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #21
def test_recurring_job(clients, volume_name):  # NOQA
    for host_id, client in clients.iteritems():  # NOQA
        break

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    jobs = create_jobs1()
    volume.recurringUpdate(jobs=jobs)

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    job_backup2 = {
        "name": "backup2",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    volume.recurringUpdate(jobs=[jobs[0], job_backup2])

    # 5 minutes
    time.sleep(300)

    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot["removed"] is False:
            count += 1
    # 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1 volume-head
    assert count == 6

    volume = volume.detach()

    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)

    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #22
def flexvolume_mount_test(client, core_api, flexvolume, pod,
                          volume_size):  # NOQA
    pod_name = 'flexvolume-mount-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['containers'][0]['volumeMounts'][0]['name'] = \
        flexvolume['name']
    pod['spec']['volumes'] = [flexvolume]

    create_and_wait_pod(core_api, pod)

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == flexvolume['name']
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == int(
        flexvolume["flexVolume"]["options"]["numberOfReplicas"])
    assert volumes[0]["state"] == "attached"
Example #23
def flexvolume_mount_test(client, core_api, flexvolume, pod, volume_size): # NOQA
    pod_name = 'flexvolume-mount-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['containers'][0]['volumeMounts'][0]['name'] = \
        flexvolume['name']
    pod['spec']['volumes'] = [
        flexvolume
    ]

    create_and_wait_pod(core_api, pod)

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == flexvolume['name']
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == int(
        flexvolume["flexVolume"]["options"]["numberOfReplicas"])
    assert volumes[0]["state"] == "attached"
Example #24
def test_recurring_job(clients, volume_name):  # NOQA
    for host_id, client in clients.iteritems():  # NOQA
        break

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    jobs = create_jobs1()
    volume.recurringUpdate(jobs=jobs)

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    job_backup2 = {"name": "backup2", "cron": "* * * * *",
                   "task": "backup", "retain": 2}
    volume.recurringUpdate(jobs=[jobs[0], job_backup2])

    # 5 minutes
    time.sleep(300)

    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot["removed"] is False:
            count += 1
    # 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1 volume-head
    assert count == 6

    volume = volume.detach()

    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)

    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #25
def test_recurring_job_in_volume_creation(clients, volume_name):  # NOQA
    """
    Test create volume with recurring jobs

    1. Create a volume with recurring jobs through the Longhorn API
    2. Verify the recurring jobs run correctly
    """
    for host_id, client in iter(clients.items()):  # NOQA
        break

    set_random_backupstore(client)

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name,
                             size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name,
                         size=SIZE,
                         numberOfReplicas=2,
                         recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #26
def test_provisioner_params(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that substituting different StorageClass parameters is reflected in
    the resulting PersistentVolumeClaim.

    Fixtures are torn down here in the reverse order that they are specified
    as parameters. Take caution when reordering test fixtures.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-params-test'
    volume_size = 2 * Gi
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [
        create_pvc_spec(pvc['metadata']['name'])
    ]
    pvc['spec']['resources']['requests']['storage'] = \
        size_to_string(volume_size)
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    storage_class['parameters'] = {
        'numberOfReplicas': '2',
        'staleReplicaTimeout': '20'
    }

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == pvc_volume_name
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes[0]["state"] == "attached"
Example #27
def test_provisioner_tags(client, core_api, node_default_tags, storage_class,
                          pvc, pod):  # NOQA
    """
    Test that a StorageClass can properly provision a volume with requested
    Tags.
    """

    # Prepare pod and volume specs.
    pod_name = 'provisioner-tags-test'
    tag_spec = {
        "disk": ["ssd", "nvme"],
        "expected": 1,
        "node": ["storage", "main"]
    }

    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    storage_class['parameters']['diskSelector'] = 'ssd,nvme'
    storage_class['parameters']['nodeSelector'] = 'storage,main'
    volume_size = DEFAULT_VOLUME_SIZE * Gi

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])

    # Confirm that the volume has all the correct parameters we gave it.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == pvc_volume_name
    assert volumes[0]["size"] == str(volume_size)
    assert volumes[0]["numberOfReplicas"] == \
        int(storage_class['parameters']['numberOfReplicas'])
    assert volumes[0]["state"] == "attached"
    check_volume_replicas(volumes[0], tag_spec, node_default_tags)
Example #28
def test_recurring_job(set_random_backupstore, client, volume_name):  # NOQA
    """
    Test recurring job

    1. Setup a random backupstore
    2. Create a volume.
    3. Create two jobs
        1. job 1: snapshot every one minute, retain 2
        2. job 2: backup every two minutes, retain 1
    4. Attach the volume.
       Wait until the 10th second since the beginning of an even minute
    5. Write some data. Sleep 2.5 minutes.
       Write some data. Sleep 2.5 minutes
    6. Verify we have 4 snapshots total
        1. 2 snapshots, 1 backup, 1 volume-head
    7. Update jobs to replace the backup job
        1. New backup job run every one minute, retain 2
    8. Write some data. Sleep 2.5 minutes.
       Write some data. Sleep 2.5 minutes
    9. We should have 6 snapshots
        1. 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1
        volume-head
    10. Make sure there are exactly 4 completed backups.
        1. old backup job completed 2 backups
        2. new backup job completed 2 backups
    11. Make sure we have no backup in progress
    """
    '''
    The timeline looks like this:
    0   1   2   3   4   5   6   7   8   9   10     (minute)
    |W  |   | W |   |   |W  |   | W |   |   |      (write data)
    |   S   |   S   |   |   S   |   S   |   |      (job_snap)
    |   |   B   |   B   |   |   |   |   |   |      (job_backup1)
    |   |   |   |   |   |   B   |   B   |   |      (job_backup2)
    '''

    host_id = get_self_host_id()

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    jobs = create_jobs1()
    volume.recurringUpdate(jobs=jobs)

    volume = volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)

    # wait until the beginning of an even minute
    wait_until_begin_of_an_even_minute()
    # wait until the 20th second of an even minute
    # make sure the snapshot job happens before the backup job
    time.sleep(20)

    write_volume_random_data(volume)
    time.sleep(120)  # 2 minutes
    write_volume_random_data(volume)
    time.sleep(120)  # 2 minutes

    check_jobs1_result(volume)

    job_backup2 = {
        "name": "backup2",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    volume.recurringUpdate(jobs=[jobs[0], job_backup2])

    write_volume_random_data(volume)
    time.sleep(120)  # 2 minutes
    write_volume_random_data(volume)
    time.sleep(120)  # 2 minutes

    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot.removed is False:
            count += 1
    # 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1 volume-head
    assert count == 6

    complete_backup_number = 0
    in_progress_backup_number = 0
    volume = client.by_id_volume(volume_name)
    for b in volume.backupStatus:
        assert b.error == ""
        if b.state == "complete":
            complete_backup_number += 1
        elif b.state == "in_progress":
            in_progress_backup_number += 1

    # 2 completed backups from job_backup
    # 2 completed backups from job_backup2
    assert complete_backup_number == 4

    assert in_progress_backup_number == 0

    volume = volume.detach(hostId="")

    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)

    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
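
check_jobs1_result is the counterpart of create_jobs1. Following the docstring above (4 live snapshots after one pass: 2 from the snapshot job, 1 from the backup job, plus volume-head), a hedged sketch; whether snapshots are dicts or objects depends on the client version:

def check_jobs1_result(volume):
    snapshots = volume.snapshotList()
    count = sum(1 for s in snapshots if s["removed"] is False)
    # 2 from the snapshot job, 1 from the backup job, 1 volume-head
    assert count == 4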
Example #29
def test_upgrade(
        upgrade_longhorn_manager_repo_url,
        upgrade_longhorn_manager_repo_branch,
        upgrade_longhorn_manager_image,
        upgrade_longhorn_engine_image,
        upgrade_longhorn_instance_manager_image,
        upgrade_longhorn_share_manager_image,
        upgrade_longhorn_backing_image_manager_image,
        client,
        core_api,
        volume_name,
        csi_pv,  # NOQA
        pvc,
        pod_make,
        statefulset,
        storage_class):  # NOQA
    """
    Test Longhorn upgrade

    TODO
    The test will cover both the revision counter enabled and
    revision counter disabled volume cases.

    Prerequisite:
      - Disable Auto Salvage Setting

    1. Find the upgrade image tag
    2. Create a volume, generate and write data into the volume.
        1. Create a volume with revision counter enabled case.
        2. Create a volume with revision counter disabled case.
    3. Create a Pod using a volume, generate and write data
    4. Create a StatefulSet with 2 replicas,
       generate and write data to their volumes
    5. Keep all volumes attached
    6. Upgrade Longhorn system.
    7. Check Pod and StatefulSet didn't restart after upgrade
    8. Check All volumes data
    9. Write data to StatefulSet pods, and Attached volume
    10. Check data written to StatefulSet pods, and attached volume.
    11. Detach the volume, and delete the Pod and
        StatefulSet to detach their volumes
    12. Upgrade all volumes engine images.
    13. Attach the volume, and recreate Pod, and StatefulSet
    14. Check All volumes data
    """
    longhorn_manager_repo = upgrade_longhorn_manager_repo_url
    longhorn_manager_branch = upgrade_longhorn_manager_repo_branch
    longhorn_manager_image = upgrade_longhorn_manager_image
    longhorn_engine_image = upgrade_longhorn_engine_image
    longhorn_instance_manager_image = upgrade_longhorn_instance_manager_image
    longhorn_share_manager_image = upgrade_longhorn_share_manager_image
    longhorn_backing_image_manager_image = \
        upgrade_longhorn_backing_image_manager_image

    host_id = get_self_host_id()
    pod_data_path = "/data/test"

    pod_volume_name = generate_volume_name()

    auto_salvage_setting = client.by_id_setting(SETTING_AUTO_SALVAGE)
    setting = client.update(auto_salvage_setting, value="false")

    assert setting.name == SETTING_AUTO_SALVAGE
    assert setting.value == "false"

    # Create Volume attached to a node.
    volume1 = create_and_check_volume(client, volume_name, size=SIZE)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)
    volume1_data = write_volume_random_data(volume1)

    # Create Volume used by Pod
    pod_name, pv_name, pvc_name, pod_md5sum = \
        prepare_pod_with_data_in_mb(client, core_api, csi_pv, pvc,
                                    pod_make, pod_volume_name,
                                    data_path=pod_data_path,
                                    add_liveness_probe=False)

    # Create multiple volumes used by StatefulSet
    statefulset_name = 'statefulset-upgrade-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)
    statefulset_pod_info = get_statefulset_pod_info(core_api, statefulset)

    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api, sspod_info['pod_name'],
                              sspod_info['data'])

    # upgrade Longhorn
    assert longhorn_upgrade(longhorn_manager_repo, longhorn_manager_branch,
                            longhorn_manager_image, longhorn_engine_image,
                            longhorn_instance_manager_image,
                            longhorn_share_manager_image,
                            longhorn_backing_image_manager_image)

    client = get_longhorn_api_client()

    # wait for 1 minute before checking pod restarts
    time.sleep(60)

    pod = core_api.read_namespaced_pod(name=pod_name, namespace='default')
    assert pod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        sspod = core_api.read_namespaced_pod(name=sspod_info['pod_name'],
                                             namespace='default')
        assert \
            sspod.status.container_statuses[0].restart_count == 0

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)

    for sspod_info in statefulset_pod_info:
        sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)
        write_pod_volume_data(core_api, sspod_info['pod_name'],
                              sspod_info['data'])

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    volume1 = client.by_id_volume(volume_name)
    volume1_data = write_volume_random_data(volume1)
    check_volume_data(volume1, volume1_data)

    statefulset['spec']['replicas'] = replicas = 0
    apps_api = get_apps_api_client()

    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})

    delete_and_wait_pod(core_api, pod_name)

    volume = client.by_id_volume(volume_name)
    volume.detach(hostId="")

    volumes = client.list_volume()
    for v in volumes:
        wait_for_volume_detached(client, v.name)

    engineimages = client.list_engine_image()
    for ei in engineimages:
        if ei.image == longhorn_engine_image:
            new_ei = ei

    volumes = client.list_volume()
    for v in volumes:
        volume = client.by_id_volume(v.name)
        volume.engineUpgrade(image=new_ei.image)

    statefulset['spec']['replicas'] = replicas = 2
    apps_api = get_apps_api_client()

    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})

    wait_statefulset(statefulset)

    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    volume1 = client.by_id_volume(volume_name)
    volume1.attach(hostId=host_id)
    volume1 = wait_for_volume_healthy(client, volume_name)

    for sspod_info in statefulset_pod_info:
        resp = read_volume_data(core_api, sspod_info['pod_name'])
        assert resp == sspod_info['data']

    res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)
    assert res_pod_md5sum == pod_md5sum

    check_volume_data(volume1, volume1_data)
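
write_pod_volume_data and read_volume_data exec into the pod to touch the mounted volume. A hedged sketch using the Kubernetes Python client's stream helper (the default path and the lack of error handling are assumptions; data is assumed shell-safe, as the suite writes random alphanumerics):

from kubernetes.stream import stream


def write_pod_volume_data(core_api, pod_name, data, path='/data/test'):
    # Exec a shell in the pod and write the payload onto the mounted volume.
    command = ['/bin/sh', '-c', 'echo -n %s > %s' % (data, path)]
    stream(core_api.connect_get_namespaced_pod_exec, pod_name, 'default',
           command=command, stderr=True, stdin=False, stdout=True, tty=False)


def read_volume_data(core_api, pod_name, path='/data/test'):
    # Read the payload back; stream() returns the command's stdout.
    command = ['/bin/sh', '-c', 'cat %s' % path]
    return stream(core_api.connect_get_namespaced_pod_exec, pod_name,
                  'default', command=command, stderr=True, stdin=False,
                  stdout=True, tty=False)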
Example #30
def test_statefulset_scaling(client, core_api, storage_class,
                             statefulset):  # NOQA
    """
    Test that scaling up a StatefulSet successfully provisions new volumes.

    1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
    2. Wait for pods to run.
    3. Verify the properties of the volumes.
    4. Scale the StatefulSet to 3 replicas
    5. Wait for the new pod to become ready.
    6. Verify the new volume properties.
    """

    statefulset_name = 'statefulset-scaling-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        found = False
        for pod in pod_info:
            if v.name == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v.size == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v.numberOfReplicas == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v.state == 'attached'
    assert len(pod_info) == 0

    statefulset['spec']['replicas'] = replicas = 3
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={'spec': {
            'replicas': replicas
        }})
    for i in range(DEFAULT_POD_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(name=statefulset_name,
                                                      namespace='default')
        if s_set.status.ready_replicas == replicas:
            break
        time.sleep(DEFAULT_POD_INTERVAL)
    assert s_set.status.ready_replicas == replicas

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == replicas
    for v in volumes:
        found = False
        for pod in pod_info:
            if v.name == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v.size == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v.numberOfReplicas == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v.state == 'attached'
    assert len(pod_info) == 0
Example #31
def test_ha_simple_recovery(client, volume_name):  # NOQA
    # get a random client

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert volume["endpoint"] == DEV_PATH + volume_name

    assert len(volume["replicas"]) == 2
    replica0 = volume["replicas"][0]
    assert replica0["name"] != ""

    replica1 = volume["replicas"][1]
    assert replica1["name"] != ""

    data = write_random_data(volume["endpoint"])

    volume = volume.replicaRemove(name=replica0["name"])

    # wait until we see a new replica start rebuilding
    new_replica_found = False
    for i in range(RETRY_COUNTS):
        v = client.by_id_volume(volume_name)
        for r in v["replicas"]:
            if r["name"] != replica0["name"] and \
                    r["name"] != replica1["name"]:
                new_replica_found = True
                break
        if new_replica_found:
            break
        time.sleep(RETRY_ITERVAL)
    assert new_replica_found

    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert volume["state"] == common.VOLUME_STATE_ATTACHED
    assert volume["robustness"] == common.VOLUME_ROBUSTNESS_HEALTHY
    assert len(volume["replicas"]) >= 2

    found = False
    for replica in volume["replicas"]:
        if replica["name"] == replica1["name"]:
            found = True
            break
    assert found

    check_data(volume['endpoint'], data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #32
def test_recurring_job(clients, volume_name):  # NOQA
    """
    Test recurring job

    1. Setup a random backupstore
    2. Create a volume.
    3. Create two jobs
        1. job 1: snapshot every one minute, retain 2
        2. job 2: backup every two minutes, retain 1
    4. Attach the volume.
    5. Sleep for 5 minutes
    6. Verify we have 4 snapshots total
        1. 2 snapshots, 1 backup, 1 volume-head
    7. Update jobs to replace the backup job
        1. New backup job run every one minute, retain 2
    8. Sleep for 5 minutes.
    9. We should have 6 snapshots
        1. 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1
        volume-head
    10. Make sure we have no more than 5 backups.
        1. old backup job may have at most 1 backup
        2. new backup job may have at most 3 backups
        3. FIXME: Seems we should have at most 4 backups?
    11. Make sure we have no more than 2 backups in progress
        1. FIXME: Seems we should have at most 1 from the new job?
    """
    for host_id, client in iter(clients.items()):  # NOQA
        break

    set_random_backupstore(client)

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    jobs = create_jobs1()
    volume.recurringUpdate(jobs=jobs)

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    job_backup2 = {
        "name": "backup2",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    volume.recurringUpdate(jobs=[jobs[0], job_backup2])

    # 5 minutes
    time.sleep(300)

    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot.removed is False:
            count += 1
    # 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1 volume-head
    assert count == 6

    complete_backup_number = 0
    in_progress_backup_number = 0
    volume = client.by_id_volume(volume_name)
    for b in volume.backupStatus:
        assert b.error == ""
        if b.state == "complete":
            complete_backup_number += 1
        elif b.state == "in_progress":
            in_progress_backup_number += 1
    assert complete_backup_number <= MAX_BACKUP_STATUS_SIZE
    # 1 from job_backup, 1 from job_backup2
    assert in_progress_backup_number <= 2

    volume = volume.detach()

    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)

    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #33
def ha_backup_deletion_recovery_test(client, volume_name, size, base_image=""):  # NOQA
    volume = client.create_volume(name=volume_name, size=size,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    # test backupTarget for multiple settings
    backupstores = common.get_backupstore_url()
    for backupstore in backupstores:
        if common.is_backupTarget_s3(backupstore):
            backupsettings = backupstore.split("$")
            setting = client.update(setting, value=backupsettings[0])
            assert setting["value"] == backupsettings[0]

            credential = client.by_id_setting(
                    common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value=backupsettings[1])
            assert credential["value"] == backupsettings[1]
        else:
            setting = client.update(setting, value=backupstore)
            assert setting["value"] == backupstore
            credential = client.by_id_setting(
                    common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value="")
            assert credential["value"] == ""

        data = write_volume_random_data(volume)
        snap2 = volume.snapshotCreate()
        volume.snapshotCreate()

        volume.snapshotBackup(name=snap2["name"])

        _, b = common.find_backup(client, volume_name, snap2["name"])

        res_name = common.generate_volume_name()
        res_volume = client.create_volume(name=res_name, size=size,
                                          numberOfReplicas=2,
                                          fromBackup=b["url"])
        res_volume = common.wait_for_volume_detached(client, res_name)
        res_volume = res_volume.attach(hostId=host_id)
        res_volume = common.wait_for_volume_healthy(client, res_name)
        check_volume_data(res_volume, data)

        snapshots = res_volume.snapshotList()
        # only the backup snapshot + volume-head
        assert len(snapshots) == 2
        backup_snapshot = ""
        for snap in snapshots:
            if snap["name"] != "volume-head":
                backup_snapshot = snap["name"]
        assert backup_snapshot != ""

        res_volume.snapshotCreate()
        snapshots = res_volume.snapshotList()
        assert len(snapshots) == 3

        res_volume.snapshotDelete(name=backup_snapshot)
        res_volume.snapshotPurge()
        snapshots = res_volume.snapshotList()
        assert len(snapshots) == 2

        ha_rebuild_replica_test(client, res_name)

        res_volume = res_volume.detach()
        res_volume = common.wait_for_volume_detached(client, res_name)

        client.delete(res_volume)
        common.wait_for_volume_delete(client, res_name)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
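
is_backupTarget_s3 only needs to distinguish backup target URLs by scheme; the "$" suffix carrying the credential secret name is split off by the caller above. A minimal sketch:

def is_backupTarget_s3(url):
    return url.startswith("s3://")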
Example #34
def test_statefulset_scaling(client, core_api, storage_class, statefulset):  # NOQA
    """
    Test that scaling up a StatefulSet successfully provisions new volumes.
    """

    statefulset_name = 'statefulset-scaling-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)

    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == statefulset['spec']['replicas']
    for v in volumes:
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    assert len(pod_info) == 0

    statefulset['spec']['replicas'] = replicas = 3
    apps_api = get_apps_api_client()
    apps_api.patch_namespaced_stateful_set(
        name=statefulset_name,
        namespace='default',
        body={
            'spec': {
                'replicas': replicas
            }
        })
    for i in range(DEFAULT_POD_TIMEOUT):
        s_set = apps_api.read_namespaced_stateful_set(
            name=statefulset_name,
            namespace='default')
        if s_set.status.ready_replicas == replicas:
            break
        time.sleep(DEFAULT_POD_INTERVAL)
    assert s_set.status.ready_replicas == replicas

    pod_info = get_statefulset_pod_info(core_api, statefulset)

    volumes = client.list_volume()
    assert len(volumes) == replicas
    for v in volumes:
        found = False
        for pod in pod_info:
            if v['name'] == pod['pv_name']:
                found = True
                break
        assert found
        pod_info.remove(pod)

        assert v['size'] == str(DEFAULT_VOLUME_SIZE * Gi)
        assert v['numberOfReplicas'] == \
            int(storage_class['parameters']['numberOfReplicas'])
        assert v['state'] == 'attached'
    assert len(pod_info) == 0