def migration_rollback_test(clients, volume_name, base_image=""):  # NOQA
    client = get_random_client(clients)
    hosts = list(clients.keys())
    host1 = hosts[0]
    host2 = hosts[1]

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=REPLICA_COUNT,
                                  baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host1)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = volume.migrationStart(nodeId=host2)
    attached_nodes = get_volume_attached_nodes(volume)
    assert host1 in attached_nodes
    assert volume["migrationNodeID"] == host2

    volume = common.wait_for_volume_migration_ready(client, volume_name)
    volume = volume.migrationRollback()
    volume = common.wait_for_volume_migration_node(client, volume_name, host1)
    assert volume["migrationNodeID"] == ""

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)

    wait_for_volume_delete(client, volume_name)
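
# The migration tests in this listing call get_volume_attached_nodes(), which
# is defined in the shared test module and not shown here. Below is a minimal,
# illustrative sketch (not the actual helper), assuming the volume object
# exposes a "controllers" list whose entries carry the "hostId" of the node
# each controller (engine) runs on.
def get_volume_attached_nodes(volume):  # NOQA
    # Illustrative sketch: collect the host IDs of every node that currently
    # runs a controller for this volume; an empty hostId means that
    # controller is not attached anywhere.
    nodes = []
    for controller in volume["controllers"]:
        host_id = controller["hostId"]
        if host_id != "":
            nodes.append(host_id)
    return nodes

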
def migration_confirm_test(clients, volume_name, base_image=""):  # NOQA
    client = get_random_client(clients)
    hosts = list(clients.keys())
    host1 = hosts[0]
    host2 = hosts[1]

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=REPLICA_COUNT,
                                  baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host1)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = volume.migrationStart(nodeId=host2)
    attached_nodes = get_volume_attached_nodes(volume)
    assert host1 in attached_nodes
    assert volume["migrationNodeID"] == host2
    with pytest.raises(Exception) as e:
        volume.migrationConfirm()
    assert "migration is not ready" in str(e.value)

    volume = common.wait_for_volume_migration_ready(client, volume_name)
    volume = volume.migrationConfirm()
    volume = common.wait_for_volume_migration_node(client,
                                                   volume_name,
                                                   host2)
    assert volume["migrationNodeID"] == ""

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)

    wait_for_volume_delete(client, volume_name)
Example #3
def test_replica_zone_anti_affinity(client, core_api, volume_name,
                                    k8s_node_zone_tags):  # NOQA
    """
    Test replica scheduler with zone anti-affinity

    1. Set zone anti-affinity to hard.
    2. Label nodes 1 & 2 with the same zone label "zone1".
    Label node 3 with zone label "zone2".
    3. Create a volume with 3 replicas.
    4. Wait for volume condition `scheduled` to be false.
    5. Label node 2 with zone label "zone3".
    6. Wait for volume condition `scheduled` to be true.
    7. Clear the volume.
    8. Set zone anti-affinity to soft.
    9. Change the zone labels on nodes 1 & 2 & 3 to "zone1".
    10. Create a volume.
    11. Wait for volume condition `scheduled` to be true.
    12. Clean up the replica count, the zone labels, and the volume.
    """

    wait_longhorn_node_zone_updated(client)

    replica_node_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    client.update(replica_node_soft_anti_affinity_setting, value="false")

    replica_zone_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_ZONE_SOFT_ANTI_AFFINITY)
    client.update(replica_zone_soft_anti_affinity_setting, value="false")

    volume = create_and_check_volume(client, volume_name)

    lh_nodes = client.list_node()

    count = 0
    for node in lh_nodes:
        count += 1
        set_k8s_node_zone_label(core_api, node.name, "lh-zone" + str(count))

    wait_longhorn_node_zone_updated(client)

    wait_for_volume_condition_scheduled(client, volume_name, "status",
                                        CONDITION_STATUS_TRUE)

    replica_zone_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_ZONE_SOFT_ANTI_AFFINITY)
    client.update(replica_zone_soft_anti_affinity_setting, value="true")

    volume = client.by_id_volume(volume_name)
    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    for node in lh_nodes:
        set_k8s_node_zone_label(core_api, node.name, "lh-zone1")

    wait_longhorn_node_zone_updated(client)

    volume = create_and_check_volume(client, volume_name)
    wait_for_volume_condition_scheduled(client, volume_name, "status",
                                        CONDITION_STATUS_TRUE)


def test_recurring_job_in_volume_creation(clients, volume_name):  # NOQA
    for host_id, client in clients.items():  # NOQA
        break

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name, size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2, recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
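
# create_jobs1() and check_jobs1_result() come from the shared test module and
# are not shown in this listing. The sketch below illustrates the shape of the
# recurring-job definitions; the exact names, cron expressions, and retain
# counts are assumptions (guided by the "2 from job_snap, 1 from job_backup"
# expectation used later in test_recurring_job).
def create_jobs1():  # NOQA
    # Illustrative sketch: one frequent snapshot job and one backup job, each
    # retaining only the most recent results.
    return [
        {"name": "snap", "cron": "@every 1m",
         "task": "snapshot", "retain": 2},
        {"name": "backup", "cron": "@every 2m",
         "task": "backup", "retain": 1},
    ]

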
def migration_confirm_test(clients, volume_name, base_image=""):  # NOQA
    client = get_random_client(clients)
    hosts = list(clients.keys())
    host1 = hosts[0]
    host2 = hosts[1]

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=REPLICA_COUNT,
                                  baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host1)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = volume.migrationStart(nodeId=host2)
    attached_nodes = get_volume_attached_nodes(volume)
    assert host1 in attached_nodes
    assert volume["migrationNodeID"] == host2
    with pytest.raises(Exception) as e:
        volume.migrationConfirm()
    assert "migration is not ready" in str(e.value)

    volume = common.wait_for_volume_migration_ready(client, volume_name)
    volume = volume.migrationConfirm()
    volume = common.wait_for_volume_migration_node(client, volume_name, host2)
    assert volume["migrationNodeID"] == ""

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)

    wait_for_volume_delete(client, volume_name)


def migration_rollback_test(clients, volume_name, base_image=""):  # NOQA
    client = get_random_client(clients)
    hosts = list(clients.keys())
    host1 = hosts[0]
    host2 = hosts[1]

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=REPLICA_COUNT,
                                  baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host1)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = volume.migrationStart(nodeId=host2)
    attached_nodes = get_volume_attached_nodes(volume)
    assert host1 in attached_nodes
    assert volume["migrationNodeID"] == host2

    volume = common.wait_for_volume_migration_ready(client, volume_name)
    volume = volume.migrationRollback()
    volume = common.wait_for_volume_migration_node(client, volume_name, host1)
    assert volume["migrationNodeID"] == ""

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)

    wait_for_volume_delete(client, volume_name)


def test_volume_multinode(clients):  # NOQA
    hosts = list(clients.keys())

    volume = clients[hosts[0]].create_volume(name=VOLUME_NAME,
                                             size=SIZE,
                                             numberOfReplicas=2)
    volume = wait_for_volume_state(clients[hosts[0]], VOLUME_NAME, "detached")

    for host_id in hosts:
        volume = volume.attach(hostId=host_id)
        volume = wait_for_volume_state(clients[hosts[1]], VOLUME_NAME,
                                       "healthy")
        assert volume["state"] == "healthy"
        assert volume["controller"]["hostId"] == host_id
        volume = volume.detach()
        volume = wait_for_volume_state(clients[hosts[2]], VOLUME_NAME,
                                       "detached")

    volume = volume.attach(hostId=hosts[0])
    volume = wait_for_volume_state(clients[hosts[1]], VOLUME_NAME, "healthy")
    assert volume["state"] == "healthy"
    assert volume["controller"]["hostId"] == hosts[0]

    snapshot_test(clients[hosts[1]])
    backup_test(clients[hosts[2]], hosts[2])

    clients[hosts[0]].delete(volume)
    wait_for_volume_delete(clients[hosts[1]], VOLUME_NAME)

    volumes = clients[hosts[2]].list_volume()
    assert len(volumes) == 0
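
# wait_for_volume_state() in the older examples above is a polling helper from
# the shared test module. A minimal sketch is shown below, assuming a plain
# retry loop over client.by_id_volume(); the retry count and interval are
# illustrative values, not the suite's real constants, and `time` is assumed
# to be imported at module level (the other tests already use time.sleep()).
def wait_for_volume_state(client, name, state,
                          retry_counts=120, retry_interval=1):  # NOQA
    # Illustrative sketch: poll until the volume reaches the expected state
    # or the retries run out, then assert on the final state.
    volume = client.by_id_volume(name)
    for _ in range(retry_counts):
        volume = client.by_id_volume(name)
        if volume["state"] == state:
            break
        time.sleep(retry_interval)
    assert volume["state"] == state
    return volume

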
Example #8
def test_replica_scheduler_no_disks(client):  # NOQA
    nodes = client.list_node()
    # delete all disks on each node
    for node in nodes:
        disks = node["disks"]
        name = node["name"]
        # set allowScheduling to false
        for fsid, disk in disks.items():
            disk["allowScheduling"] = False
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        for fsid, disk in node["disks"].items():
            # wait for the node controller to update the disk status
            wait_for_disk_status(client, name, fsid,
                                 "allowScheduling", False)
            wait_for_disk_status(client, name, fsid,
                                 "storageScheduled", 0)

        node = client.by_id_node(name)
        for fsid, disk in node["disks"].items():
            assert not disk["allowScheduling"]
        node = node.diskUpdate(disks=[])
        node = common.wait_for_disk_update(client, name, 0)
        assert len(node["disks"]) == 0

    # test that no disk can fit the volume
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=SIZE, numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(client, vol_name,
                                                        "status",
                                                        CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)
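
# get_update_disks() converts a node's {fsid: disk} map into the list format
# that node.diskUpdate(disks=...) is called with in these tests. It is not
# part of this listing; the one-liner below is an illustrative sketch of that
# conversion, not necessarily the suite's exact helper.
def get_update_disks(disks):  # NOQA
    # Illustrative sketch: flatten the disk map, keeping any fields the
    # caller has already modified (allowScheduling, storageReserved, ...).
    return list(disks.values())
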
Example #9
def test_ha_prohibit_deleting_last_replica(client, volume_name):  # NOQA
    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=1)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 1
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert len(volume["replicas"]) == 1
    replica0 = volume["replicas"][0]

    with pytest.raises(Exception) as e:
        volume.replicaRemove(name=replica0["name"])
    assert "no other healthy replica available" in str(e.value)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #10
def test_replica_scheduler_exceed_over_provisioning(client):  # NOQA
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    # test that a volume exceeding the over-provisioning limit cannot be
    # scheduled
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.items():
            disk["storageReserved"] = \
                disk["storageMaximum"] - 1*Gi
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.items():
            wait_for_disk_status(client, node["name"],
                                 fsid, "storageReserved",
                                 disk["storageMaximum"] - 1*Gi)

    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=str(2*Gi), numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(client, vol_name,
                                                        "status",
                                                        CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)
    client.update(over_provisioning_setting, value=old_provisioning_setting)
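
# A note on why the 2 Gi volume above cannot be scheduled: each disk is left
# with storageMaximum - storageReserved = 1 Gi of usable space, and with the
# over-provisioning percentage set to 100 the scheduler will not place more
# than that 1 Gi on the disk. The function below is only a rough sketch of
# that arithmetic; the real scheduler formula is an assumption here.
def _illustrative_schedulable_size(storage_maximum, storage_reserved,
                                   storage_scheduled, over_provisioning_pct):
    # Usable space scaled by the over-provisioning percentage, minus what is
    # already scheduled on the disk.
    usable = storage_maximum - storage_reserved
    return usable * over_provisioning_pct // 100 - storage_scheduled

# Example: 1 Gi usable, 100 percent over-provisioning, nothing scheduled yet
# => at most 1 Gi may be placed on the disk, so a 2 Gi replica does not fit.

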
def test_delete_with_static_pv(client, core_api, volume_name):  # NOQA
    """
    Test that deleting a Volume with related static Persistent Volume and
    Persistent Volume Claim resources successfully deletes the Volume and
    cleans up those resources.

    1. Create a Volume in Longhorn.
    2. Create a static Persistent Volume and Persistent Volume Claim for the
    Volume through Longhorn.
    3. Wait for the Kubernetes Status to indicate the existence of these
    resources.
    4. Attempt deletion of the Volume.
    5. Verify that the Volume and its associated resources have been deleted.
    """
    volume = create_and_check_volume(client, volume_name)
    pv_name = 'pv-' + volume_name
    pvc_name = 'pvc-' + volume_name
    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)

    ks = {
        'pvName': pv_name,
        'pvStatus': 'Bound',
        'namespace': 'default',
        'pvcName': pvc_name,
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
    wait_delete_pv(core_api, pv_name)
    wait_delete_pvc(core_api, pvc_name)
Example #12
def ha_simple_recovery_test(client, volume_name, size, base_image=""):  # NOQA
    volume = client.create_volume(name=volume_name, size=size,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == size
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""
    assert volume["baseImage"] == base_image

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    ha_rebuild_replica_test(client, volume_name)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #13
def test_replica_scheduler_no_disks(client):  # NOQA
    nodes = client.list_node()
    # delete all disks on each node
    for node in nodes:
        disks = node["disks"]
        name = node["name"]
        # set allowScheduling to false
        for fsid, disk in disks.items():
            disk["allowScheduling"] = False
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        for fsid, disk in node["disks"].items():
            # wait for the node controller to update the disk status
            wait_for_disk_status(client, name, fsid, "allowScheduling", False)
            wait_for_disk_status(client, name, fsid, "storageScheduled", 0)

        node = client.by_id_node(name)
        for fsid, disk in node["disks"].items():
            assert not disk["allowScheduling"]
        node = node.diskUpdate(disks=[])
        node = common.wait_for_disk_update(client, name, 0)
        assert len(node["disks"]) == 0

    # test that no disk can fit the volume
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=SIZE,
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)
Example #14
def test_volume_update_replica_count(clients, volume_name):  # NOQA
    for host_id, client in clients.items():
        break

    replica_count = 3
    volume = create_and_check_volume(client, volume_name, replica_count)

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    replica_count = 5
    volume = volume.updateReplicaCount(replicaCount=replica_count)
    volume = common.wait_for_volume_degraded(client, volume_name)
    volume = common.wait_for_volume_healthy(client, volume_name)
    assert len(volume["replicas"]) == replica_count

    old_replica_count = replica_count
    replica_count = 2
    volume = volume.updateReplicaCount(replicaCount=replica_count)
    volume = common.wait_for_volume_healthy(client, volume_name)
    assert len(volume["replicas"]) == old_replica_count

    volume.replicaRemove(name=volume["replicas"][0]["name"])
    volume.replicaRemove(name=volume["replicas"][1]["name"])
    volume.replicaRemove(name=volume["replicas"][2]["name"])

    volume = common.wait_for_volume_replica_count(client, volume_name,
                                                  replica_count)
    volume = common.wait_for_volume_healthy(client, volume_name)
    assert len(volume["replicas"]) == replica_count

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
Example #15
def test_recurring_job_in_volume_creation(clients, volume_name):  # NOQA
    for host_id, client in clients.items():  # NOQA
        break

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name,
                             size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name,
                         size=SIZE,
                         numberOfReplicas=2,
                         recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #16
def test_replica_scheduler_exceed_over_provisioning(client):  # NOQA
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    # test that a volume exceeding the over-provisioning limit cannot be
    # scheduled
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.items():
            disk["storageReserved"] = \
                disk["storageMaximum"] - 1*Gi
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.items():
            wait_for_disk_status(client, node["name"], fsid, "storageReserved",
                                 disk["storageMaximum"] - 1 * Gi)

    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=str(2 * Gi),
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)
    client.update(over_provisioning_setting, value=old_provisioning_setting)


def test_tag_scheduling_failure(client, node_default_tags):  # NOQA
    """
    Test that scheduling fails if no Nodes/Disks with the requested Tags are
    available.

    Case 1:
    Validate that the API call fails if nonexistent tags are specified for
    the volume.

    Case 2:
    1. Specify tags that exist but that no node or disk can satisfy.
    2. Validate that the volume fails to be scheduled.
    """
    invalid_tag_cases = [
        # Only one Disk Tag exists.
        {
            "disk": ["doesnotexist", "ssd"],
            "node": []
        },
        # Only one Node Tag exists.
        {
            "disk": [],
            "node": ["doesnotexist", "main"]
        }
    ]
    for tags in invalid_tag_cases:
        volume_name = generate_volume_name()  # NOQA
        with pytest.raises(Exception) as e:
            client.create_volume(name=volume_name,
                                 size=SIZE,
                                 numberOfReplicas=3,
                                 diskSelector=tags["disk"],
                                 nodeSelector=tags["node"])
        assert "does not exist" in str(e.value)

    unsatisfied_tag_cases = [{
        "disk": [],
        "node": ["main", "fallback"]
    }, {
        "disk": ["ssd", "m2"],
        "node": []
    }]
    for tags in unsatisfied_tag_cases:
        volume_name = generate_volume_name()
        client.create_volume(name=volume_name,
                             size=SIZE,
                             numberOfReplicas=3,
                             diskSelector=tags["disk"],
                             nodeSelector=tags["node"])
        volume = wait_for_volume_detached(client, volume_name)
        assert volume.diskSelector == tags["disk"]
        assert volume.nodeSelector == tags["node"]
        wait_scheduling_failure(client, volume_name)

        client.delete(volume)
        wait_for_volume_delete(client, volume.name)
        volumes = client.list_volume()
        assert len(volumes) == 0
Example #18
def test_ha_simple_recovery(clients):  # NOQA
    # get a random client
    for host_id, client in clients.items():
        break

    volume = client.create_volume(name=VOLUME_NAME,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = wait_for_volume_state(client, VOLUME_NAME, "detached")
    assert volume["name"] == VOLUME_NAME
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume = volume.attach(hostId=host_id)
    volume = wait_for_volume_state(client, VOLUME_NAME, "healthy")

    volume = client.by_id_volume(VOLUME_NAME)
    #    assert volume["endpoint"] == DEV_PATH + VOLUME_NAME

    assert len(volume["replicas"]) == 2
    replica0 = volume["replicas"][0]
    assert replica0["name"] != ""

    replica1 = volume["replicas"][1]
    assert replica1["name"] != ""

    volume = volume.replicaRemove(name=replica0["name"])
    assert len(volume["replicas"]) == 1
    volume = wait_for_volume_state(client, VOLUME_NAME, "degraded")

    volume = wait_for_volume_state(client, VOLUME_NAME, "healthy")

    volume = client.by_id_volume(VOLUME_NAME)
    assert volume["state"] == "healthy"
    assert len(volume["replicas"]) == 2

    new_replica0 = volume["replicas"][0]
    new_replica1 = volume["replicas"][1]

    assert (replica1["name"] == new_replica0["name"]
            or replica1["name"] == new_replica1["name"])

    volume = volume.detach()
    volume = wait_for_volume_state(client, VOLUME_NAME, "detached")

    client.delete(volume)
    wait_for_volume_delete(client, VOLUME_NAME)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #19
def volume_iscsi_basic_test(clients, volume_name, base_image=""):  # NOQA
    # get a random client
    for host_id, client in clients.items():
        break

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=3,
                                  frontend="iscsi",
                                  baseImage=base_image)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 3
    assert volume["frontend"] == "iscsi"
    assert volume["baseImage"] == base_image

    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 3

    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == volume["name"]
    assert volumes[0]["size"] == volume["size"]
    assert volumes[0]["numberOfReplicas"] == volume["numberOfReplicas"]
    assert volumes[0]["state"] == volume["state"]
    assert volumes[0]["created"] == volume["created"]
    assert volumes[0]["frontend"] == "iscsi"
    endpoint = get_volume_endpoint(volumes[0])
    assert endpoint.startswith("iscsi://")

    try:
        dev = iscsi_login(endpoint)
        volume_rw_test(dev)
    finally:
        iscsi_logout(endpoint)

    volume = volume.detach()

    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)

    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
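
# iscsi_login()/iscsi_logout() used above wrap the open-iscsi tooling and are
# not included in this listing (the real iscsi_login() also resolves and
# returns the block device that volume_rw_test() writes to). The sketch below
# only illustrates the login step, assuming the endpoint looks like
# "iscsi://<ip>:<port>/<target-iqn>/<lun>" and that iscsiadm is installed;
# the actual helpers in the test suite may differ.
def iscsi_login_sketch(endpoint):  # hypothetical helper, for illustration
    import re
    import subprocess
    m = re.match(r"iscsi://([^:]+):(\d+)/(.+)/(\d+)$", endpoint)
    assert m, "unexpected iSCSI endpoint format: " + endpoint
    ip, port, target = m.group(1), m.group(2), m.group(3)
    # Discover the portal and log in; the LUN then appears as a block device
    # under /dev/disk/by-path/.
    subprocess.check_call(["iscsiadm", "-m", "discovery",
                           "-t", "sendtargets", "-p", ip + ":" + port])
    subprocess.check_call(["iscsiadm", "-m", "node", "-T", target,
                           "-p", ip + ":" + port, "--login"])
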
Example #20
def backing_image_basic_operation_test(client, volume_name, bi_name,
                                       bi_url):  # NOQA
    """
    Test Backing Image APIs.

    1. Create a backing image.
    2. Create and attach a Volume with the backing image set.
    3. Verify that all disk states in the backing image are "ready".
    4. Try to use the API to manually clean up one disk for the backing image
       and verify that the call fails.
    5. Try to use the API to directly delete the backing image
       and verify that the call fails.
    6. Delete the volume.
    7. Use the API to manually clean up one disk for the backing image.
    8. Delete the backing image.
    """

    volume = create_and_check_volume(client, volume_name, 3,
                                     str(BACKING_IMAGE_EXT4_SIZE), bi_name)
    lht_host_id = get_self_host_id()
    volume.attach(hostId=lht_host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    assert volume.backingImage == bi_name
    assert volume.size == str(BACKING_IMAGE_EXT4_SIZE)

    random_disk_id = ""
    backing_image = client.by_id_backing_image(bi_name)
    assert backing_image.sourceType == BACKING_IMAGE_SOURCE_TYPE_DOWNLOAD
    assert backing_image.parameters["url"] == bi_url
    assert backing_image.currentChecksum != ""
    assert not backing_image.deletionTimestamp
    assert len(backing_image.diskFileStatusMap) == 3
    for disk_id, status in iter(backing_image.diskFileStatusMap.items()):
        assert status.state == "ready"
        random_disk_id = disk_id
    assert random_disk_id != ''

    with pytest.raises(Exception):
        backing_image.backingImageCleanup(disks=[random_disk_id])
    with pytest.raises(Exception):
        client.delete(backing_image)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    backing_image = client.by_id_backing_image(bi_name)
    backing_image.backingImageCleanup(disks=[random_disk_id])
    backing_image = wait_for_backing_image_disk_cleanup(
        client, bi_name, random_disk_id)
    client.delete(backing_image)
Example #21
def test_csi_expansion_with_size_round_up(client, core_api):  # NOQA
    """
    Test expanding a Longhorn volume.

    1. Create a Longhorn volume with size '1Gi'
    2. Attach, write data, and detach
    3. Expand the volume size to '2000000000' (2G) and
        check that the size is rounded up to '2000683008'
    4. Attach, write data, and detach
    5. Expand the volume size to '2Gi' and check that the size is '2147483648'
    6. Attach, write data, and detach
    """

    volume_name = generate_volume_name()
    volume = create_and_check_volume(client, volume_name, 2, str(1 * Gi))

    self_hostId = get_self_host_id()
    volume.attach(hostId=self_hostId, disableFrontend=False)
    volume = wait_for_volume_healthy(client, volume_name)
    test_data = write_volume_random_data(volume)
    volume.detach(hostId="")
    volume = wait_for_volume_detached(client, volume_name)

    volume.expand(size="2000000000")
    wait_for_volume_expansion(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume.size == "2000683008"

    self_hostId = get_self_host_id()
    volume.attach(hostId=self_hostId, disableFrontend=False)
    volume = wait_for_volume_healthy(client, volume_name)
    check_volume_data(volume, test_data, False)
    test_data = write_volume_random_data(volume)
    volume.detach(hostId="")
    volume = wait_for_volume_detached(client, volume_name)

    volume.expand(size=str(2 * Gi))
    wait_for_volume_expansion(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume.size == "2147483648"

    self_hostId = get_self_host_id()
    volume.attach(hostId=self_hostId, disableFrontend=False)
    volume = wait_for_volume_healthy(client, volume_name)
    check_volume_data(volume, test_data, False)
    volume.detach(hostId="")
    volume = wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
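
# The "2000683008" expected above is simply the requested size rounded up to
# a whole MiB: 2000000000 bytes is roughly 1907.35 MiB, which rounds up to
# 1908 MiB = 2000683008 bytes. (That Longhorn rounds to MiB boundaries is an
# inference from the numbers in this test, not a statement of its internals.)
def _round_up_to_mib(size_in_bytes):
    mib = 1024 * 1024
    return (size_in_bytes + mib - 1) // mib * mib

assert _round_up_to_mib(2000000000) == 2000683008
assert _round_up_to_mib(2 * 1024 ** 3) == 2147483648
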
Example #22
def test_setting_default_replica_count(clients, volume_name):  # NOQA
    client = get_random_client(clients)
    setting = client.by_id_setting(common.SETTING_DEFAULT_REPLICA_COUNT)
    old_value = setting["value"]
    setting = client.update(setting, value="5")

    volume = client.create_volume(name=volume_name, size=SIZE)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == int(setting.value)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    setting = client.update(setting, value=old_value)
Example #23
def test_ha_salvage(client, volume_name):  # NOQA
    # get a random client

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert len(volume["replicas"]) == 2
    replica0_name = volume["replicas"][0]["name"]
    replica1_name = volume["replicas"][1]["name"]

    data = write_random_data(volume["endpoint"])

    common.k8s_delete_replica_pods_for_volume(volume_name)

    volume = common.wait_for_volume_faulted(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] != ""
    assert volume["replicas"][1]["failedAt"] != ""

    volume.salvage(names=[replica0_name, replica1_name])

    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] == ""
    assert volume["replicas"][1]["failedAt"] == ""

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    check_data(volume["endpoint"], data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #24
def test_recurring_job_in_volume_creation(set_random_backupstore, client,
                                          volume_name):  # NOQA
    """
    Test create volume with recurring jobs

    1. Create a volume with recurring jobs through the Longhorn API
    2. Verify the recurring jobs run correctly
    """
    host_id = get_self_host_id()

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name,
                             size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name,
                         size=SIZE,
                         numberOfReplicas=2,
                         recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)

    # wait until the beginning of an even minute
    wait_until_begin_of_an_even_minute()
    # wait until the 10th second of an even minute
    # to avoid writing data while a backup is being taken
    time.sleep(10)

    write_volume_random_data(volume)
    time.sleep(150)  # 2.5 minutes
    write_volume_random_data(volume)
    time.sleep(150)  # 2.5 minutes

    check_jobs1_result(volume)

    volume = volume.detach(hostId="")
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #25
def test_recurring_job(clients, volume_name):  # NOQA
    for host_id, client in clients.items():  # NOQA
        break

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    jobs = create_jobs1()
    volume.recurringUpdate(jobs=jobs)

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    job_backup2 = {
        "name": "backup2",
        "cron": "* * * * *",
        "task": "backup",
        "retain": 2
    }
    volume.recurringUpdate(jobs=[jobs[0], job_backup2])

    # 5 minutes
    time.sleep(300)

    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot["removed"] is False:
            count += 1
    # 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1 volume-head
    assert count == 6

    volume = volume.detach()

    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)

    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #26
def test_attach_without_frontend(clients, volume_name):  # NOQA
    for host_id, client in clients.items():
        break

    volume = create_and_check_volume(client, volume_name)

    lht_hostId = get_self_host_id()
    volume.attach(hostId=lht_hostId, disableFrontend=False)
    common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert volume["disableFrontend"] is False
    assert volume["frontend"] == "blockdev"

    snap1_data = write_volume_random_data(volume)
    snap1 = volume.snapshotCreate()

    write_volume_random_data(volume)
    volume.snapshotCreate()

    volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=lht_hostId, disableFrontend=True)
    common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    engine = get_volume_engine(volume)
    assert volume["disableFrontend"] is True
    assert volume["frontend"] == "blockdev"
    assert engine["endpoint"] == ""

    volume.snapshotRevert(name=snap1["name"])

    volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=lht_hostId, disableFrontend=False)
    common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert volume["disableFrontend"] is False
    assert volume["frontend"] == "blockdev"

    check_volume_data(volume, snap1_data)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
Example #27
def ha_salvage_test(client, volume_name, base_image=""):  # NOQA
    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""
    assert volume["baseImage"] == base_image

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert len(volume["replicas"]) == 2
    replica0_name = volume["replicas"][0]["name"]
    replica1_name = volume["replicas"][1]["name"]

    data = write_volume_random_data(volume)

    common.k8s_delete_replica_pods_for_volume(volume_name)

    volume = common.wait_for_volume_faulted(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] != ""
    assert volume["replicas"][1]["failedAt"] != ""

    volume.salvage(names=[replica0_name, replica1_name])

    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] == ""
    assert volume["replicas"][1]["failedAt"] == ""

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    check_volume_data(volume, data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0


def test_backup(clients):  # NOQA
    for host_id, client in clients.items():
        break

    volume = client.create_volume(name=VOLUME_NAME,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = wait_for_volume_state(client, VOLUME_NAME, "detached")
    assert volume["name"] == VOLUME_NAME
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"

    volume = volume.attach(hostId=host_id)
    volume = wait_for_volume_state(client, VOLUME_NAME, "healthy")

    backup_test(client, host_id)
    volume = volume.detach()
    volume = wait_for_volume_state(client, VOLUME_NAME, "detached")

    client.delete(volume)
    volume = wait_for_volume_delete(client, VOLUME_NAME)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #29
def test_snapshot(clients, volume_name):  # NOQA
    for host_id, client in clients.items():
        break

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2)

    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"

    lht_hostId = get_self_host_id()
    volume = volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, volume_name)

    snapshot_test(client, volume_name)
    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    volume = wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #30
def test_ha_salvage(clients):  # NOQA
    # get a random client
    for host_id, client in clients.items():
        break

    volume = client.create_volume(name=VOLUME_NAME,
                                  size=SIZE,
                                  numberOfReplicas=2)
    volume = wait_for_volume_state(client, VOLUME_NAME, "detached")
    assert volume["name"] == VOLUME_NAME
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume = volume.attach(hostId=host_id)
    volume = wait_for_volume_state(client, VOLUME_NAME, "healthy")

    assert len(volume["replicas"]) == 2
    replica0_name = volume["replicas"][0]["name"]
    replica1_name = volume["replicas"][1]["name"]
    common.docker_stop(replica0_name, replica1_name)

    volume = wait_for_volume_state(client, VOLUME_NAME, "fault")
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["badTimestamp"] != ""
    assert volume["replicas"][1]["badTimestamp"] != ""

    volume.salvage(names=[replica0_name, replica1_name])

    volume = wait_for_volume_state(client, VOLUME_NAME, "detached")
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["badTimestamp"] == ""
    assert volume["replicas"][1]["badTimestamp"] == ""

    volume = volume.attach(hostId=host_id)
    volume = wait_for_volume_state(client, VOLUME_NAME, "healthy")

    volume = volume.detach()
    volume = wait_for_volume_state(client, VOLUME_NAME, "detached")

    client.delete(volume)
    wait_for_volume_delete(client, VOLUME_NAME)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #31
def test_volume_scheduling_failure(clients, volume_name):  # NOQA
    '''
    Test that scheduling fails when scheduling is disabled on all the nodes.
    Also test that a volume that failed scheduling cannot be attached.
    '''
    client = get_random_client(clients)
    nodes = client.list_node()
    assert len(nodes) > 0

    for node in nodes:
        node = client.update(node, allowScheduling=False)
        node = common.wait_for_node_update(client, node["id"],
                                           "allowScheduling", False)

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=3)

    volume = common.wait_for_volume_condition_scheduled(client, volume_name,
                                                        "status",
                                                        CONDITION_STATUS_FALSE)
    volume = common.wait_for_volume_detached(client, volume_name)
    self_node = get_self_host_id()
    with pytest.raises(Exception) as e:
        volume.attach(hostId=self_node)
    assert "not scheduled" in str(e.value)

    for node in nodes:
        node = client.update(node, allowScheduling=True)
        node = common.wait_for_node_update(client, node["id"],
                                           "allowScheduling", True)

    volume = common.wait_for_volume_condition_scheduled(client, volume_name,
                                                        "status",
                                                        CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, volume_name)
    volume = volume.attach(hostId=self_node)
    volume = common.wait_for_volume_healthy(client, volume_name)
    endpoint = get_volume_endpoint(volume)
    assert endpoint != ""
    volume_rw_test(endpoint)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)


def test_recurring_job(clients, volume_name):  # NOQA
    for host_id, client in clients.items():  # NOQA
        break

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    jobs = create_jobs1()
    volume.recurringUpdate(jobs=jobs)

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    job_backup2 = {"name": "backup2", "cron": "* * * * *",
                   "task": "backup", "retain": 2}
    volume.recurringUpdate(jobs=[jobs[0], job_backup2])

    # 5 minutes
    time.sleep(300)

    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot["removed"] is False:
            count += 1
    # 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1 volume-head
    assert count == 6

    volume = volume.detach()

    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)

    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #33
def test_recurring_snapshot(clients):  # NOQA
    for host_id, client in clients.items():
        break

    volume = client.create_volume(name=VOLUME_NAME, size=SIZE,
                                  numberOfReplicas=2)
    volume = wait_for_volume_state(client, VOLUME_NAME, "detached")

    snap2s = {"name": "snap2s", "cron": "@every 2s",
              "task": "snapshot", "retain": 3}
    snap3s = {"name": "snap3s", "cron": "@every 3s",
              "task": "snapshot", "retain": 2}
    volume.recurringUpdate(jobs=[snap2s, snap3s])

    time.sleep(0.1)
    volume = volume.attach(hostId=host_id)
    volume = wait_for_volume_state(client, VOLUME_NAME, "healthy")

    time.sleep(10)

    snapshots = volume.snapshotList()
    assert len(snapshots) == 5

    snap4s = {"name": "snap4s", "cron": "@every 4s",
              "task": "snapshot", "retain": 2}
    volume.recurringUpdate(jobs=[snap2s, snap4s])

    time.sleep(10)

    snapshots = volume.snapshotList()
    assert len(snapshots) == 7

    volume = volume.detach()

    wait_for_volume_state(client, VOLUME_NAME, "detached")

    client.delete(volume)

    wait_for_volume_delete(client, VOLUME_NAME)

    volumes = client.list_volume()
    assert len(volumes) == 0


def test_recurring_job_in_volume_creation(clients, volume_name):  # NOQA
    """
    Test create volume with recurring jobs

    1. Create a volume with recurring jobs through the Longhorn API
    2. Verify the recurring jobs run correctly
    """
    for host_id, client in iter(clients.items()):  # NOQA
        break

    set_random_backupstore(client)

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name,
                             size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name,
                         size=SIZE,
                         numberOfReplicas=2,
                         recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0


def test_delete_provisioned_pvc(client, core_api, storage_class, pvc):  # NOQA
    """
    Test that deleting the Persistent Volume Claim for a dynamically
    provisioned Volume properly deletes the Volume and the associated
    Kubernetes resources.

    1. Create a Storage Class to test with.
    2. Create a Persistent Volume Claim that requests a Volume from that
    Storage Class.
    3. Wait for the Volume to be provisioned and for the Kubernetes Status to
    be updated correctly.
    4. Attempt to delete the Persistent Volume Claim.
    5. Verify that the associated Volume and its resources have been deleted.
    """
    pv = provision_and_wait_pv(client, core_api, storage_class, pvc)
    pv_name = pv.metadata.name
    volume_name = pv.spec.csi.volume_handle  # NOQA

    delete_and_wait_pvc(core_api, pvc['metadata']['name'])
    wait_delete_pv(core_api, pv_name)
    wait_for_volume_delete(client, volume_name)


def migration_rollback_test(clients, volume_name, backing_image=""):  # NOQA
    client, volume, data = setup_migration_test(clients, volume_name,
                                                backing_image)
    host1, host2 = get_hosts_for_migration_test(clients)

    volume.attach(hostId=host1)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volume.attach(hostId=host2)
    volume = common.wait_for_volume_migration_ready(client, volume_name)

    volume.detach(hostId=host2)
    volume = common.wait_for_volume_migration_node(client, volume_name, host1)

    volume.detach(hostId="")
    volume = common.wait_for_volume_detached(client, volume_name)

    # verify test data
    check_volume_data(client, volume_name, data)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
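
# get_hosts_for_migration_test() and setup_migration_test() used by the newer
# migration tests are defined in the shared test module. The sketch below only
# illustrates what the host-selection helper needs to provide (two distinct
# node IDs to migrate between); the real helper may apply extra scheduling
# checks.
def get_hosts_for_migration_test_sketch(clients):  # hypothetical helper
    # Illustrative sketch: the source node and the migration target just have
    # to be two different registered hosts.
    hosts = list(clients.keys())
    assert len(hosts) >= 2
    return hosts[0], hosts[1]
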
Example #37
def backupstore_test(client, host_id, volname, size):
    volume = client.by_id_volume(volname)
    volume.snapshotCreate()
    data = write_volume_random_data(volume)
    snap2 = volume.snapshotCreate()
    volume.snapshotCreate()

    volume.snapshotBackup(name=snap2["name"])

    bv, b = common.find_backup(client, volname, snap2["name"])

    new_b = bv.backupGet(name=b["name"])
    assert new_b["name"] == b["name"]
    assert new_b["url"] == b["url"]
    assert new_b["snapshotName"] == b["snapshotName"]
    assert new_b["snapshotCreated"] == b["snapshotCreated"]
    assert new_b["created"] == b["created"]
    assert new_b["volumeName"] == b["volumeName"]
    assert new_b["volumeSize"] == b["volumeSize"]
    assert new_b["volumeCreated"] == b["volumeCreated"]

    # test restore
    restoreName = generate_volume_name()
    volume = client.create_volume(name=restoreName,
                                  size=size,
                                  numberOfReplicas=2,
                                  fromBackup=b["url"])
    volume = common.wait_for_volume_detached(client, restoreName)
    assert volume["name"] == restoreName
    assert volume["size"] == size
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, restoreName)
    check_volume_data(volume, data)
    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, restoreName)
    client.delete(volume)

    volume = wait_for_volume_delete(client, restoreName)

    bv.backupDelete(name=b["name"])

    backups = bv.backupList()
    found = False
    for b in backups:
        if b["snapshotName"] == snap2["name"]:
            found = True
            break
    assert not found
Example #38
def ha_backup_deletion_recovery_test(client, volume_name, size, base_image=""):  # NOQA
    volume = client.create_volume(name=volume_name, size=size,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    # test backupTarget for multiple settings
    backupstores = common.get_backupstore_url()
    for backupstore in backupstores:
        if common.is_backupTarget_s3(backupstore):
            backupsettings = backupstore.split("$")
            setting = client.update(setting, value=backupsettings[0])
            assert setting["value"] == backupsettings[0]

            credential = client.by_id_setting(
                    common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value=backupsettings[1])
            assert credential["value"] == backupsettings[1]
        else:
            setting = client.update(setting, value=backupstore)
            assert setting["value"] == backupstore
            credential = client.by_id_setting(
                    common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value="")
            assert credential["value"] == ""

        data = write_volume_random_data(volume)
        snap2 = volume.snapshotCreate()
        volume.snapshotCreate()

        volume.snapshotBackup(name=snap2["name"])

        _, b = common.find_backup(client, volume_name, snap2["name"])

        res_name = common.generate_volume_name()
        res_volume = client.create_volume(name=res_name, size=size,
                                          numberOfReplicas=2,
                                          fromBackup=b["url"])
        res_volume = common.wait_for_volume_detached(client, res_name)
        res_volume = res_volume.attach(hostId=host_id)
        res_volume = common.wait_for_volume_healthy(client, res_name)
        check_volume_data(res_volume, data)

        snapshots = res_volume.snapshotList()
        # only the backup snapshot + volume-head
        assert len(snapshots) == 2
        backup_snapshot = ""
        for snap in snapshots:
            if snap["name"] != "volume-head":
                backup_snapshot = snap["name"]
        assert backup_snapshot != ""

        res_volume.snapshotCreate()
        snapshots = res_volume.snapshotList()
        assert len(snapshots) == 3

        res_volume.snapshotDelete(name=backup_snapshot)
        res_volume.snapshotPurge()
        snapshots = res_volume.snapshotList()
        assert len(snapshots) == 2

        ha_rebuild_replica_test(client, res_name)

        res_volume = res_volume.detach()
        res_volume = common.wait_for_volume_detached(client, res_name)

        client.delete(res_volume)
        common.wait_for_volume_delete(client, res_name)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #39
def cleanup_volume(client, vol_name):  # NOQA
    volume = client.by_id_volume(vol_name)
    volume.detach()
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)