Example #1
def volume_iscsi_basic_test(clients, volume_name, base_image=""):  # NOQA
    # get a random client
    for host_id, client in clients.items():
        break

    volume = create_and_check_volume(client, volume_name, 3, SIZE, base_image,
                                     "iscsi")
    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == volume["name"]
    assert volumes[0]["size"] == volume["size"]
    assert volumes[0]["numberOfReplicas"] == volume["numberOfReplicas"]
    assert volumes[0]["state"] == volume["state"]
    assert volumes[0]["created"] == volume["created"]
    assert volumes[0]["frontend"] == "iscsi"
    endpoint = get_volume_endpoint(volumes[0])
    assert endpoint.startswith("iscsi://")

    try:
        dev = iscsi_login(endpoint)
        volume_rw_test(dev)
    finally:
        iscsi_logout(endpoint)

    cleanup_volume(client, volume)
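
The iscsi_login/iscsi_logout helpers are not shown in this collection. A minimal sketch of what they could look like on top of open-iscsi's iscsiadm, with the endpoint parsing and the device resolution as assumptions:

import subprocess

def iscsi_login(endpoint):
    # endpoint is assumed to look like iscsi://<ip>:<port>/<iqn>/<lun>
    rest = endpoint[len("iscsi://"):]
    portal, _, target = rest.partition("/")
    iqn = target.rsplit("/", 1)[0]
    subprocess.check_call(["iscsiadm", "-m", "discovery",
                           "-t", "sendtargets", "-p", portal])
    subprocess.check_call(["iscsiadm", "-m", "node", "-T", iqn,
                           "-p", portal, "--login"])
    # the real helper presumably resolves and returns the local block
    # device (e.g. via /dev/disk/by-path) once the session is up

def iscsi_logout(endpoint):
    rest = endpoint[len("iscsi://"):]
    portal, _, target = rest.partition("/")
    iqn = target.rsplit("/", 1)[0]
    subprocess.check_call(["iscsiadm", "-m", "node", "-T", iqn,
                           "-p", portal, "--logout"])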
Example #2
def create_host_disk(client, vol_name, size, node_id):  # NOQA
    # create a single replica volume and attach it to node
    volume = create_volume(client, vol_name, size, node_id, 1)

    # prepare the disk in the host filesystem
    disk_path = common.prepare_host_disk(get_volume_endpoint(volume),
                                         volume["name"])
    return disk_path
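
Every example in this collection goes through get_volume_endpoint, whose implementation is not shown. A sketch consistent with how the examples use it (blockdev volumes surface as /dev/longhorn/<name>, iscsi volumes report an iscsi:// URL); the controller field names are assumptions:

DEV_PATH = "/dev/longhorn/"

def get_volume_endpoint(volume):
    # sketch: the blockdev frontend exposes a local device node, while the
    # iscsi frontend reports an iscsi:// URL on the volume's controller
    endpoint = volume["controllers"][0]["endpoint"]
    if volume["frontend"] == "blockdev":
        assert endpoint == DEV_PATH + volume["name"]
    elif volume["frontend"] == "iscsi":
        assert endpoint.startswith("iscsi://")
    return endpoint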
Example #3
def volume_iscsi_basic_test(clients, volume_name, base_image=""):  # NOQA
    # get a random client
    for host_id, client in clients.items():
        break

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=3,
                                  frontend="iscsi",
                                  baseImage=base_image)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 3
    assert volume["frontend"] == "iscsi"
    assert volume["baseImage"] == base_image

    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 3

    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == volume["name"]
    assert volumes[0]["size"] == volume["size"]
    assert volumes[0]["numberOfReplicas"] == volume["numberOfReplicas"]
    assert volumes[0]["state"] == volume["state"]
    assert volumes[0]["created"] == volume["created"]
    assert volumes[0]["frontend"] == "iscsi"
    endpoint = get_volume_endpoint(volumes[0])
    assert endpoint.startswith("iscsi://")

    try:
        dev = iscsi_login(endpoint)
        volume_rw_test(dev)
    finally:
        iscsi_logout(endpoint)

    volume = volume.detach()

    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)

    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #4
def test_volume_scheduling_failure(clients, volume_name):  # NOQA
    '''
    Test that scheduling fails when scheduling is disabled on all nodes.
    Also test that a volume whose scheduling failed cannot be attached.
    '''
    client = get_random_client(clients)
    nodes = client.list_node()
    assert len(nodes) > 0

    for node in nodes:
        node = client.update(node, allowScheduling=False)
        node = common.wait_for_node_update(client, node["id"],
                                           "allowScheduling", False)

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=3)

    volume = common.wait_for_volume_condition_scheduled(client, volume_name,
                                                        "status",
                                                        CONDITION_STATUS_FALSE)
    volume = common.wait_for_volume_detached(client, volume_name)
    self_node = get_self_host_id()
    with pytest.raises(Exception) as e:
        volume.attach(hostId=self_node)
    assert "not scheduled" in str(e.value)

    for node in nodes:
        node = client.update(node, allowScheduling=True)
        node = common.wait_for_node_update(client, node["id"],
                                           "allowScheduling", True)

    volume = common.wait_for_volume_condition_scheduled(client, volume_name,
                                                        "status",
                                                        CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, volume_name)
    volume = volume.attach(hostId=self_node)
    volume = common.wait_for_volume_healthy(client, volume_name)
    endpoint = get_volume_endpoint(volume)
    assert endpoint != ""
    volume_rw_test(endpoint)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
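
This example fetches its client through get_random_client rather than the loop-and-break idiom the other examples use; a helper equivalent to that idiom would look like:

def get_random_client(clients):
    # pick an arbitrary client from the host-id -> client map, mirroring
    # the "for host_id, client in clients.items(): break" idiom above
    for _, client in clients.items():
        return client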
Example #5
def ha_rebuild_replica_test(client, volname):  # NOQA
    volume = client.by_id_volume(volname)
    assert get_volume_endpoint(volume) == DEV_PATH + volname

    assert len(volume["replicas"]) == 2
    replica0 = volume["replicas"][0]
    assert replica0["name"] != ""

    replica1 = volume["replicas"][1]
    assert replica1["name"] != ""

    data = write_volume_random_data(volume)

    volume = volume.replicaRemove(name=replica0["name"])

    # wait until we see a new replica start rebuilding
    new_replica_found = False
    for i in range(RETRY_COUNTS):
        v = client.by_id_volume(volname)
        for r in v["replicas"]:
            if r["name"] != replica0["name"] and \
                    r["name"] != replica1["name"]:
                new_replica_found = True
                break
        if new_replica_found:
            break
        time.sleep(RETRY_INTERVAL)
    assert new_replica_found

    volume = common.wait_for_volume_healthy(client, volname)

    volume = client.by_id_volume(volname)
    assert volume["state"] == common.VOLUME_STATE_ATTACHED
    assert volume["robustness"] == common.VOLUME_ROBUSTNESS_HEALTHY
    assert len(volume["replicas"]) >= 2

    found = False
    for replica in volume["replicas"]:
        if replica["name"] == replica1["name"]:
            found = True
            break
    assert found

    check_volume_data(volume, data)
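
The rebuild-wait loop above is the poll-until-true pattern that RETRY_COUNTS and RETRY_INTERVAL imply. A generic version, sketched here as an illustration rather than an existing helper, could be shared across these tests:

import time

def wait_for(predicate, retry_counts, retry_interval):
    # poll `predicate` until it returns truthy or the retries run out,
    # matching the RETRY_COUNTS/RETRY_INTERVAL loops in these examples
    for _ in range(retry_counts):
        if predicate():
            return True
        time.sleep(retry_interval)
    return False

With this, the loop above reduces to a single wait_for(...) call over a predicate that scans v["replicas"] for a name other than replica0's and replica1's.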
Example #6
def test_xfs_pv_existing_volume(client, core_api, pod_manifest): # NOQA
    """
    Test create PV with existing XFS filesystem

    1. Create a volume
    2. Create PV/PVC for the existing volume, specify `xfs` as filesystem
    3. Attach the volume to the current node.
    4. Format it to `xfs`
    5. Create a POD using the volume

    FIXME: We should write data in step 4 and validate it in step 5 to make
    sure the disk won't be reformatted.
    """
    volume_name = generate_volume_name()

    volume = create_and_check_volume(client, volume_name)

    create_pv_for_volume(client, core_api, volume, volume_name, "xfs")

    create_pvc_for_volume(client, core_api, volume, volume_name)

    host_id = get_self_host_id()

    volume = volume.attach(hostId=host_id)

    volume = wait_for_volume_healthy(client, volume_name)

    cmd = ['mkfs.xfs', get_volume_endpoint(volume)]
    subprocess.check_call(cmd)

    volume = volume.detach()

    volume = wait_for_volume_detached(client, volume_name)

    pod_manifest['spec']['volumes'] = [{
        "name": "pod-data",
        "persistentVolumeClaim": {
            "claimName": volume_name
        }
    }]

    create_and_wait_pod(core_api, pod_manifest)
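
The pod_manifest fixture is assumed to be a plain dict with the usual Kubernetes Pod shape; a minimal manifest that the ['spec']['volumes'] assignment above would slot into could look like:

pod_manifest = {
    "apiVersion": "v1",
    "kind": "Pod",
    "metadata": {"name": "test-pod"},
    "spec": {
        "containers": [{
            "name": "sleep",
            "image": "busybox",
            "command": ["sleep", "3600"],
            "volumeMounts": [{
                "name": "pod-data",  # must match spec.volumes[].name
                "mountPath": "/data"
            }]
        }],
        "volumes": []  # filled in by the test, as shown above
    }
}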
Example #7
def volume_basic_test(clients, volume_name, base_image=""):  # NOQA
    num_hosts = len(clients)
    num_replicas = 3

    # get a random client
    for host_id, client in clients.items():
        break

    # each invalid creation gets its own pytest.raises block; otherwise the
    # first call to raise would skip the remaining checks
    with pytest.raises(Exception):
        client.create_volume(name="wrong_volume-name-1.0",
                             size=SIZE,
                             numberOfReplicas=2)
    with pytest.raises(Exception):
        client.create_volume(name="wrong_volume-name",
                             size=SIZE,
                             numberOfReplicas=2)
    with pytest.raises(Exception):
        client.create_volume(name="wrong_volume-name",
                             size=SIZE,
                             numberOfReplicas=2,
                             frontend="invalid_frontend")

    volume = client.create_volume(name=volume_name,
                                  size=SIZE,
                                  numberOfReplicas=num_replicas,
                                  baseImage=base_image)

    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == num_replicas
    assert volume["frontend"] == "blockdev"
    assert volume["baseImage"] == base_image

    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == num_replicas

    assert volume["state"] == "detached"
    assert volume["created"] != ""

    def validate_volume_basic(expected, actual):
        assert actual["name"] == expected["name"]
        assert actual["size"] == expected["size"]
        assert actual["numberOfReplicas"] == expected["numberOfReplicas"]
        assert actual["frontend"] == "blockdev"
        assert actual["baseImage"] == base_image
        assert actual["state"] == expected["state"]
        assert actual["created"] == expected["created"]

    volumes = client.list_volume()
    assert len(volumes) == 1
    validate_volume_basic(volume, volumes[0])

    volumeByName = client.by_id_volume(volume_name)
    validate_volume_basic(volume, volumeByName)

    lht_hostId = get_self_host_id()
    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volumeByName = client.by_id_volume(volume_name)
    validate_volume_basic(volume, volumeByName)
    assert get_volume_endpoint(volumeByName) == DEV_PATH + volume_name

    # validate soft anti-affinity
    hosts = {}
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        hosts[id] = True
    if num_hosts >= num_replicas:
        assert len(hosts) == num_replicas
    else:
        assert len(hosts) == num_hosts

    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == volume["name"]
    assert volumes[0]["size"] == volume["size"]
    assert volumes[0]["numberOfReplicas"] == volume["numberOfReplicas"]
    assert volumes[0]["state"] == volume["state"]
    assert volumes[0]["created"] == volume["created"]
    assert get_volume_endpoint(volumes[0]) == DEV_PATH + volume_name

    volume = client.by_id_volume(volume_name)
    assert get_volume_endpoint(volume) == DEV_PATH + volume_name

    volume_rw_test(get_volume_endpoint(volume))

    volume = volume.detach()

    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
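
volume_rw_test, write_volume_random_data, and check_volume_data come from the shared test module and are not shown. A sketch of the write/verify pair against the raw device, with the offset scheme and the return format as assumptions:

import os
import random

def write_volume_random_data(volume, length=64):
    # write random bytes at a random, sector-aligned offset on the raw
    # device and return what check_volume_data needs to verify them later
    dev = get_volume_endpoint(volume)
    data = os.urandom(length)
    offset = random.randint(0, 1024) * 512
    with open(dev, 'r+b') as f:
        f.seek(offset)
        f.write(data)
        f.flush()
        os.fsync(f.fileno())
    return {"offset": offset, "content": data}

def check_volume_data(volume, data):
    # read the same range back and compare it to what was written
    dev = get_volume_endpoint(volume)
    with open(dev, 'rb') as f:
        f.seek(data["offset"])
        assert f.read(len(data["content"])) == data["content"]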
Example #8
def test_node_delete_umount_disks(client):  # NOQA
    # create test disks for node
    disk_volume_name = 'vol-disk-1'
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk_path1 = create_host_disk(client, disk_volume_name, str(Gi),
                                  lht_hostId)
    disk1 = {
        "path": disk_path1,
        "allowScheduling": True,
        "storageReserved": SMALL_DISK_SIZE
    }

    update_disk = get_update_disks(disks)
    for disk in update_disk:
        disk["allowScheduling"] = False
    # add new disk for node
    update_disk.append(disk1)
    # save disks to node
    node = node.diskUpdate(disks=update_disk)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disk))
    assert len(node["disks"]) == len(update_disk)
    node = client.by_id_node(lht_hostId)
    assert len(node["disks"]) == len(update_disk)

    disks = node["disks"]
    # wait for node controller to update disk status
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 True)
            wait_for_disk_status(client, lht_hostId, fsid, "storageReserved",
                                 SMALL_DISK_SIZE)
            free, total = common.get_host_disk_size(disk_path1)
            wait_for_disk_status(client, lht_hostId, fsid, "storageAvailable",
                                 free)
            wait_for_disk_status(client, lht_hostId, fsid, "storageMaximum",
                                 total)

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for key, disk in disks.items():
        if disk["path"] == disk_path1:
            assert disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path1)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            assert not disk["allowScheduling"]

    # create a volume
    nodes = client.list_node()
    vol_name = common.generate_volume_name()
    volume = create_volume(client, vol_name, str(SMALL_DISK_SIZE), lht_hostId,
                           len(nodes))
    replicas = volume["replicas"]
    for replica in replicas:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        if id == lht_hostId:
            assert replica["dataPath"].startswith(disk_path1)

    # umount the disk
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for update node status
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 False)
            wait_for_disk_status(client, lht_hostId, fsid, "storageMaximum", 0)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_FALSE)

    # check result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    update_disks = []
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == 0
            assert disk["storageAvailable"] == 0
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_FALSE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_FALSE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
            update_disks.append(disk)

    # delete umount disk exception
    with pytest.raises(Exception) as e:
        node.diskUpdate(disks=update_disks)
    assert "disable the disk" in str(e.value)

    # update other disks
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] != disk_path1:
            disk["allowScheduling"] = True
    test_update = get_update_disks(disks)
    node = node.diskUpdate(disks=test_update)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] != disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 True)
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] != disk_path1:
            assert disk["allowScheduling"]

    # mount the disk back
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    disk_volume = client.by_id_volume(disk_volume_name)
    dev = get_volume_endpoint(disk_volume)
    common.mount_disk(dev, mount_path)

    # wait for update node status
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 False)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_TRUE)

    # check result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            free, total = common.get_host_disk_size(disk_path1)
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE

    # delete volume and umount disk
    cleanup_volume(client, vol_name)
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for update node status
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 False)
            wait_for_disk_status(client, lht_hostId, fsid, "storageScheduled",
                                 0)
            wait_for_disk_status(client, lht_hostId, fsid, "storageMaximum", 0)

    # test delete the umount disk
    node = client.by_id_node(lht_hostId)
    node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disks))
    assert len(node["disks"]) == len(update_disks)
    cmd = ['rm', '-r', mount_path]
    subprocess.check_call(cmd)
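
get_update_disks is another helper from the shared module; judging from the usage above, it flattens the node's fsid-keyed disk map into the list that node.diskUpdate(disks=...) accepts. A sketch, with the exact set of updatable fields as an assumption:

def get_update_disks(disks):
    # flatten the fsid -> disk map into the list-of-dicts form that
    # node.diskUpdate expects, keeping only the updatable fields
    return [{"path": disk["path"],
             "allowScheduling": disk["allowScheduling"],
             "storageReserved": disk["storageReserved"]}
            for disk in disks.values()]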
Example #9
def restore_and_check_random_backup(client, core_api, volume_name, pod_name,
                                    snapshots_md5sum):  # NOQA
    res_volume_name = volume_name + '-restore'

    host_id = get_self_host_id()

    snap_data = get_random_backup_snapshot_data(snapshots_md5sum)

    if snap_data is None:
        print("skipped, no recorded backup found", end=" ")
        return

    backup_url = snap_data.backup_url

    client.create_volume(name=res_volume_name,
                         size=VOLUME_SIZE,
                         fromBackup=backup_url)

    wait_for_volume_restoration_completed(client, res_volume_name)

    wait_for_volume_detached(client, res_volume_name)

    res_volume = client.by_id_volume(res_volume_name)

    res_volume.attach(hostId=host_id)

    res_volume = wait_for_volume_healthy(client, res_volume_name)

    dev = get_volume_endpoint(res_volume)

    mount_path = os.path.join(DIRECTORY_PATH, res_volume_name)

    command = ['mkdir', '-p', mount_path]
    subprocess.check_call(command)

    mount_disk(dev, mount_path)

    datafile_name = get_data_filename(pod_name)
    datafile_path = os.path.join(mount_path, datafile_name)

    command = ['md5sum', datafile_path]
    output = subprocess.check_output(command)

    bkp_data_md5sum = output.split()[0].decode('utf-8')

    bkp_checksum_ok = snap_data.data_md5sum == bkp_data_md5sum

    umount_disk(mount_path)

    command = ['rmdir', mount_path]
    subprocess.check_call(command)

    res_volume = client.by_id_volume(res_volume_name)

    res_volume.detach()

    wait_for_volume_detached(client, res_volume_name)

    delete_and_wait_longhorn(client, res_volume_name)

    assert bkp_checksum_ok
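
Shelling out to md5sum works on the test hosts, but the same checksum can be computed in-process with hashlib, avoiding the subprocess call and the output parsing; a drop-in helper:

import hashlib

def file_md5sum(path, chunk_size=1 << 20):
    # stream the file through hashlib.md5 instead of spawning `md5sum`
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()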
Example #10
def backing_image_content_test(client, volume_name_prefix, bi_name, bi_url):  # NOQA
    """
    Verify the content of the Backing Image is accessible and read-only for
    all volumes.

    1. Create a backing image. (Done by the caller)
    2. Create a Volume with the backing image set then attach it to host node.
    3. Verify that all disk states in the backing image are "downloaded".
    4. Verify volume can be directly mounted and there is already data in the
       filesystem due to the backing image.
    5. Verify the volume r/w.
    6. Launch one more volume with the same backing image.
    7. Verify the data content of the new volume is the same as the data in
       step 4.
    8. Do cleanup. (Done by the caller)
    """
    lht_host_id = get_self_host_id()

    volume_name1 = volume_name_prefix + "-1"
    volume1 = create_and_check_volume(
        client, volume_name1, 3,
        str(BACKING_IMAGE_EXT4_SIZE), bi_name)
    volume1.attach(hostId=lht_host_id)
    volume1 = wait_for_volume_healthy(client, volume_name1)
    assert volume1.backingImage == bi_name
    assert volume1.size == str(BACKING_IMAGE_EXT4_SIZE)

    backing_image = client.by_id_backing_image(bi_name)
    assert backing_image.imageURL == bi_url
    assert not backing_image.deletionTimestamp
    assert len(backing_image.diskStateMap) == 3
    for disk_id, state in iter(backing_image.diskStateMap.items()):
        assert state == "downloaded"

    # Since there is already a filesystem with data in the backing image,
    # we can directly mount and access the volume without `mkfs`.
    dev1 = get_volume_endpoint(volume1)
    mount_path1 = os.path.join(DIRECTORY_PATH, volume_name1)
    mount_disk(dev1, mount_path1)
    output1 = subprocess.check_output(["ls", mount_path1])
    # The following random write may crash the filesystem of volume1,
    # need to umount it here
    cleanup_host_disk(volume_name1)

    # Verify r/w for the volume with a backing image
    data = write_volume_random_data(volume1)
    check_volume_data(volume1, data)

    volume_name2 = volume_name_prefix + "-2"
    volume2 = create_and_check_volume(
        client, volume_name2, 3,
        str(BACKING_IMAGE_EXT4_SIZE), bi_name)
    volume2.attach(hostId=lht_host_id)
    volume2 = wait_for_volume_healthy(client, volume_name2)
    assert volume2.backingImage == bi_name
    assert volume2.size == str(BACKING_IMAGE_EXT4_SIZE)
    dev2 = get_volume_endpoint(volume2)
    mount_path2 = os.path.join(DIRECTORY_PATH, volume_name2)
    mount_disk(dev2, mount_path2)
    output2 = subprocess.check_output(["ls", mount_path2])
    # The output is the content of the backing image, which should keep
    # unchanged
    assert output2 == output1

    cleanup_host_disk(volume_name2)
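
mount_disk and umount_disk are shared helpers as well; a minimal sketch assuming they wrap plain mount(8)/umount(8) calls:

import os
import subprocess

def mount_disk(dev, mount_path):
    # create the mount point and mount the device's filesystem onto it
    os.makedirs(mount_path, exist_ok=True)
    subprocess.check_call(["mount", dev, mount_path])

def umount_disk(mount_path):
    subprocess.check_call(["umount", mount_path])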
Example #11
def ha_simple_recovery_test(client, volume_name, size, base_image=""):  # NOQA
    volume = client.create_volume(name=volume_name,
                                  size=size,
                                  numberOfReplicas=2,
                                  baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == size
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""
    assert volume["baseImage"] == base_image

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert get_volume_endpoint(volume) == DEV_PATH + volume_name

    assert len(volume["replicas"]) == 2
    replica0 = volume["replicas"][0]
    assert replica0["name"] != ""

    replica1 = volume["replicas"][1]
    assert replica1["name"] != ""

    data = write_volume_random_data(volume)

    volume = volume.replicaRemove(name=replica0["name"])

    # wait until we see a new replica start rebuilding
    new_replica_found = False
    for i in range(RETRY_COUNTS):
        v = client.by_id_volume(volume_name)
        for r in v["replicas"]:
            if r["name"] != replica0["name"] and \
                    r["name"] != replica1["name"]:
                new_replica_found = True
                break
        if new_replica_found:
            break
        time.sleep(RETRY_INTERVAL)
    assert new_replica_found

    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert volume["state"] == common.VOLUME_STATE_ATTACHED
    assert volume["robustness"] == common.VOLUME_ROBUSTNESS_HEALTHY
    assert len(volume["replicas"]) >= 2

    found = False
    for replica in volume["replicas"]:
        if replica["name"] == replica1["name"]:
            found = True
            break
    assert found

    check_volume_data(volume, data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
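
These helper-style functions appear to be driven by thin pytest wrappers. A hypothetical wrapper for the recovery scenario above, reusing the client-selection idiom from the other examples:

def test_ha_simple_recovery(clients, volume_name):  # NOQA
    # hypothetical wrapper: pick any client and run the scenario end-to-end
    for host_id, client in clients.items():
        break
    ha_simple_recovery_test(client, volume_name, SIZE)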