Code Example #1
def test_replica_scheduler_large_volume_fit_small_disk(client):  # NOQA
    nodes = client.list_node()
    # create a small size disk on current node
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    small_disk_path = create_host_disk(client, "vol-small", SIZE, lht_hostId)
    small_disk = {"path": small_disk_path, "allowScheduling": True}
    update_disks = get_update_disks(node["disks"])
    update_disks.append(small_disk)
    node = node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    unexpected_disk = {}
    for fsid, disk in node["disks"].iteritems():
        if disk["path"] == small_disk_path:
            unexpected_disk["fsid"] = fsid
            unexpected_disk["path"] = disk["path"]
            break

    # volume is too large to fit into the small disk on the current node
    vol_name = common.generate_volume_name()
    volume = create_volume(client, vol_name, str(Gi), lht_hostId, len(nodes))

    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])

    # check replica on current node shouldn't schedule to small disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        if id == lht_hostId:
            assert replica["diskID"] != unexpected_disk["fsid"]
            assert replica["dataPath"] != unexpected_disk["path"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    cleanup_volume(client, vol_name)

    # cleanup test disks
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk = disks[unexpected_disk["fsid"]]
    disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_status(client, lht_hostId, unexpected_disk["fsid"],
                                "allowScheduling", False)
    disks = node["disks"]
    disk = disks[unexpected_disk["fsid"]]
    assert not disk["allowScheduling"]
    disks.pop(unexpected_disk["fsid"])
    update_disks = get_update_disks(disks)
    node.diskUpdate(disks=update_disks)
    cleanup_host_disk(client, 'vol-small')
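Every example in this listing builds its update payload with get_update_disks() and then applies it with node.diskUpdate(disks=...). The helper itself is not shown anywhere in the listing, so the sketch below is only an assumption of what it might look like for the older dict-style client API used in the example above, where node["disks"] maps a filesystem ID to a disk entry and diskUpdate() accepts a plain list; the real helper in longhorn-tests' common.py may differ.

# Hypothetical sketch (not the actual longhorn-tests implementation): flatten
# a node's disk map into the list form that node.diskUpdate(disks=...) expects.
def get_update_disks(disks):
    update_disks = []
    for _fsid, disk in disks.items():  # the examples use iteritems() (Python 2)
        update_disks.append(disk)
    return update_disks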
Code Example #2
File: test_node.py Project: rancher/longhorn-tests
def test_node_controller_sync_storage_available(client):  # NOQA
    lht_hostId = get_self_host_id()
    # create a disk to test storageAvailable
    node = client.by_id_node(lht_hostId)
    test_disk_path = create_host_disk(client, "vol-test", SIZE, lht_hostId)
    test_disk = {"path": test_disk_path, "allowScheduling": True}
    update_disks = get_update_disks(node["disks"])
    update_disks.append(test_disk)
    node = node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    # write specified byte data into disk
    test_file_path = os.path.join(test_disk_path, TEST_FILE)
    if os.path.exists(test_file_path):
        os.remove(test_file_path)
    cmd = ['dd', 'if=/dev/zero', 'of=' + test_file_path, 'bs=1M', 'count=1']
    subprocess.check_call(cmd)
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    # wait for node controller update disk status
    expect_disk = {}
    free, total = common.get_host_disk_size(test_disk_path)
    for fsid, disk in disks.iteritems():
        if disk["path"] == test_disk_path:
            node = wait_for_disk_status(client, lht_hostId, fsid,
                                        "storageAvailable", free)
            expect_disk = node["disks"][fsid]
            break

    assert expect_disk["storageAvailable"] == free

    os.remove(test_file_path)
    # cleanup test disks
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    wait_fsid = ''
    for fsid, disk in disks.iteritems():
        if disk["path"] == test_disk_path:
            wait_fsid = fsid
            disk["allowScheduling"] = False

    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_status(client, lht_hostId, wait_fsid,
                                "allowScheduling", False)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == test_disk_path:
            disks.pop(fsid)
            break
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_update(client, lht_hostId, len(update_disks))
    assert len(node["disks"]) == len(update_disks)
    cleanup_host_disk(client, 'vol-test')
Code Example #3
File: test_node.py Project: rancher/longhorn-tests
def test_replica_scheduler_no_disks(client):  # NOQA
    nodes = client.list_node()
    # delete all disks on each node
    for node in nodes:
        disks = node["disks"]
        name = node["name"]
        # set allowScheduling to false
        for fsid, disk in disks.iteritems():
            disk["allowScheduling"] = False
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        for fsid, disk in node["disks"].iteritems():
            # wait for node controller update disk status
            wait_for_disk_status(client, name, fsid,
                                 "allowScheduling", False)
            wait_for_disk_status(client, name, fsid,
                                 "storageScheduled", 0)

        node = client.by_id_node(name)
        for fsid, disk in node["disks"].iteritems():
            assert not disk["allowScheduling"]
        node = node.diskUpdate(disks=[])
        node = common.wait_for_disk_update(client, name, 0)
        assert len(node["disks"]) == 0

    # test there's no disk fit for volume
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=SIZE, numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(client, vol_name,
                                                        "status",
                                                        CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)
Code Example #4
def test_replica_scheduler_no_disks(client):  # NOQA
    nodes = client.list_node()
    # delete all disks on each node
    for node in nodes:
        disks = node["disks"]
        name = node["name"]
        # set allowScheduling to false
        for fsid, disk in disks.iteritems():
            disk["allowScheduling"] = False
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        for fsid, disk in node["disks"].iteritems():
            # wait for node controller update disk status
            wait_for_disk_status(client, name, fsid, "allowScheduling", False)
            wait_for_disk_status(client, name, fsid, "storageScheduled", 0)

        node = client.by_id_node(name)
        for fsid, disk in node["disks"].iteritems():
            assert not disk["allowScheduling"]
        node = node.diskUpdate(disks=[])
        node = common.wait_for_disk_update(client, name, 0)
        assert len(node["disks"]) == 0

    # test there's no disk fit for volume
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=SIZE,
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)
Code Example #5
def test_replica_scheduler_exceed_over_provisioning(client):  # NOQA
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    # test exceed over provisioning limit couldn't be scheduled
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            disk["storageReserved"] = \
                disk["storageMaximum"] - 1*Gi
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(client, node["name"], fsid, "storageReserved",
                                 disk["storageMaximum"] - 1 * Gi)

    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=str(2 * Gi),
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)
    client.update(over_provisioning_setting, value=old_provisioning_setting)
Code Example #6
def test_replica_scheduler_just_under_over_provisioning(client):  # NOQA
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    lht_hostId = get_self_host_id()
    nodes = client.list_node()
    expect_node_disk = {}
    max_size_array = []
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk
                max_size_array.append(disk["storageMaximum"])
            disk["storageReserved"] = 0
            update_disks = get_update_disks(disks)
            node = node.diskUpdate(disks=update_disks)
            disks = node["disks"]
            for fsid, disk in disks.iteritems():
                wait_for_disk_status(client, node["name"], fsid,
                                     "storageReserved", 0)

    max_size = min(max_size_array)
    # test just under over provisioning limit could be scheduled
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=str(max_size),
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check all replica should be scheduled to default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean volume and disk
    cleanup_volume(client, vol_name)
    client.update(over_provisioning_setting, value=old_provisioning_setting)
Code Example #7
File: test_node.py Project: rancher/longhorn-tests
def test_replica_scheduler_exceed_over_provisioning(client):  # NOQA
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    # test exceed over provisioning limit couldn't be scheduled
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            disk["storageReserved"] = \
                disk["storageMaximum"] - 1*Gi
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(client, node["name"],
                                 fsid, "storageReserved",
                                 disk["storageMaximum"] - 1*Gi)

    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=str(2*Gi), numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(client, vol_name,
                                                        "status",
                                                        CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)
    client.update(over_provisioning_setting, value=old_provisioning_setting)
Code Example #8
File: test_tagging.py Project: ywei88/longhorn-tests
def test_tag_scheduling_on_update(client, node_default_tags, volume_name):  # NOQA
    """
    Test that Replicas get scheduled if a Node/Disk is updated with the
    proper Tags.
    """
    tag_spec = {
        "disk": ["ssd", "m2"],
        "expected": 1,
        "node": ["main", "fallback"]
    }
    client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=3,
                         diskSelector=tag_spec["disk"],
                         nodeSelector=tag_spec["node"])
    volume = wait_for_volume_detached(client, volume_name)
    assert volume["diskSelector"] == tag_spec["disk"]
    assert volume["nodeSelector"] == tag_spec["node"]

    wait_scheduling_failure(client, volume_name)

    host_id = get_self_host_id()
    node = client.by_id_node(host_id)
    update_disks = get_update_disks(node["disks"])
    update_disks[0]["tags"] = tag_spec["disk"]
    node = node.diskUpdate(disks=update_disks)
    set_node_tags(client, node, tag_spec["node"])
    scheduled = False
    for i in range(RETRY_COUNTS):
        v = client.by_id_volume(volume_name)
        if v["conditions"]["scheduled"]["status"] == "True":
            scheduled = True
        if scheduled:
            break
        sleep(RETRY_INTERVAL)
    assert scheduled

    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    nodes = client.list_node()
    node_mapping = {node["id"]: {
        "disk": get_update_disks(node["disks"])[0]["tags"],
        "node": node["tags"]
    } for node in nodes}
    assert len(volume["replicas"]) == 3
    check_volume_replicas(volume, tag_spec, node_mapping)

    cleanup_volume(client, volume)
Code Example #9
def delete_extra_disks_on_host(client, disk_names):  # NOQA
    lht_hostId = get_self_host_id()

    node = client.by_id_node(lht_hostId)
    update_disk = get_update_disks(node.disks)

    for disk_name in disk_names:
        update_disk[disk_name].allowScheduling = False
        update_disk[disk_name].evictionRequested = True

    node = node.diskUpdate(disks=update_disk)

    for disk_name in disk_names:
        wait_for_disk_status(client, lht_hostId, disk_name, "storageScheduled",
                             0)
Code Example #10
def test_tag_basic(client):  # NOQA
    """
    Test that applying Tags to Nodes/Disks and retrieving them work as
    expected. Ensures that Tags are properly validated when updated.

    1. Generate tags and apply to the disk and nodes
    2. Make sure the tags are applied
    3. Try to apply invalid tags to the disk and node. Action will fail.
    """
    host_id = get_self_host_id()
    node = client.by_id_node(host_id)
    disks = get_update_disks(node.disks)
    assert len(node.disks) == 1
    assert len(node.disks[list(node.disks)[0]].tags) == 0, f" disks = {disks}"
    assert len(node.tags) == 0

    unsorted_disk, sorted_disk = generate_unordered_tag_names()
    unsorted_node, sorted_node = generate_unordered_tag_names()
    update_disks = get_update_disks(node.disks)
    update_disks[list(update_disks)[0]].tags = unsorted_disk
    node = update_node_disks(client, node.name, disks=update_disks)
    disks = get_update_disks(node.disks)
    assert disks[list(disks)[0]].tags == sorted_disk

    node = set_node_tags(client, node, unsorted_node)
    assert node.tags == sorted_node

    improper_tag_cases = [
        [""],  # Empty string
        [" "],  # Whitespace
        ["/"],  # Leading /
        [","],  # Illegal character
    ]
    for tags in improper_tag_cases:
        with pytest.raises(Exception) as e:
            set_node_tags(client, node, tags)
        assert "at least one error encountered while validating tags" in \
               str(e.value)
        with pytest.raises(Exception) as e:
            update_disks = get_update_disks(node.disks)
            update_disks[list(update_disks)[0]].tags = tags
            update_node_disks(client, node.name, disks=update_disks)
        assert "at least one error encountered while validating tags" in \
               str(e.value)

    update_disks = get_update_disks(node.disks)
    update_disks[list(update_disks)[0]].tags = []
    node = update_node_disks(client, node.name, disks=update_disks)
    disks = get_update_disks(node.disks)
    assert len(node.disks[list(node.disks)[0]].tags) == 0, f"disks = {disks}"

    node = set_node_tags(client, node)
    assert len(node.tags) == 0
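The example above uses the newer attribute-style client API, where node.disks is a mapping of disk name to disk object and the payload passed to update_node_disks() keeps that shape (update_disks[list(update_disks)[0]].tags = ...). Under that assumption, the same helper would plausibly return a mapping keyed by disk name rather than a flat list; the sketch below is an illustration only, not the actual longhorn-tests implementation.

# Hypothetical sketch of get_update_disks() for the newer named-disk API:
# the payload stays a mapping of disk name -> disk entry.
def get_update_disks(disks):
    update_disks = {}
    for name, disk in disks.items():
        update_disks[name] = disk
    return update_disks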
Code Example #11
File: test_tagging.py Project: ywei88/longhorn-tests
def test_tag_basic(client):  # NOQA
    """
    Test that applying Tags to Nodes/Disks and retrieving them work as
    expected. Ensures that Tags are properly validated when updated.
    """
    host_id = get_self_host_id()
    node = client.by_id_node(host_id)
    disks = get_update_disks(node["disks"])
    assert len(node["disks"]) == 1
    assert disks[0]["tags"] is None
    assert node["tags"] is None

    unsorted_disk, sorted_disk = generate_unordered_tag_names()
    unsorted_node, sorted_node = generate_unordered_tag_names()
    update_disks = get_update_disks(node["disks"])
    update_disks[0]["tags"] = unsorted_disk
    node = node.diskUpdate(disks=update_disks)
    disks = get_update_disks(node["disks"])
    assert disks[0]["tags"] == sorted_disk

    node = set_node_tags(client, node, unsorted_node)
    assert node["tags"] == sorted_node

    improper_tag_cases = [
        [""],   # Empty string
        [" "],  # Whitespace
        ["/"],  # Leading /
        [","],  # Illegal character
    ]
    for tags in improper_tag_cases:
        with pytest.raises(Exception) as e:
            set_node_tags(client, node, tags)
        assert "at least one error encountered while validating tags" in \
               str(e.value)
        with pytest.raises(Exception) as e:
            update_disks = get_update_disks(node["disks"])
            update_disks[0]["tags"] = tags
            node.diskUpdate(disks=update_disks)
        assert "at least one error encountered while validating tags" in \
               str(e.value)

    update_disks = get_update_disks(node["disks"])
    update_disks[0]["tags"] = []
    node = node.diskUpdate(disks=update_disks)
    disks = get_update_disks(node["disks"])
    assert disks[0]["tags"] is None

    node = set_node_tags(client, node)
    assert node["tags"] is None
Code Example #12
def crate_disks_on_host(client, disk_names, request):  # NOQA
    disk_paths = []

    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    update_disks = get_update_disks(node.disks)

    for name in disk_names:
        disk_path = create_host_disk(client, name, str(Gi), lht_hostId)
        disk = {"path": disk_path, "allowScheduling": True}
        update_disks[name] = disk
        disk_paths.append(disk_path)

    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_update(client, node.name, len(update_disks))

    def finalizer():
        delete_extra_disks_on_host(client, disk_names)
        for disk_name in disk_names:
            cleanup_host_disks(client, disk_name)

    request.addfinalizer(finalizer)

    return disk_paths
Code Example #13
def test_orphaned_dirs_in_duplicated_disks(client, volume_name,
                                           request):  # NOQA
    """
    Test orphaned dirs in duplicated disks. LH should not create an orphan CR
    for the orphaned dir in the duplicated and unscheduled disk.
    1. Create a new disk for holding orphaned replica directories
    2. Create a folder under the new disk. This folder will be the duplicated
       disk. Add it to the node.
    3. Create a volume and attach to the current node
    4. Create multiple orphaned replica directories in the two disks by
       copying the active replica directory
    5. Clean up volume
    6. Verify orphan list only contains the orphan CRs for replica directories
       in the ready disk
    7. Delete all orphan CRs
    8. Verify orphan list is empty
    9. Verify orphaned directories in the new disk are deleted
    10. Verify orphaned directories in the duplicated disk are not deleted
    """

    disk_names = [
        "vol-disk-" + generate_random_id(4),
        "vol-disk-" + generate_random_id(4)
    ]

    # Step 1
    disk_paths = []

    lht_hostId = get_self_host_id()
    cleanup_node_disks(client, lht_hostId)
    disk_paths = crate_disks_on_host(client, [disk_names[0]], request)

    # Step 2: create duplicated disks for node
    node = client.by_id_node(lht_hostId)
    disks = node.disks
    disk_path = os.path.join(disk_paths[0], disk_names[1])
    disk_paths.append(disk_path)
    exec_nsenter("mkdir -p {}".format(disk_path))
    disk2 = {"path": disk_path, "allowScheduling": True}

    update_disk = get_update_disks(disks)
    update_disk[disk_names[1]] = disk2
    node = node.diskUpdate(disks=update_disk)
    node = wait_for_disk_update(client, lht_hostId, len(update_disk))

    # Step 3
    volume = create_volume_with_replica_on_host(client, volume_name)

    # Step 4
    num_orphans = 5
    create_orphaned_directories_on_host(volume, disk_paths, num_orphans)

    # Step 5
    cleanup_volume_by_name(client, volume_name)

    # Step 6
    assert wait_for_orphan_count(client, num_orphans, 180) == num_orphans

    # Step 7
    delete_orphans(client)

    # Step 8
    count = wait_for_orphan_count(client, 0, 180)
    assert count == 0

    # Step 9: The orphaned directories in the ready disk should be deleted
    assert wait_for_file_count(os.path.join(disk_paths[0], "replicas"), 0,
                               180) == 0

    # Step 10: The orphaned directories in the duplicated disk should not be
    # deleted
    assert wait_for_file_count(os.path.join(disk_paths[1], "replicas"),
                               num_orphans, 180) == num_orphans
Code Example #14
def test_tag_scheduling_on_update(client, node_default_tags,
                                  volume_name):  # NOQA
    """
    Test that Replicas get scheduled if a Node/Disk is updated with the
    proper Tags.

    Test prerequisites:
      - set Replica Node Level Soft Anti-Affinity enabled

    1. Create volume with tags that can not be satisfied
    2. Wait for volume to fail scheduling
    3. Update the node and disk with extra tags to satisfy the volume
    4. Verify now volume has been scheduled
    5. Attach the volume and check the replicas has been scheduled properly
    """
    replica_node_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    client.update(replica_node_soft_anti_affinity_setting, value="true")

    tag_spec = {
        "disk": ["ssd", "m2"],
        "expected": 1,
        "node": ["main", "fallback"]
    }
    client.create_volume(name=volume_name,
                         size=SIZE,
                         numberOfReplicas=3,
                         diskSelector=tag_spec["disk"],
                         nodeSelector=tag_spec["node"])
    volume = wait_for_volume_detached(client, volume_name)
    assert volume.diskSelector == tag_spec["disk"]
    assert volume.nodeSelector == tag_spec["node"]

    wait_scheduling_failure(client, volume_name)

    host_id = get_self_host_id()
    node = client.by_id_node(host_id)
    update_disks = get_update_disks(node.disks)
    update_disks[list(update_disks)[0]].tags = tag_spec["disk"]
    node = update_node_disks(client, node.name, disks=update_disks)
    set_node_tags(client, node, tag_spec["node"])
    scheduled = False
    for i in range(RETRY_COUNTS):
        v = client.by_id_volume(volume_name)
        if v.conditions.scheduled.status == "True":
            scheduled = True
        if scheduled:
            break
        sleep(RETRY_INTERVAL)
    assert scheduled

    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    nodes = client.list_node()
    node_mapping = {
        node.id: {
            "disk": node.disks[list(node.disks)[0]].tags,
            "node": node.tags
        }
        for node in nodes
    }
    assert len(volume.replicas) == 3
    check_volume_replicas(volume, tag_spec, node_mapping)

    cleanup_volume(client, volume)
Code Example #15
def test_replica_scheduler_too_large_volume_fit_any_disks(client):  # NOQA
    nodes = client.list_node()
    lht_hostId = get_self_host_id()
    expect_node_disk = {}
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk
            disk["storageReserved"] = disk["storageMaximum"]
        update_disks = get_update_disks(disks)
        node.diskUpdate(disks=update_disks)

    # volume is too large to fit into any disk
    volume_size = 4 * Gi
    vol_name = common.generate_volume_name()
    client.create_volume(name=vol_name,
                         size=str(volume_size),
                         numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)

    # Reduce StorageReserved of each default disk so that each node can fit
    # only one replica.
    needed_for_scheduling = int(
        volume_size * 1.5 * 100 /
        int(DEFAULT_STORAGE_OVER_PROVISIONING_PERCENTAGE))
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        update_disks = get_update_disks(disks)
        for disk in update_disks:
            disk["storageReserved"] = \
                disk["storageMaximum"] - needed_for_scheduling
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(
                client, node["name"], fsid, "storageReserved",
                disk["storageMaximum"] - needed_for_scheduling)

    # check volume status
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check all replica should be scheduled to default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean volume and disk
    cleanup_volume(client, vol_name)
Code Example #16
def test_node_disk_update(client):  # NOQA
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    # test add same disk by different mount path exception
    with pytest.raises(Exception) as e:
        disk = {
            "path": "/var/lib",
            "allowScheduling": True,
            "storageReserved": 2 * Gi
        }
        update_disk = get_update_disks(disks)
        update_disk.append(disk)
        node = node.diskUpdate(disks=update_disk)
    assert "the same file system" in str(e.value)

    # test delete disk exception
    with pytest.raises(Exception) as e:
        node.diskUpdate(disks=[])
    assert "disable the disk" in str(e.value)

    # test storageReserved invalid exception
    with pytest.raises(Exception) as e:
        for fsid, disk in disks.iteritems():
            disk["storageReserved"] = disk["storageMaximum"] + 1 * Gi
        update_disk = get_update_disks(disks)
        node.diskUpdate(disks=update_disk)
    assert "storageReserved setting of disk" in str(e.value)

    # create multiple disks for node
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk_path1 = create_host_disk(client, 'vol-disk-1', str(Gi), lht_hostId)
    disk1 = {"path": disk_path1, "allowScheduling": True}
    disk_path2 = create_host_disk(client, 'vol-disk-2', str(Gi), lht_hostId)
    disk2 = {"path": disk_path2, "allowScheduling": True}

    update_disk = get_update_disks(disks)
    # add new disk for node
    update_disk.append(disk1)
    update_disk.append(disk2)

    # save disks to node
    node = node.diskUpdate(disks=update_disk)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disk))
    assert len(node["disks"]) == len(update_disk)
    node = client.by_id_node(lht_hostId)
    assert len(node["disks"]) == len(update_disk)

    # update disk
    disks = node["disks"]
    update_disk = get_update_disks(disks)
    for disk in update_disk:
        # keep default disk for other tests
        if disk["path"] == disk_path1 or disk["path"] == disk_path2:
            disk["allowScheduling"] = False
            disk["storageReserved"] = SMALL_DISK_SIZE
    node = node.diskUpdate(disks=update_disk)
    disks = node["disks"]
    # wait for node controller to update disk status
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1 or disk["path"] == disk_path2:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 False)
            wait_for_disk_status(client, lht_hostId, fsid, "storageReserved",
                                 SMALL_DISK_SIZE)
            free, total = common.get_host_disk_size(disk_path1)
            wait_for_disk_status(client, lht_hostId, fsid, "storageAvailable",
                                 free)

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for key, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            assert not disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path1)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
        elif disk["path"] == disk_path2:
            assert not disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path2)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free

    # delete other disks, just remain default disk
    update_disk = get_update_disks(disks)
    remain_disk = []
    for disk in update_disk:
        if disk["path"] != disk_path1 and disk["path"] != disk_path2:
            remain_disk.append(disk)
    node = node.diskUpdate(disks=remain_disk)
    node = wait_for_disk_update(client, lht_hostId, len(remain_disk))
    assert len(node["disks"]) == len(remain_disk)
    # cleanup disks
    cleanup_host_disk(client, 'vol-disk-1', 'vol-disk-2')
Code Example #17
File: test_node.py Project: rancher/longhorn-tests
def test_replica_scheduler_large_volume_fit_small_disk(client):  # NOQA
    nodes = client.list_node()
    # create a small size disk on current node
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    small_disk_path = create_host_disk(client, "vol-small",
                                       SIZE, lht_hostId)
    small_disk = {"path": small_disk_path, "allowScheduling": True}
    update_disks = get_update_disks(node["disks"])
    update_disks.append(small_disk)
    node = node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    unexpected_disk = {}
    for fsid, disk in node["disks"].iteritems():
        if disk["path"] == small_disk_path:
            unexpected_disk["fsid"] = fsid
            unexpected_disk["path"] = disk["path"]
            break

    # volume is too large to fit into the small disk on the current node
    vol_name = common.generate_volume_name()
    volume = create_volume(client, vol_name, str(Gi), lht_hostId, len(nodes))

    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])

    # check replica on current node shouldn't schedule to small disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        if id == lht_hostId:
            assert replica["diskID"] != unexpected_disk["fsid"]
            assert replica["dataPath"] != unexpected_disk["path"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    cleanup_volume(client, vol_name)

    # cleanup test disks
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk = disks[unexpected_disk["fsid"]]
    disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_status(client, lht_hostId,
                                unexpected_disk["fsid"],
                                "allowScheduling", False)
    disks = node["disks"]
    disk = disks[unexpected_disk["fsid"]]
    assert not disk["allowScheduling"]
    disks.pop(unexpected_disk["fsid"])
    update_disks = get_update_disks(disks)
    node.diskUpdate(disks=update_disks)
    cleanup_host_disk(client, 'vol-small')
Code Example #18
def test_replica_cleanup(client):  # NOQA
    nodes = client.list_node()
    lht_hostId = get_self_host_id()

    node = client.by_id_node(lht_hostId)
    extra_disk_path = create_host_disk(client, "extra-disk", "10G", lht_hostId)
    extra_disk = {"path": extra_disk_path, "allowScheduling": True}
    update_disks = get_update_disks(node["disks"])
    update_disks.append(extra_disk)
    node = node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    extra_disk_fsid = ""
    for fsid, disk in node["disks"].iteritems():
        if disk["path"] == extra_disk_path:
            extra_disk_fsid = fsid
            break

    for node in nodes:
        # disable all the disks except the ones on the current node
        if node["name"] == lht_hostId:
            continue
        for fsid, disk in node["disks"].iteritems():
            break
        disk["allowScheduling"] = False
        update_disks = get_update_disks(node["disks"])
        node.diskUpdate(disks=update_disks)
        node = wait_for_disk_status(client, node["name"], fsid,
                                    "allowScheduling", False)

    vol_name = common.generate_volume_name()
    # more replicas, make sure both default and extra disk will get one
    volume = create_volume(client, vol_name, str(Gi), lht_hostId, 5)
    data_paths = []
    for replica in volume["replicas"]:
        data_paths.append(replica["dataPath"])

    # data path should exist now
    for data_path in data_paths:
        assert exec_nsenter("ls {}".format(data_path))

    cleanup_volume(client, vol_name)

    # data path should be gone due to the cleanup of replica
    for data_path in data_paths:
        with pytest.raises(subprocess.CalledProcessError):
            exec_nsenter("ls {}".format(data_path))

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk = disks[extra_disk_fsid]
    disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_status(client, lht_hostId, extra_disk_fsid,
                                "allowScheduling", False)
    wait_for_disk_status(client, lht_hostId, extra_disk_fsid,
                         "storageScheduled", 0)

    disks = node["disks"]
    disk = disks[extra_disk_fsid]
    assert not disk["allowScheduling"]
    disks.pop(extra_disk_fsid)
    update_disks = get_update_disks(disks)
    node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disks))

    cleanup_host_disk(client, 'extra-disk')
Code Example #19
File: test_node.py Project: rancher/longhorn-tests
def test_replica_cleanup(client):  # NOQA
    nodes = client.list_node()
    lht_hostId = get_self_host_id()

    node = client.by_id_node(lht_hostId)
    extra_disk_path = create_host_disk(client, "extra-disk",
                                       "10G", lht_hostId)
    extra_disk = {"path": extra_disk_path, "allowScheduling": True}
    update_disks = get_update_disks(node["disks"])
    update_disks.append(extra_disk)
    node = node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    extra_disk_fsid = ""
    for fsid, disk in node["disks"].iteritems():
        if disk["path"] == extra_disk_path:
            extra_disk_fsid = fsid
            break

    for node in nodes:
        # disable all the disks except the ones on the current node
        if node["name"] == lht_hostId:
            continue
        for fsid, disk in node["disks"].iteritems():
            break
        disk["allowScheduling"] = False
        update_disks = get_update_disks(node["disks"])
        node.diskUpdate(disks=update_disks)
        node = wait_for_disk_status(client, node["name"],
                                    fsid,
                                    "allowScheduling", False)

    vol_name = common.generate_volume_name()
    # more replicas, make sure both default and extra disk will get one
    volume = create_volume(client, vol_name, str(Gi), lht_hostId, 5)
    data_paths = []
    for replica in volume["replicas"]:
        data_paths.append(replica["dataPath"])

    # data path should exist now
    for data_path in data_paths:
        assert exec_nsenter("ls {}".format(data_path))

    cleanup_volume(client, vol_name)

    # data path should be gone due to the cleanup of replica
    for data_path in data_paths:
        with pytest.raises(subprocess.CalledProcessError):
            exec_nsenter("ls {}".format(data_path))

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk = disks[extra_disk_fsid]
    disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_status(client, lht_hostId,
                                extra_disk_fsid,
                                "allowScheduling", False)
    wait_for_disk_status(client, lht_hostId, extra_disk_fsid,
                         "storageScheduled", 0)

    disks = node["disks"]
    disk = disks[extra_disk_fsid]
    assert not disk["allowScheduling"]
    disks.pop(extra_disk_fsid)
    update_disks = get_update_disks(disks)
    node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))

    cleanup_host_disk(client, 'extra-disk')
Code Example #20
File: test_node.py Project: rancher/longhorn-tests
def test_node_delete_umount_disks(client):  # NOQA
    # create test disks for node
    disk_volume_name = 'vol-disk-1'
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk_path1 = create_host_disk(client, disk_volume_name,
                                  str(Gi), lht_hostId)
    disk1 = {"path": disk_path1, "allowScheduling": True,
             "storageReserved": SMALL_DISK_SIZE}

    update_disk = get_update_disks(disks)
    for disk in update_disk:
        disk["allowScheduling"] = False
    # add new disk for node
    update_disk.append(disk1)
    # save disks to node
    node = node.diskUpdate(disks=update_disk)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disk))
    assert len(node["disks"]) == len(update_disk)
    node = client.by_id_node(lht_hostId)
    assert len(node["disks"]) == len(update_disk)

    disks = node["disks"]
    # wait for node controller to update disk status
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", True)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageReserved", SMALL_DISK_SIZE)
            free, total = common.get_host_disk_size(disk_path1)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageAvailable", free)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageMaximum", total)

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for key, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            assert disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path1)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            assert not disk["allowScheduling"]

    # create a volume
    nodes = client.list_node()
    vol_name = common.generate_volume_name()
    volume = create_volume(client, vol_name, str(SMALL_DISK_SIZE),
                           lht_hostId, len(nodes))
    replicas = volume["replicas"]
    for replica in replicas:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        if id == lht_hostId:
            assert replica["dataPath"].startswith(disk_path1)

    # umount the disk
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for update node status
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId,
                                 fsid, "allowScheduling", False)
            wait_for_disk_status(client, lht_hostId,
                                 fsid, "storageMaximum", 0)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_FALSE)

    # check result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    update_disks = []
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == 0
            assert disk["storageAvailable"] == 0
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_FALSE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_FALSE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
            update_disks.append(disk)

    # delete umount disk exception
    with pytest.raises(Exception) as e:
        node.diskUpdate(disks=update_disks)
    assert "disable the disk" in str(e.value)

    # update other disks
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] != disk_path1:
            disk["allowScheduling"] = True
    test_update = get_update_disks(disks)
    node = node.diskUpdate(disks=test_update)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] != disk_path1:
            wait_for_disk_status(client, lht_hostId,
                                 fsid, "allowScheduling", True)
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] != disk_path1:
            assert disk["allowScheduling"]

    # mount the disk back
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    disk_volume = client.by_id_volume(disk_volume_name)
    dev = get_volume_endpoint(disk_volume)
    common.mount_disk(dev, mount_path)

    # wait for update node status
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId,
                                 fsid, "allowScheduling", False)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_TRUE)

    # check result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            free, total = common.get_host_disk_size(disk_path1)
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE

    # delete volume and umount disk
    cleanup_volume(client, vol_name)
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for update node status
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId,
                                 fsid, "allowScheduling", False)
            wait_for_disk_status(client, lht_hostId,
                                 fsid, "storageScheduled", 0)
            wait_for_disk_status(client, lht_hostId,
                                 fsid, "storageMaximum", 0)

    # test delete the umount disk
    node = client.by_id_node(lht_hostId)
    node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))
    assert len(node["disks"]) == len(update_disks)
    cmd = ['rm', '-r', mount_path]
    subprocess.check_call(cmd)
Code Example #21
File: test_node.py Project: rancher/longhorn-tests
def test_node_disk_update(client):  # NOQA
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    # test add same disk by different mount path exception
    with pytest.raises(Exception) as e:
        disk = {"path": "/var/lib", "allowScheduling": True,
                "storageReserved": 2 * Gi}
        update_disk = get_update_disks(disks)
        update_disk.append(disk)
        node = node.diskUpdate(disks=update_disk)
    assert "the same file system" in str(e.value)

    # test delete disk exception
    with pytest.raises(Exception) as e:
        node.diskUpdate(disks=[])
    assert "disable the disk" in str(e.value)

    # test storageReserved invalid exception
    with pytest.raises(Exception) as e:
        for fsid, disk in disks.iteritems():
            disk["storageReserved"] = disk["storageMaximum"] + 1*Gi
        update_disk = get_update_disks(disks)
        node.diskUpdate(disks=update_disk)
    assert "storageReserved setting of disk" in str(e.value)

    # create multiple disks for node
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk_path1 = create_host_disk(client, 'vol-disk-1',
                                  str(Gi), lht_hostId)
    disk1 = {"path": disk_path1, "allowScheduling": True}
    disk_path2 = create_host_disk(client, 'vol-disk-2',
                                  str(Gi), lht_hostId)
    disk2 = {"path": disk_path2, "allowScheduling": True}

    update_disk = get_update_disks(disks)
    # add new disk for node
    update_disk.append(disk1)
    update_disk.append(disk2)

    # save disks to node
    node = node.diskUpdate(disks=update_disk)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disk))
    assert len(node["disks"]) == len(update_disk)
    node = client.by_id_node(lht_hostId)
    assert len(node["disks"]) == len(update_disk)

    # update disk
    disks = node["disks"]
    update_disk = get_update_disks(disks)
    for disk in update_disk:
        # keep default disk for other tests
        if disk["path"] == disk_path1 or disk["path"] == disk_path2:
            disk["allowScheduling"] = False
            disk["storageReserved"] = SMALL_DISK_SIZE
    node = node.diskUpdate(disks=update_disk)
    disks = node["disks"]
    # wait for node controller to update disk status
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1 or disk["path"] == disk_path2:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", False)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageReserved", SMALL_DISK_SIZE)
            free, total = common.get_host_disk_size(disk_path1)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageAvailable", free)

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for key, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            assert not disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path1)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
        elif disk["path"] == disk_path2:
            assert not disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path2)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free

    # delete other disks, just remain default disk
    update_disk = get_update_disks(disks)
    remain_disk = []
    for disk in update_disk:
        if disk["path"] != disk_path1 and disk["path"] != disk_path2:
            remain_disk.append(disk)
    node = node.diskUpdate(disks=remain_disk)
    node = wait_for_disk_update(client, lht_hostId,
                                len(remain_disk))
    assert len(node["disks"]) == len(remain_disk)
    # cleanup disks
    cleanup_host_disk(client, 'vol-disk-1', 'vol-disk-2')
Code Example #22
File: test_node.py Project: rancher/longhorn-tests
def test_replica_scheduler_just_under_over_provisioning(client):  # NOQA
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    lht_hostId = get_self_host_id()
    nodes = client.list_node()
    expect_node_disk = {}
    max_size_array = []
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk
                max_size_array.append(disk["storageMaximum"])
            disk["storageReserved"] = 0
            update_disks = get_update_disks(disks)
            node = node.diskUpdate(disks=update_disks)
            disks = node["disks"]
            for fsid, disk in disks.iteritems():
                wait_for_disk_status(client, node["name"],
                                     fsid, "storageReserved", 0)

    max_size = min(max_size_array)
    # a volume just under the over-provisioning limit should be schedulable
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=str(max_size),
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(client, vol_name,
                                                        "status",
                                                        CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check that all replicas are scheduled to the default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean up the volume and restore the setting
    cleanup_volume(client, vol_name)
    client.update(over_provisioning_setting, value=old_provisioning_setting)
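
The sizing logic behind this test: with storageReserved forced to 0 and the over-provisioning percentage set to 100, a volume whose size equals the smallest storageMaximum across the nodes sits exactly at the scheduling limit. The function below is only an illustrative approximation of that check, not the scheduler code in longhorn-manager, which applies further conditions (for example a minimal-available-percentage check).

def disk_fits_replica(storage_scheduled, storage_maximum, storage_reserved,
                      replica_size, over_provisioning_percentage):
    # Approximate rule: the replica fits if the storage already scheduled
    # plus the new replica stays within the disk's over-provisioning budget.
    budget = (storage_maximum - storage_reserved) * \
        over_provisioning_percentage // 100
    return storage_scheduled + replica_size <= budget


# A replica sized at the full storageMaximum just fits at 100 percent,
# but not once anything is reserved on the disk.
assert disk_fits_replica(0, 4 * 1024 ** 3, 0, 4 * 1024 ** 3, 100)
assert not disk_fits_replica(0, 4 * 1024 ** 3, 1, 4 * 1024 ** 3, 100)
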
Code example #24
def test_disk_evicted(client, volume_name, request):  # NOQA
    """
    Test the orphan CR is deleted in background but on-disk data still exists
    if the disk is evicted
    1. Create a new disk for holding valid and invalid orphaned
       replica directories
    2. Create a volume and attach to the current node
    3. Create a valid orphaned replica directories by copying the active
       replica directory
    4. Clean up volume
    5. Verify orphan list contains the valid orphaned replica directory
    6. Evict the disk containing the orphaned replica directory
    7. Verify the orphan CR is deleted in background, but the on-disk orphaned
       replica directory still exists
    8. Set the disk scheduleable again
    9. Verify the orphan CR is created again and the on-disk orphaned replica
       directory still exists
    """

    disk_names = ["vol-disk-" + generate_random_id(4)]

    lht_hostId = get_self_host_id()

    # Step 1
    cleanup_node_disks(client, lht_hostId)
    disk_paths = crate_disks_on_host(client, disk_names, request)

    # Step 2
    volume = create_volume_with_replica_on_host(client, volume_name)

    # Step 3
    create_orphaned_directories_on_host(volume, disk_paths, 1)

    # Step 4
    cleanup_volume_by_name(client, volume_name)

    # Step 5
    assert wait_for_orphan_count(client, 1, 180) == 1

    # Step 6: Request disk eviction by setting evictionRequested
    node = client.by_id_node(lht_hostId)
    update_disks = get_update_disks(node.disks)

    update_disks[disk_names[0]].allowScheduling = False
    update_disks[disk_names[0]].evictionRequested = True
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_update(client, node.name, len(update_disks))

    # Step 7
    assert wait_for_orphan_count(client, 0, 180) == 0

    assert wait_for_file_count(os.path.join(disk_paths[0], "replicas"), 1,
                               180) == 1

    # Step 8: Set disk allowScheduling to true and evictionRequested to false
    node = client.by_id_node(lht_hostId)
    update_disks = get_update_disks(node.disks)

    update_disks[disk_names[0]].allowScheduling = True
    update_disks[disk_names[0]].evictionRequested = False
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_update(client, node.name, len(update_disks))

    # Step 9
    assert wait_for_orphan_count(client, 1, 180) == 1

    assert wait_for_file_count(os.path.join(disk_paths[0], "replicas"), 1,
                               180) == 1
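
wait_for_orphan_count and wait_for_file_count above are suite helpers whose implementations are not shown here. As a rough illustration, a directory-polling helper in the spirit of wait_for_file_count could look like the sketch below; the timeout handling is an assumption.

import os
import time


def wait_for_file_count_sketch(path, expected_count, timeout_seconds):
    # Re-read the directory until it holds the expected number of entries
    # or the timeout expires, then return the last observed count.
    deadline = time.time() + timeout_seconds
    count = len(os.listdir(path))
    while count != expected_count and time.time() < deadline:
        time.sleep(1)
        count = len(os.listdir(path))
    return count
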
Code example #25
def test_node_default_disk_labeled(
        client,
        core_api,
        random_disk_path,
        reset_default_disk_label,  # NOQA
        reset_disk_settings):  # NOQA
    """
    Test that only Nodes with the proper label applied get a default Disk
    created on them when one doesn't already exist. Makes sure the created
    Disk matches the Default Data Path Setting.
    """
    # Set up cases.
    cases = {"disk_exists": None, "labeled": None, "unlabeled": None}
    nodes = client.list_node()
    assert len(nodes) >= 3

    node = nodes[0]
    cases["disk_exists"] = node["id"]
    core_api.patch_node(
        node["id"],
        {"metadata": {
            "labels": {
                CREATE_DEFAULT_DISK_LABEL: "true"
            }
        }})

    node = nodes[1]
    cases["labeled"] = node["id"]
    core_api.patch_node(
        node["id"],
        {"metadata": {
            "labels": {
                CREATE_DEFAULT_DISK_LABEL: "true"
            }
        }})
    disks = node["disks"]
    for _, disk in disks.iteritems():
        disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = node.diskUpdate(disks=[])
    wait_for_disk_update(client, node["id"], 0)

    node = nodes[2]
    cases["unlabeled"] = node["id"]
    disks = node["disks"]
    for _, disk in disks.iteritems():
        disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = node.diskUpdate(disks=[])
    wait_for_disk_update(client, node["id"], 0)

    # Set disk creation and path Settings.
    setting = client.by_id_setting(DEFAULT_DATA_PATH_SETTING)
    client.update(setting, value=random_disk_path)
    setting = client.by_id_setting(CREATE_DEFAULT_DISK_SETTING)
    client.update(setting, value="true")
    wait_for_disk_update(client, cases["labeled"], 1)

    # Check each case.
    node = client.by_id_node(cases["disk_exists"])
    assert len(node["disks"]) == 1
    assert get_update_disks(node["disks"])[0]["path"] == \
        DEFAULT_DISK_PATH

    node = client.by_id_node(cases["labeled"])
    assert len(node["disks"]) == 1
    assert get_update_disks(node["disks"])[0]["path"] == \
        random_disk_path

    node = client.by_id_node(cases["unlabeled"])
    assert len(node["disks"]) == 0
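
The labels applied above are presumably undone by the reset_default_disk_label fixture. One plausible way to do that with the same CoreV1Api handle is sketched below: in a strategic merge patch, setting a label's value to None removes it. This is an assumption about the fixture, not its actual implementation.

def remove_default_disk_label(core_api, node_id, label_key):
    # Patching the label value to None deletes it from the node's metadata.
    core_api.patch_node(
        node_id,
        {"metadata": {
            "labels": {
                label_key: None
            }
        }})
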
Code example #26
def test_node_delete_umount_disks(client):  # NOQA
    # create a test disk for the node
    disk_volume_name = 'vol-disk-1'
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk_path1 = create_host_disk(client, disk_volume_name, str(Gi),
                                  lht_hostId)
    disk1 = {
        "path": disk_path1,
        "allowScheduling": True,
        "storageReserved": SMALL_DISK_SIZE
    }

    update_disk = get_update_disks(disks)
    for disk in update_disk:
        disk["allowScheduling"] = False
    # add the new disk to the node
    update_disk.append(disk1)
    # save the disk list to the node
    node = node.diskUpdate(disks=update_disk)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disk))
    assert len(node["disks"]) == len(update_disk)
    node = client.by_id_node(lht_hostId)
    assert len(node["disks"]) == len(update_disk)

    disks = node["disks"]
    # wait for node controller to update disk status
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 True)
            wait_for_disk_status(client, lht_hostId, fsid, "storageReserved",
                                 SMALL_DISK_SIZE)
            free, total = common.get_host_disk_size(disk_path1)
            wait_for_disk_status(client, lht_hostId, fsid, "storageAvailable",
                                 free)
            wait_for_disk_status(client, lht_hostId, fsid, "storageMaximum",
                                 total)

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for key, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            assert disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path1)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            assert not disk["allowScheduling"]

    # create a volume
    nodes = client.list_node()
    vol_name = common.generate_volume_name()
    volume = create_volume(client, vol_name, str(SMALL_DISK_SIZE), lht_hostId,
                           len(nodes))
    replicas = volume["replicas"]
    for replica in replicas:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        if id == lht_hostId:
            assert replica["dataPath"].startswith(disk_path1)

    # umount the disk
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for the node status to be updated
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 False)
            wait_for_disk_status(client, lht_hostId, fsid, "storageMaximum", 0)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_FALSE)

    # check result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    update_disks = []
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == 0
            assert disk["storageAvailable"] == 0
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_FALSE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_FALSE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
            update_disks.append(disk)

    # deleting the unmounted disk should raise an exception
    with pytest.raises(Exception) as e:
        node.diskUpdate(disks=update_disks)
    assert "disable the disk" in str(e.value)

    # update other disks
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] != disk_path1:
            disk["allowScheduling"] = True
    test_update = get_update_disks(disks)
    node = node.diskUpdate(disks=test_update)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] != disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 True)
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] != disk_path1:
            assert disk["allowScheduling"]

    # mount the disk back
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    disk_volume = client.by_id_volume(disk_volume_name)
    dev = get_volume_endpoint(disk_volume)
    common.mount_disk(dev, mount_path)

    # wait for the node status to be updated
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 False)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_TRUE)

    # check result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            free, total = common.get_host_disk_size(disk_path1)
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE

    # delete the volume and unmount the disk
    cleanup_volume(client, vol_name)
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for the node status to be updated
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid, "allowScheduling",
                                 False)
            wait_for_disk_status(client, lht_hostId, fsid, "storageScheduled",
                                 0)
            wait_for_disk_status(client, lht_hostId, fsid, "storageMaximum", 0)

    # test deleting the unmounted disk (should succeed now)
    node = client.by_id_node(lht_hostId)
    node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disks))
    assert len(node["disks"]) == len(update_disks)
    cmd = ['rm', '-r', mount_path]
    subprocess.check_call(cmd)
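
common.umount_disk and common.mount_disk wrap the host's mount commands; the exact options they pass are not shown on this page. A minimal sketch of that wrapping, labelled as an assumption:

import subprocess


def umount_disk_sketch(mount_path):
    # Unmount the filesystem at mount_path on the host.
    subprocess.check_call(["umount", mount_path])


def mount_disk_sketch(dev, mount_path):
    # Create the mount point if needed and mount the block device there.
    subprocess.check_call(["mkdir", "-p", mount_path])
    subprocess.check_call(["mount", dev, mount_path])
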
Code example #27
File: test_node.py  Project: rancher/longhorn-tests
def test_replica_scheduler_too_large_volume_fit_any_disks(client):  # NOQA
    nodes = client.list_node()
    lht_hostId = get_self_host_id()
    expect_node_disk = {}
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk
            disk["storageReserved"] = disk["storageMaximum"]
        update_disks = get_update_disks(disks)
        node.diskUpdate(disks=update_disks)

    # the volume is too large to fit on any disk
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name,
                                  size=str(4*Gi), numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(client, vol_name,
                                                        "status",
                                                        CONDITION_STATUS_FALSE)

    # reduce storageReserved on each default disk
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        update_disks = get_update_disks(disks)
        for disk in update_disks:
            disk["storageReserved"] = 0
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(client, node["name"],
                                 fsid, "storageReserved", 0)

    # check volume status
    volume = common.wait_for_volume_condition_scheduled(client, vol_name,
                                                        "status",
                                                        CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check that all replicas are scheduled to the default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean up the volume
    cleanup_volume(client, vol_name)
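
One portability note that applies to several examples on this page: they are written for Python 2, where dicts have iteritems() and filter() returns a list, so len(node_hosts) works directly after the filter call. Under Python 3 the same bookkeeping needs a list comprehension, for example:

def remove_scheduled_host(node_hosts, host_id):
    # Python 3-safe equivalent of: filter(lambda x: x != host_id, node_hosts)
    return [host for host in node_hosts if host != host_id]


assert remove_scheduled_host(["node-1", "node-2"], "node-1") == ["node-2"]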