def test_replica_scheduler_just_under_over_provisioning(client):  # NOQA
    """
    Test that a volume just under the over-provisioning limit can be
    scheduled: with over-provisioning at 100% and no reserved storage, a
    volume sized to the smallest disk still gets all replicas scheduled
    to the default disks.
    """
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    lht_hostId = get_self_host_id()
    nodes = client.list_node()
    expect_node_disk = {}
    max_size_array = []
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk
                max_size_array.append(disk["storageMaximum"])
            disk["storageReserved"] = 0
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(client, node["name"], fsid,
                                 "storageReserved", 0)

    max_size = min(max_size_array)
    # test that a volume just under the over-provisioning limit can be
    # scheduled
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=str(max_size),
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check that every replica is scheduled to the default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean up the volume and restore the setting
    cleanup_volume(client, vol_name)
    client.update(over_provisioning_setting,
                  value=old_provisioning_setting)


@pytest.fixture
def k8s_node_zone_tags(client, core_api):  # NOQA
    """
    Fixture: label half of the Kubernetes nodes with ZONE1 and the other
    half with ZONE2, then clear the zone labels on teardown.
    """
    k8s_zone_label = get_k8s_zone_label()
    lh_nodes = client.list_node()

    node_index = 0
    for node in lh_nodes:
        node_name = node.name

        if node_index % 2 == 0:
            zone = ZONE1
        else:
            zone = ZONE2

        payload = {"metadata": {"labels": {k8s_zone_label: zone}}}
        core_api.patch_node(node_name, body=payload)

        node_index += 1

    yield

    lh_nodes = client.list_node()
    for node in lh_nodes:
        node_name = node.name
        # setting a label's value to None removes it from the node
        payload = {"metadata": {"labels": {k8s_zone_label: None}}}
        core_api.patch_node(node_name, body=payload)


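# A minimal sketch of the get_k8s_zone_label() helper the fixture above
# relies on. This is an assumption for illustration: the real helper in
# common.py likely picks the label key based on the Kubernetes version,
# since the well-known zone label changed in v1.17.
def get_k8s_zone_label():  # NOQA
    # GA label on v1.17+; older clusters use the beta key instead:
    # "failure-domain.beta.kubernetes.io/zone"
    return "topology.kubernetes.io/zone"

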
def test_replica_scheduler_large_volume_fit_small_disk(client):  # NOQA
    """
    Test that replicas of a volume too large for a small disk are never
    scheduled onto that disk, even though the disk allows scheduling.
    """
    nodes = client.list_node()
    # create a small disk on the current node
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    small_disk_path = create_host_disk(client, "vol-small",
                                       SIZE, lht_hostId)
    small_disk = {"path": small_disk_path, "allowScheduling": True}
    update_disks = get_update_disks(node["disks"])
    update_disks.append(small_disk)
    node = node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    unexpected_disk = {}
    for fsid, disk in node["disks"].iteritems():
        if disk["path"] == small_disk_path:
            unexpected_disk["fsid"] = fsid
            unexpected_disk["path"] = disk["path"]
            break

    # the volume is too large to fit on the small disk on the current node
    vol_name = common.generate_volume_name()
    volume = create_volume(client, vol_name, str(Gi),
                           lht_hostId, len(nodes))

    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])

    # the replica on the current node must not be scheduled to the small disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        if id == lht_hostId:
            assert replica["diskID"] != unexpected_disk["fsid"]
            assert replica["dataPath"] != unexpected_disk["path"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    cleanup_volume(client, vol_name)

    # clean up the test disk
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk = disks[unexpected_disk["fsid"]]
    disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_status(client, lht_hostId,
                                unexpected_disk["fsid"],
                                "allowScheduling", False)
    disks = node["disks"]
    disk = disks[unexpected_disk["fsid"]]
    assert not disk["allowScheduling"]
    disks.pop(unexpected_disk["fsid"])
    update_disks = get_update_disks(disks)
    node.diskUpdate(disks=update_disks)

    cleanup_host_disk(client, 'vol-small')


def test_replica_scheduler_update_over_provisioning(client):  # NOQA
    """
    Test that scheduling follows the over-provisioning setting: with the
    percentage at 0 no replica can be scheduled, and raising it back to
    100 lets the pending volume get scheduled to the default disks.
    """
    nodes = client.list_node()
    lht_hostId = get_self_host_id()
    expect_node_disk = {}
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk

    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]

    # set the over-provisioning percentage to 0
    # to verify that no replica can be scheduled
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="0")
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=SIZE,
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)

    # set the over-provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    # check volume status
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check that every replica is scheduled to the default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean up the volume and restore the setting
    cleanup_volume(client, vol_name)
    client.update(over_provisioning_setting,
                  value=old_provisioning_setting)


def test_replica_scheduler_no_disks(client):  # NOQA
    """
    Test that a volume cannot be scheduled when every node has had all
    of its disks removed.
    """
    nodes = client.list_node()
    # delete all disks on each node
    for node in nodes:
        disks = node["disks"]
        name = node["name"]
        # disable scheduling on every disk first
        for fsid, disk in disks.iteritems():
            disk["allowScheduling"] = False
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        for fsid, disk in node["disks"].iteritems():
            # wait for the node controller to update the disk status
            wait_for_disk_status(client, name, fsid,
                                 "allowScheduling", False)
            wait_for_disk_status(client, name, fsid,
                                 "storageScheduled", 0)
        node = client.by_id_node(name)
        for fsid, disk in node["disks"].iteritems():
            assert not disk["allowScheduling"]
        node = node.diskUpdate(disks=[])
        node = common.wait_for_disk_update(client, name, 0)
        assert len(node["disks"]) == 0

    # verify that no disk can fit the volume
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=SIZE,
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)


def test_replica_scheduler_exceed_over_provisioning(client):  # NOQA
    """
    Test that a volume exceeding the over-provisioning limit cannot be
    scheduled: every disk is left with only 1 Gi of schedulable space,
    so a 2 Gi volume has nowhere to go.
    """
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    # reserve all but 1 Gi on every disk
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            disk["storageReserved"] = \
                disk["storageMaximum"] - 1*Gi
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(client, node["name"], fsid,
                                 "storageReserved",
                                 disk["storageMaximum"] - 1*Gi)

    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=str(2*Gi),
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)
    client.delete(volume)
    common.wait_for_volume_delete(client, vol_name)
    client.update(over_provisioning_setting,
                  value=old_provisioning_setting)


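# A worked sketch of the scheduling arithmetic the test above depends on
# (an illustration only, not Longhorn's authoritative scheduler code):
# the space a disk can still accept is derived from its maximum,
# reserved, and already-scheduled storage plus the over-provisioning
# percentage.
def _schedulable_space(storage_maximum, storage_reserved,
                       storage_scheduled=0, over_provisioning_pct=100):
    # e.g. storageReserved = storageMaximum - 1 Gi at 100% leaves only
    # 1 Gi schedulable, so the 2 Gi replicas above cannot be placed.
    usable = storage_maximum - storage_reserved
    return usable * over_provisioning_pct / 100 - storage_scheduled

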
def test_replica_zone_anti_affinity(client, core_api, volume_name, k8s_node_zone_tags):  # NOQA
    """
    Test replica scheduler with zone anti-affinity

    1. Set zone anti-affinity to hard.
    2. Label nodes 1 & 2 with same zone label "zone1".
       Label node 3 with zone label "zone2".
    3. Create a volume with 3 replicas.
    4. Wait for volume condition `scheduled` to be false.
    5. Label node 2 with zone label "zone3".
    6. Wait for volume condition `scheduled` to be success.
    7. Clear the volume.
    8. Set zone anti-affinity to soft.
    9. Change the zone labels on node 1 & 2 & 3 to "zone1".
    10. Create a volume.
    11. Wait for volume condition `scheduled` to be success.
    12. Clean up the replica count, the zone labels and the volume.
    """
    wait_longhorn_node_zone_updated(client)

    replica_node_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    client.update(replica_node_soft_anti_affinity_setting, value="false")

    replica_zone_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_ZONE_SOFT_ANTI_AFFINITY)
    client.update(replica_zone_soft_anti_affinity_setting, value="false")

    volume = create_and_check_volume(client, volume_name)

    lh_nodes = client.list_node()

    count = 0
    for node in lh_nodes:
        count += 1
        set_k8s_node_zone_label(core_api, node.name, "lh-zone" + str(count))

    wait_longhorn_node_zone_updated(client)

    wait_for_volume_condition_scheduled(client, volume_name,
                                        "status", CONDITION_STATUS_TRUE)

    replica_zone_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_ZONE_SOFT_ANTI_AFFINITY)
    client.update(replica_zone_soft_anti_affinity_setting, value="true")

    volume = client.by_id_volume(volume_name)
    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    for node in lh_nodes:
        set_k8s_node_zone_label(core_api, node.name, "lh-zone1")

    wait_longhorn_node_zone_updated(client)

    volume = create_and_check_volume(client, volume_name)
    wait_for_volume_condition_scheduled(client, volume_name,
                                        "status", CONDITION_STATUS_TRUE)


def create_volume_with_replica_on_host(client, volume_name):  # NOQA
    """
    Helper: create a volume with one replica per node, attach it to the
    current host, and wait for it to become healthy.
    """
    lht_hostId = get_self_host_id()

    nodes = client.list_node()

    volume = create_and_check_volume(client, volume_name, len(nodes), SIZE)
    volume.attach(hostId=lht_hostId, disableFrontend=False)
    wait_for_volume_healthy(client, volume_name)

    return volume


def test_node_controller_sync_disk_state(client):  # NOQA
    """
    Test that the node controller syncs disk state: raising the minimal
    available percentage to 100 turns every disk unschedulable, and
    restoring the old value turns them schedulable again.
    """
    # update StorageMinimalAvailablePercentage to test disk state
    setting = client.by_id_setting(
        SETTING_STORAGE_MINIMAL_AVAILABLE_PERCENTAGE)
    old_minimal_available_percentage = setting["value"]
    setting = client.update(setting, value="100")
    assert setting["value"] == "100"
    nodes = client.list_node()
    # wait for the node controller to update the disk state
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_conditions(client, node["name"], fsid,
                                     DISK_CONDITION_SCHEDULABLE,
                                     CONDITION_STATUS_FALSE)

    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_FALSE

    setting = client.update(setting,
                            value=old_minimal_available_percentage)
    assert setting["value"] == old_minimal_available_percentage
    # wait for the node controller to update the disk state
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_conditions(client, node["name"], fsid,
                                     DISK_CONDITION_SCHEDULABLE,
                                     CONDITION_STATUS_TRUE)

    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE


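# A sketch of the schedulability check the test above exercises (for
# illustration; the authoritative logic lives in the Longhorn node
# controller): a disk stays Schedulable only while its available space
# exceeds the reserved fraction of its maximum.
def _disk_schedulable(storage_available, storage_maximum,
                      minimal_available_pct):
    # at minimal_available_pct == 100 this can never hold, so every disk
    # flips to unschedulable until the old percentage is restored
    return storage_available > storage_maximum * minimal_available_pct / 100

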
def test_node_controller_sync_storage_scheduled(client):  # NOQA
    """
    Test that the node controller tracks StorageScheduled: after creating
    a volume with one replica per node, each default disk reports the
    replica size as scheduled storage.
    """
    lht_hostId = get_self_host_id()

    nodes = client.list_node()
    for node in nodes:
        for fsid, disk in node["disks"].iteritems():
            # wait for the node controller to update the disk status
            wait_for_disk_status(client, node["name"], fsid,
                                 "storageScheduled", 0)

    # create a volume and check that StorageScheduled is updated on each node
    vol_name = common.generate_volume_name()
    volume = create_volume(client, vol_name,
                           str(SMALL_DISK_SIZE), lht_hostId, len(nodes))
    replicas = volume["replicas"]
    for replica in replicas:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]

    # wait for the node controller to update the disk status
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(client, node["name"], fsid,
                                 "storageScheduled", SMALL_DISK_SIZE)

    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for replica in replicas:
            if replica["hostId"] == node["name"]:
                disk = disks[replica["diskID"]]
                conditions = disk["conditions"]
                assert disk["storageScheduled"] == SMALL_DISK_SIZE
                assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                    CONDITION_STATUS_TRUE
                break

    # clean up the volume
    cleanup_volume(client, vol_name)


def wait_longhorn_node_zone_updated(client):  # NOQA
    """
    Helper: wait until every Longhorn node reports a non-empty zone,
    i.e. the node controller has picked up the Kubernetes zone labels.
    """
    lh_nodes = client.list_node()
    node_names = map(lambda node: node.name, lh_nodes)

    for node_name in node_names:
        for _ in range(RETRY_COUNTS):
            lh_node = client.by_id_node(node_name)
            if lh_node.zone != '':
                break
            time.sleep(RETRY_INTERVAL)

        assert lh_node.zone != ''


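# A minimal sketch of the set_k8s_node_zone_label helper used by the
# zone tests below, mirroring how the k8s_node_zone_tags fixture patches
# node labels. This is an assumption for illustration; the real helper
# lives in common.py and may differ in detail.
def set_k8s_node_zone_label(core_api, node_name, zone_name):  # NOQA
    k8s_zone_label = get_k8s_zone_label()
    payload = {"metadata": {"labels": {k8s_zone_label: zone_name}}}
    core_api.patch_node(node_name, body=payload)

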
def test_tag_scheduling_on_update(client, node_default_tags, volume_name):  # NOQA
    """
    Test that replicas get scheduled if a node or disk is updated with
    the proper tags.
    """
    tag_spec = {
        "disk": ["ssd", "m2"],
        "expected": 1,
        "node": ["main", "fallback"]
    }
    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=3,
                         diskSelector=tag_spec["disk"],
                         nodeSelector=tag_spec["node"])
    volume = wait_for_volume_detached(client, volume_name)
    assert volume["diskSelector"] == tag_spec["disk"]
    assert volume["nodeSelector"] == tag_spec["node"]

    wait_scheduling_failure(client, volume_name)

    host_id = get_self_host_id()
    node = client.by_id_node(host_id)
    update_disks = get_update_disks(node["disks"])
    update_disks[0]["tags"] = tag_spec["disk"]
    node = node.diskUpdate(disks=update_disks)
    set_node_tags(client, node, tag_spec["node"])

    scheduled = False
    for i in range(RETRY_COUNTS):
        v = client.by_id_volume(volume_name)
        if v["conditions"]["scheduled"]["status"] == "True":
            scheduled = True
        if scheduled:
            break
        sleep(RETRY_INTERVAL)
    assert scheduled

    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    nodes = client.list_node()
    node_mapping = {node["id"]: {
        "disk": get_update_disks(node["disks"])[0]["tags"],
        "node": node["tags"]
    } for node in nodes}
    assert len(volume["replicas"]) == 3
    check_volume_replicas(volume, tag_spec, node_mapping)

    cleanup_volume(client, volume)


def test_update_node(client):  # NOQA
    """
    Test node updates: toggling allowScheduling off and back on is
    reflected in the node status.
    """
    nodes = client.list_node()
    assert len(nodes) > 0

    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    node = client.update(node, allowScheduling=False)
    node = common.wait_for_node_update(client, lht_hostId,
                                       "allowScheduling", False)
    assert not node["allowScheduling"]
    node = client.by_id_node(lht_hostId)
    assert not node["allowScheduling"]

    node = client.update(node, allowScheduling=True)
    node = common.wait_for_node_update(client, lht_hostId,
                                       "allowScheduling", True)
    assert node["allowScheduling"]
    node = client.by_id_node(lht_hostId)
    assert node["allowScheduling"]


def test_zone_tags(client, core_api, volume_name, k8s_node_zone_tags):  # NOQA
    """
    Test the zone anti-affinity feature

    1. Add Kubernetes zone labels to the nodes
       1. Only two zones for now: zone1 and zone2
    2. Create a volume with two replicas
    3. Verify that zone1 and zone2 each has one replica.
    4. Remove a random replica and wait for the volume to finish rebuilding
    5. Verify that zone1 and zone2 each has one replica.
    6. Repeat steps 4-5 a few times.
    7. Update the volume to 3 replicas, make sure they're scheduled on 3 nodes
    8. Remove a random replica and wait for the volume to finish rebuilding
    9. Make sure the replicas are on different nodes
    10. Repeat steps 8-9 a few times
    """
    wait_longhorn_node_zone_updated(client)

    volume = create_and_check_volume(client, volume_name, num_of_replicas=2)

    host_id = get_self_host_id()

    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)

    zone1_replica_count = get_zone_replica_count(client, volume_name, ZONE1)
    zone2_replica_count = get_zone_replica_count(client, volume_name, ZONE2)
    assert zone1_replica_count == zone2_replica_count

    for i in range(randrange(3, 5)):
        volume = client.by_id_volume(volume_name)
        replica_count = len(volume.replicas)
        assert replica_count == 2

        replica_id = randrange(0, replica_count)
        replica_name = volume.replicas[replica_id].name
        volume.replicaRemove(name=replica_name)

        wait_for_volume_degraded(client, volume_name)
        wait_for_volume_healthy(client, volume_name)
        wait_for_volume_replica_count(client, volume_name, replica_count)

        volume = client.by_id_volume(volume_name)
        replica_names = map(lambda replica: replica.name, volume["replicas"])
        wait_new_replica_ready(client, volume_name, replica_names)

        zone1_replica_count = \
            get_zone_replica_count(client, volume_name, ZONE1)
        zone2_replica_count = \
            get_zone_replica_count(client, volume_name, ZONE2)
        assert zone1_replica_count == zone2_replica_count

    volume.updateReplicaCount(replicaCount=3)
    wait_for_volume_degraded(client, volume_name)
    wait_for_volume_replica_count(client, volume_name, 3)
    wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    lh_node_names = list(map(lambda node: node.name, client.list_node()))
    for replica in volume.replicas:
        lh_node_names.remove(replica.hostId)
    assert lh_node_names == []

    for i in range(randrange(3, 5)):
        volume = client.by_id_volume(volume_name)
        replica_count = len(volume.replicas)
        assert replica_count == 3

        replica_id = randrange(0, replica_count)
        replica_name = volume.replicas[replica_id].name
        volume.replicaRemove(name=replica_name)

        wait_for_volume_degraded(client, volume_name)
        wait_for_volume_healthy(client, volume_name)
        wait_for_volume_replica_count(client, volume_name, replica_count)

        volume = client.by_id_volume(volume_name)
        lh_node_names = list(map(lambda node: node.name, client.list_node()))
        for replica in volume.replicas:
            lh_node_names.remove(replica.hostId)
        assert lh_node_names == []


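# A hedged sketch of the get_zone_replica_count helper used throughout
# the zone tests: count the replicas whose host node sits in the given
# zone. The real implementation lives in the shared test helpers and may
# differ in detail.
def get_zone_replica_count(client, volume_name, zone_name,
                           chk_running=False):  # NOQA
    volume = client.by_id_volume(volume_name)
    count = 0
    for replica in volume.replicas:
        if chk_running and not replica.running:
            continue
        if replica.hostId == "":
            # skip replicas that have not been scheduled to a node yet
            continue
        if client.by_id_node(replica.hostId).zone == zone_name:
            count += 1
    return count

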
def test_setting_toleration():
    """
    Test toleration setting

    1. Set `taint-toleration` to "key1=value1:NoSchedule; key2:InvalidEffect".
    2. Verify the request fails.
    3. Create a volume and attach it.
    4. Set `taint-toleration` to "key1=value1:NoSchedule; key2:NoExecute".
    5. Verify that the toleration setting cannot be updated while any
       volume is attached.
    6. Generate and write `data1` into the volume.
    7. Detach the volume.
    8. Set `taint-toleration` to "key1=value1:NoSchedule; key2:NoExecute".
    9. Wait for all the Longhorn system components to restart with the new
       toleration.
    10. Verify that UI, manager, and driver deployer don't restart and don't
        have the new toleration.
    11. Attach the volume again and verify the volume `data1`.
    12. Generate and write `data2` to the volume.
    13. Detach the volume.
    14. Clean the `toleration` setting.
    15. Wait for all the Longhorn system components to restart with no
        toleration.
    16. Attach the volume and validate `data2`.
    17. Generate and write `data3` to the volume.
    """
    client = get_longhorn_api_client()  # NOQA
    apps_api = get_apps_api_client()  # NOQA
    core_api = get_core_api_client()  # NOQA
    count = len(client.list_node())

    setting = client.by_id_setting(SETTING_TAINT_TOLERATION)

    with pytest.raises(Exception) as e:
        client.update(setting,
                      value="key1=value1:NoSchedule; key2:InvalidEffect")
    assert 'invalid effect' in str(e.value)

    volume_name = "test-toleration-vol"  # NOQA
    volume = create_and_check_volume(client, volume_name)
    volume.attach(hostId=get_self_host_id())
    volume = wait_for_volume_healthy(client, volume_name)

    setting_value_str = "key1=value1:NoSchedule; key2:NoExecute"
    setting_value_dicts = [
        {
            "key": "key1",
            "value": "value1",
            "operator": "Equal",
            "effect": "NoSchedule"
        },
        {
            "key": "key2",
            "value": None,
            "operator": "Exists",
            "effect": "NoExecute"
        },
    ]
    with pytest.raises(Exception) as e:
        client.update(setting, value=setting_value_str)
    assert 'cannot modify toleration setting before all volumes are ' \
           'detached' in str(e.value)

    data1 = write_volume_random_data(volume)
    check_volume_data(volume, data1)

    volume.detach(hostId="")
    wait_for_volume_detached(client, volume_name)

    setting = client.update(setting, value=setting_value_str)
    assert setting.value == setting_value_str
    wait_for_toleration_update(core_api, apps_api, count,
                               setting_value_dicts)

    client, node = wait_for_longhorn_node_ready()

    volume = client.by_id_volume(volume_name)
    volume.attach(hostId=node)
    volume = wait_for_volume_healthy(client, volume_name)
    check_volume_data(volume, data1)
    data2 = write_volume_random_data(volume)
    check_volume_data(volume, data2)
    volume.detach(hostId="")
    wait_for_volume_detached(client, volume_name)

    # cleanup
    setting_value_str = ""
    setting_value_dicts = []
    setting = client.by_id_setting(SETTING_TAINT_TOLERATION)
    setting = client.update(setting, value=setting_value_str)
    assert setting.value == setting_value_str
    wait_for_toleration_update(core_api, apps_api, count,
                               setting_value_dicts)

    client, node = wait_for_longhorn_node_ready()

    volume = client.by_id_volume(volume_name)
    volume.attach(hostId=node)
    volume = wait_for_volume_healthy(client, volume_name)
    check_volume_data(volume, data2)
    data3 = write_volume_random_data(volume)
    check_volume_data(volume, data3)

    cleanup_volume(client, volume)


def test_setting_priority_class(core_api, apps_api, scheduling_api, priority_class, volume_name):  # NOQA
    """
    Test that the Priority Class setting is validated and utilized
    correctly.

    1. Verify that the name of a non-existent Priority Class cannot be used
       for the Setting.
    2. Create a new Priority Class in Kubernetes.
    3. Create and attach a Volume.
    4. Verify that the Priority Class Setting cannot be updated with an
       attached Volume.
    5. Generate and write `data1`.
    6. Detach the Volume.
    7. Update the Priority Class Setting to the new Priority Class.
    8. Wait for all the Longhorn system components to restart with the new
       Priority Class.
    9. Verify that UI, manager, and driver deployer don't have the
       Priority Class.
    10. Attach the Volume and verify `data1`.
    11. Generate and write `data2`.
    12. Unset the Priority Class Setting.
    13. Wait for all the Longhorn system components to restart with the new
        Priority Class.
    14. Verify that UI, manager, and driver deployer don't have the
        Priority Class.
    15. Attach the Volume and verify `data2`.
    16. Generate and write `data3`.

    Note: system components are workloads other than UI, manager, driver
    deployer
    """
    client = get_longhorn_api_client()  # NOQA
    count = len(client.list_node())
    name = priority_class['metadata']['name']
    setting = client.by_id_setting(SETTING_PRIORITY_CLASS)

    with pytest.raises(Exception) as e:
        client.update(setting, value=name)
    assert 'failed to get priority class ' in str(e.value)

    scheduling_api.create_priority_class(priority_class)

    volume = create_and_check_volume(client, volume_name)
    volume.attach(hostId=get_self_host_id())
    volume = wait_for_volume_healthy(client, volume_name)

    with pytest.raises(Exception) as e:
        client.update(setting, value=name)
    assert 'cannot modify priority class setting before all volumes are ' \
           'detached' in str(e.value)

    data1 = write_volume_random_data(volume)
    check_volume_data(volume, data1)

    volume.detach(hostId="")
    wait_for_volume_detached(client, volume_name)

    setting = client.update(setting, value=name)
    assert setting.value == name

    wait_for_priority_class_update(core_api, apps_api, count,
                                   priority_class)

    client, node = wait_for_longhorn_node_ready()

    volume = client.by_id_volume(volume_name)
    volume.attach(hostId=node)
    volume = wait_for_volume_healthy(client, volume_name)
    check_volume_data(volume, data1)
    data2 = write_volume_random_data(volume)
    check_volume_data(volume, data2)
    volume.detach(hostId="")
    wait_for_volume_detached(client, volume_name)

    setting = client.by_id_setting(SETTING_PRIORITY_CLASS)
    setting = client.update(setting, value='')
    assert setting.value == ''
    wait_for_priority_class_update(core_api, apps_api, count)

    client, node = wait_for_longhorn_node_ready()

    volume = client.by_id_volume(volume_name)
    volume.attach(hostId=node)
    volume = wait_for_volume_healthy(client, volume_name)
    check_volume_data(volume, data2)
    data3 = write_volume_random_data(volume)
    check_volume_data(volume, data3)

    cleanup_volume(client, volume)


def test_node_default_disk_labeled(
        client, core_api, random_disk_path, reset_default_disk_label,  # NOQA
        reset_disk_settings):  # NOQA
    """
    Test that only nodes with the proper label applied get a default
    disk created on them when one doesn't already exist. Make sure the
    created disk matches the Default Data Path setting.
    """
    # Set up cases.
    cases = {
        "disk_exists": None,
        "labeled": None,
        "unlabeled": None
    }
    nodes = client.list_node()
    assert len(nodes) >= 3

    node = nodes[0]
    cases["disk_exists"] = node["id"]
    core_api.patch_node(node["id"], {
        "metadata": {
            "labels": {
                CREATE_DEFAULT_DISK_LABEL: "true"
            }
        }
    })

    node = nodes[1]
    cases["labeled"] = node["id"]
    core_api.patch_node(node["id"], {
        "metadata": {
            "labels": {
                CREATE_DEFAULT_DISK_LABEL: "true"
            }
        }
    })
    disks = node["disks"]
    for _, disk in disks.iteritems():
        disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = node.diskUpdate(disks=[])
    wait_for_disk_update(client, node["id"], 0)

    node = nodes[2]
    cases["unlabeled"] = node["id"]
    disks = node["disks"]
    for _, disk in disks.iteritems():
        disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = node.diskUpdate(disks=[])
    wait_for_disk_update(client, node["id"], 0)

    # Set disk creation and path settings.
    setting = client.by_id_setting(DEFAULT_DATA_PATH_SETTING)
    client.update(setting, value=random_disk_path)
    setting = client.by_id_setting(CREATE_DEFAULT_DISK_SETTING)
    client.update(setting, value="true")
    wait_for_disk_update(client, cases["labeled"], 1)

    # Check each case.
    node = client.by_id_node(cases["disk_exists"])
    assert len(node["disks"]) == 1
    assert get_update_disks(node["disks"])[0]["path"] == \
        DEFAULT_DISK_PATH

    node = client.by_id_node(cases["labeled"])
    assert len(node["disks"]) == 1
    assert get_update_disks(node["disks"])[0]["path"] == \
        random_disk_path

    node = client.by_id_node(cases["unlabeled"])
    assert len(node["disks"]) == 0


def test_replica_auto_balance_zone_best_effort_with_uneven_node_in_zones(
        client, core_api, volume_name, pod):  # NOQA
    """
    Scenario: replica auto-balance zones with best-effort should balance
              replicas in zones.

    Given set `replica-soft-anti-affinity` to `true`.
    And set `replica-zone-soft-anti-affinity` to `true`.
    And set `replicaAutoBalance` to `best-effort`.
    And set node-1 to zone-1.
        set node-2 to zone-1.
        set node-3 to zone-1.
        set node-4 to zone-2.
        set node-5 to zone-2.
    And disable scheduling for node-2.
        disable scheduling for node-3.
        disable scheduling for node-4.
        disable scheduling for node-5.
    And create a volume with 4 replicas.
    And attach the volume to node-1.

    Given 4 replicas running on node-1.
          0 replicas running on node-2.
          0 replicas running on node-3.
          0 replicas running on node-4.
          0 replicas running on node-5.

    When enable scheduling for node-4.
    Then count replicas in each zone.
    And 2 replicas running in zone-1.
        2 replicas running in zone-2.

    When enable scheduling for node-2.
         enable scheduling for node-3.
    Then count replicas on each node.
    And 1 replica running on node-1.
        1 replica running on node-2.
        1 replica running on node-3.
        1 replica running on node-4.
        0 replicas running on node-5.

    When enable scheduling for node-5.
    Then count replicas in each zone.
    And 2 replicas running in zone-1.
        2 replicas running in zone-2.
    """
    common.update_setting(client,
                          SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY, "true")
    common.update_setting(client,
                          SETTING_REPLICA_ZONE_SOFT_ANTI_AFFINITY, "true")
    common.update_setting(client,
                          SETTING_DEFAULT_DATA_LOCALITY, "best-effort")
    common.update_setting(client,
                          SETTING_REPLICA_AUTO_BALANCE, "best-effort")

    n1, n2, n3, n4, n5 = client.list_node()

    set_k8s_node_zone_label(core_api, n1.name, ZONE1)
    set_k8s_node_zone_label(core_api, n2.name, ZONE1)
    set_k8s_node_zone_label(core_api, n3.name, ZONE1)
    set_k8s_node_zone_label(core_api, n4.name, ZONE2)
    set_k8s_node_zone_label(core_api, n5.name, ZONE2)
    wait_longhorn_node_zone_updated(client)

    client.update(n2, allowScheduling=False)
    client.update(n3, allowScheduling=False)
    client.update(n4, allowScheduling=False)
    client.update(n5, allowScheduling=False)

    n_replicas = 4
    volume = create_and_check_volume(client, volume_name,
                                     num_of_replicas=n_replicas)
    volume.attach(hostId=n1.name)

    for _ in range(RETRY_COUNTS):
        n1_r_count = common.get_host_replica_count(
            client, volume_name, n1.name, chk_running=True)
        n2_r_count = common.get_host_replica_count(
            client, volume_name, n2.name, chk_running=False)
        n3_r_count = common.get_host_replica_count(
            client, volume_name, n3.name, chk_running=False)
        n4_r_count = common.get_host_replica_count(
            client, volume_name, n4.name, chk_running=False)
        n5_r_count = common.get_host_replica_count(
            client, volume_name, n5.name, chk_running=False)

        if n1_r_count == 4 and \
                n2_r_count == n3_r_count == n4_r_count == n5_r_count == 0:
            break

        time.sleep(RETRY_INTERVAL)
    assert n1_r_count == 4
    assert n2_r_count == 0
    assert n3_r_count == 0
    assert n4_r_count == 0
    assert n5_r_count == 0

    client.update(n4, allowScheduling=True)

    for _ in range(RETRY_COUNTS):
        z1_r_count = get_zone_replica_count(
            client, volume_name, ZONE1, chk_running=True)
        z2_r_count = get_zone_replica_count(
            client, volume_name, ZONE2, chk_running=True)

        if z1_r_count == z2_r_count == 2:
            break

        time.sleep(RETRY_INTERVAL)
    assert z1_r_count == 2
    assert z2_r_count == 2

    client.update(n2, allowScheduling=True)
    client.update(n3, allowScheduling=True)

    for _ in range(RETRY_COUNTS):
        n1_r_count = common.get_host_replica_count(
            client, volume_name, n1.name, chk_running=True)
        n2_r_count = common.get_host_replica_count(
            client, volume_name, n2.name, chk_running=True)
        n3_r_count = common.get_host_replica_count(
            client, volume_name, n3.name, chk_running=True)
        n4_r_count = common.get_host_replica_count(
            client, volume_name, n4.name, chk_running=True)
        n5_r_count = common.get_host_replica_count(
            client, volume_name, n5.name, chk_running=False)

        if n1_r_count == n2_r_count == n3_r_count == n4_r_count == 1 and \
                n5_r_count == 0:
            break

        time.sleep(RETRY_INTERVAL)
    assert n1_r_count == 1
    assert n2_r_count == 1
    assert n3_r_count == 1
    assert n4_r_count == 1
    assert n5_r_count == 0

    client.update(n5, allowScheduling=True)

    for _ in range(RETRY_COUNTS):
        z1_r_count = get_zone_replica_count(
            client, volume_name, ZONE1, chk_running=True)
        z2_r_count = get_zone_replica_count(
            client, volume_name, ZONE2, chk_running=True)

        if z1_r_count == z2_r_count == 2:
            break

        time.sleep(RETRY_INTERVAL)
    assert z1_r_count == 2
    assert z2_r_count == 2


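# A hedged sketch of common.get_host_replica_count, used by the
# auto-balance tests above and below: count the volume's replicas placed
# on a given node, optionally only the running ones. The actual helper
# in common.py may differ in detail.
def get_host_replica_count(client, volume_name, host_name,
                           chk_running=True):  # NOQA
    volume = client.by_id_volume(volume_name)
    count = 0
    for replica in volume.replicas:
        if chk_running and not replica.running:
            continue
        if replica.hostId == host_name:
            count += 1
    return count

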
def test_replica_auto_balance_node_duplicates_in_multiple_zones(
        client, core_api, volume_name):  # NOQA
    """
    Scenario: replica auto-balance to nodes with duplicated replicas
              in the zone.

    Given set `replica-soft-anti-affinity` to `true`.
    And set `replica-zone-soft-anti-affinity` to `true`.
    And set volume spec `replicaAutoBalance` to `least-effort`.
    And set node-1 to zone-1.
        set node-2 to zone-2.
    And disable scheduling for node-3.
    And create a volume with 3 replicas.
    And attach the volume to self-node.
    And zone-1 and zone-2 should contain 3 replicas in total.

    When set node-3 to the zone with duplicated replicas.
    And enable scheduling for node-3.
    Then count replicas running on each node.
    And 1 replica running on node-1.
        1 replica running on node-2.
        1 replica running on node-3.
    And count replicas running in each zone.
    And a total of 3 replicas running in zone-1 and zone-2.
    """
    common.update_setting(client,
                          SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY, "true")
    common.update_setting(client,
                          SETTING_REPLICA_ZONE_SOFT_ANTI_AFFINITY, "true")
    common.update_setting(client,
                          SETTING_REPLICA_AUTO_BALANCE, "least-effort")

    n1, n2, n3 = client.list_node()

    set_k8s_node_zone_label(core_api, n1.name, ZONE1)
    set_k8s_node_zone_label(core_api, n2.name, ZONE2)
    set_k8s_node_zone_label(core_api, n3.name, "temp")
    wait_longhorn_node_zone_updated(client)

    client.update(n3, allowScheduling=False)

    n_replicas = 3
    volume = create_and_check_volume(client, volume_name,
                                     num_of_replicas=n_replicas)
    volume.attach(hostId=get_self_host_id())

    z1_r_count = get_zone_replica_count(client, volume_name, ZONE1)
    z2_r_count = get_zone_replica_count(client, volume_name, ZONE2)
    assert z1_r_count + z2_r_count == n_replicas

    # move node-3 into whichever zone currently holds the duplicated replicas
    if z1_r_count == 2:
        set_k8s_node_zone_label(core_api, n3.name, ZONE1)
    else:
        set_k8s_node_zone_label(core_api, n3.name, ZONE2)

    client.update(n3, allowScheduling=True)

    for _ in range(RETRY_COUNTS):
        n1_r_count = common.get_host_replica_count(
            client, volume_name, n1.name, chk_running=True)
        n2_r_count = common.get_host_replica_count(
            client, volume_name, n2.name, chk_running=True)
        n3_r_count = common.get_host_replica_count(
            client, volume_name, n3.name, chk_running=True)

        if n1_r_count == n2_r_count == n3_r_count == 1:
            break

        time.sleep(RETRY_INTERVAL)
    assert n1_r_count == 1
    assert n2_r_count == 1
    assert n3_r_count == 1

    z1_r_count = get_zone_replica_count(client, volume_name, ZONE1,
                                        chk_running=True)
    z2_r_count = get_zone_replica_count(client, volume_name, ZONE2,
                                        chk_running=True)
    assert z1_r_count + z2_r_count == n_replicas


def test_replica_auto_balance_zone_best_effort(client, core_api, volume_name):  # NOQA
    """
    Scenario: replica auto-balance zones with best-effort.

    Given set `replica-soft-anti-affinity` to `true`.
    And set `replica-zone-soft-anti-affinity` to `true`.
    And set volume spec `replicaAutoBalance` to `best-effort`.
    And set node-1 to zone-1.
        set node-2 to zone-2.
        set node-3 to zone-3.
    And disable scheduling for node-2.
        disable scheduling for node-3.
    And create a volume with 6 replicas.
    And attach the volume to self-node.
    And 6 replicas running in zone-1.
        0 replicas running in zone-2.
        0 replicas running in zone-3.

    When enable scheduling for node-2.
    Then count replicas running in each zone.
    And 3 replicas running in zone-1.
        3 replicas running in zone-2.
        0 replicas running in zone-3.

    When enable scheduling for node-3.
    Then count replicas running in each zone.
    And 2 replicas running in zone-1.
        2 replicas running in zone-2.
        2 replicas running in zone-3.
    """
    common.update_setting(client,
                          SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY, "true")
    common.update_setting(client,
                          SETTING_REPLICA_ZONE_SOFT_ANTI_AFFINITY, "true")
    common.update_setting(client,
                          SETTING_REPLICA_AUTO_BALANCE, "best-effort")

    n1, n2, n3 = client.list_node()

    set_k8s_node_zone_label(core_api, n1.name, ZONE1)
    set_k8s_node_zone_label(core_api, n2.name, ZONE2)
    set_k8s_node_zone_label(core_api, n3.name, ZONE3)
    wait_longhorn_node_zone_updated(client)

    client.update(n2, allowScheduling=False)
    client.update(n3, allowScheduling=False)

    n_replicas = 6
    volume = create_and_check_volume(client, volume_name,
                                     num_of_replicas=n_replicas)
    volume.attach(hostId=get_self_host_id())

    for _ in range(RETRY_COUNTS):
        z1_r_count = get_zone_replica_count(
            client, volume_name, ZONE1, chk_running=True)
        z2_r_count = get_zone_replica_count(
            client, volume_name, ZONE2, chk_running=True)
        z3_r_count = get_zone_replica_count(
            client, volume_name, ZONE3, chk_running=True)

        if z1_r_count == 6 and z2_r_count == z3_r_count == 0:
            break

        time.sleep(RETRY_INTERVAL)
    assert z1_r_count == 6
    assert z2_r_count == 0
    assert z3_r_count == 0

    client.update(n2, allowScheduling=True)

    for _ in range(RETRY_COUNTS):
        z1_r_count = get_zone_replica_count(
            client, volume_name, ZONE1, chk_running=True)
        z2_r_count = get_zone_replica_count(
            client, volume_name, ZONE2, chk_running=True)
        z3_r_count = get_zone_replica_count(
            client, volume_name, ZONE3, chk_running=True)

        if z1_r_count == z2_r_count == 3 and z3_r_count == 0:
            break

        time.sleep(RETRY_INTERVAL_LONG)
    assert z1_r_count == 3
    assert z2_r_count == 3
    assert z3_r_count == 0

    client.update(n3, allowScheduling=True)

    for _ in range(RETRY_COUNTS):
        z1_r_count = get_zone_replica_count(
            client, volume_name, ZONE1, chk_running=True)
        z2_r_count = get_zone_replica_count(
            client, volume_name, ZONE2, chk_running=True)
        z3_r_count = get_zone_replica_count(
            client, volume_name, ZONE3, chk_running=True)

        if z1_r_count == z2_r_count == z3_r_count == 2:
            break

        time.sleep(RETRY_INTERVAL_LONG)
    assert z1_r_count == 2
    assert z2_r_count == 2
    assert z3_r_count == 2


def test_replica_scheduler_update_minimal_available(client):  # NOQA
    """
    Test that scheduling follows the minimal available percentage
    setting: at 100% every disk turns unschedulable and the volume
    cannot be scheduled; restoring the default lets the pending volume
    get scheduled to the default disks.
    """
    minimal_available_setting = client.by_id_setting(
        SETTING_STORAGE_MINIMAL_AVAILABLE_PERCENTAGE)
    old_minimal_setting = minimal_available_setting["value"]

    nodes = client.list_node()
    expect_node_disk = {}
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk

    # set the minimal available percentage to 100
    # to verify that no replica can be scheduled
    minimal_available_setting = client.update(minimal_available_setting,
                                              value="100")
    # wait for disk state updates
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_conditions(client, node["name"], fsid,
                                     DISK_CONDITION_SCHEDULABLE,
                                     CONDITION_STATUS_FALSE)

    lht_hostId = get_self_host_id()
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=SIZE,
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)

    # restore the default minimal available percentage (10)
    minimal_available_setting = client.update(minimal_available_setting,
                                              value=old_minimal_setting)
    # wait for disk state updates
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_conditions(client, node["name"], fsid,
                                     DISK_CONDITION_SCHEDULABLE,
                                     CONDITION_STATUS_TRUE)

    # check volume status
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check that every replica is scheduled to the default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean up the volume
    cleanup_volume(client, vol_name)


def test_replica_cleanup(client):  # NOQA
    """
    Test that replica data paths are cleaned up on the host after the
    volume is deleted, for both the default disk and an extra disk.
    """
    nodes = client.list_node()
    lht_hostId = get_self_host_id()

    node = client.by_id_node(lht_hostId)
    extra_disk_path = create_host_disk(client, "extra-disk",
                                       "10G", lht_hostId)
    extra_disk = {"path": extra_disk_path, "allowScheduling": True}
    update_disks = get_update_disks(node["disks"])
    update_disks.append(extra_disk)
    node = node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    extra_disk_fsid = ""
    for fsid, disk in node["disks"].iteritems():
        if disk["path"] == extra_disk_path:
            extra_disk_fsid = fsid
            break

    for node in nodes:
        # disable all the disks except the ones on the current node
        if node["name"] == lht_hostId:
            continue
        # each node has a single default disk; grab its fsid and disk
        for fsid, disk in node["disks"].iteritems():
            break
        disk["allowScheduling"] = False
        update_disks = get_update_disks(node["disks"])
        node.diskUpdate(disks=update_disks)
        node = wait_for_disk_status(client, node["name"], fsid,
                                    "allowScheduling", False)

    vol_name = common.generate_volume_name()
    # request more replicas than nodes, so both the default and the
    # extra disk on the current node will get at least one
    volume = create_volume(client, vol_name,
                           str(Gi), lht_hostId, 5)
    data_paths = []
    for replica in volume["replicas"]:
        data_paths.append(replica["dataPath"])

    # the data paths should exist now
    for data_path in data_paths:
        assert exec_nsenter("ls {}".format(data_path))

    cleanup_volume(client, vol_name)

    # the data paths should be gone due to the replica cleanup
    for data_path in data_paths:
        with pytest.raises(subprocess.CalledProcessError):
            exec_nsenter("ls {}".format(data_path))

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk = disks[extra_disk_fsid]
    disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_status(client, lht_hostId,
                                extra_disk_fsid,
                                "allowScheduling", False)
    wait_for_disk_status(client, lht_hostId, extra_disk_fsid,
                         "storageScheduled", 0)

    disks = node["disks"]
    disk = disks[extra_disk_fsid]
    assert not disk["allowScheduling"]
    disks.pop(extra_disk_fsid)
    update_disks = get_update_disks(disks)
    node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))

    cleanup_host_disk(client, 'extra-disk')


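# A hedged sketch of the exec_nsenter helper used above: it runs a shell
# command inside the host's namespaces so the test can inspect replica
# data paths on the node filesystem, and raises CalledProcessError on a
# non-zero exit. This is an assumption for illustration; the real helper
# in common.py may resolve the host namespaces differently.
def exec_nsenter(cmd):  # NOQA
    exec_cmd = ["nsenter", "--mount=/host/proc/1/ns/mnt",
                "--net=/host/proc/1/ns/net", "bash", "-c", cmd]
    return subprocess.check_output(exec_cmd)

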
def test_replica_scheduler_too_large_volume_fit_any_disks(client):  # NOQA
    """
    Test that a volume too large for any disk stays unschedulable until
    enough storage is unreserved, and is then scheduled with exactly one
    replica per default disk.
    """
    nodes = client.list_node()
    lht_hostId = get_self_host_id()
    expect_node_disk = {}
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk
            disk["storageReserved"] = disk["storageMaximum"]
        update_disks = get_update_disks(disks)
        node.diskUpdate(disks=update_disks)

    # with everything reserved, the volume cannot fit on any disk
    volume_size = 4 * Gi
    vol_name = common.generate_volume_name()
    client.create_volume(name=vol_name, size=str(volume_size),
                         numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)

    # Reduce StorageReserved of each default disk so that each node can
    # fit only one replica.
    needed_for_scheduling = int(
        volume_size * 1.5 * 100 /
        int(DEFAULT_STORAGE_OVER_PROVISIONING_PERCENTAGE))
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        update_disks = get_update_disks(disks)
        for disk in update_disks:
            disk["storageReserved"] = \
                disk["storageMaximum"] - needed_for_scheduling
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(
                client, node["name"], fsid, "storageReserved",
                disk["storageMaximum"] - needed_for_scheduling)

    # check volume status
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check that every replica is scheduled to the default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean up the volume
    cleanup_volume(client, vol_name)


def test_data_locality_basic(client, core_api, volume_name, pod, settings_reset):  # NOQA
    """
    Test data locality basic feature

    Context:

    The Data Locality feature gives users the option to keep a local
    replica on the same node as the consuming pod.
    Longhorn currently supports 2 modes:
    - disabled: Longhorn does not try to keep a local replica
    - best-effort: Longhorn tries to keep a local replica

    See manual tests at:
    https://github.com/longhorn/longhorn/issues/1045#issuecomment-680706283

    Steps:

    Case 1: Test that Longhorn builds a local replica on the engine node

    1. Create a volume(1) with 1 replica and dataLocality set to disabled
    2. Find the node where the replica is located. Call it replica-node
    3. Attach the volume to a node other than replica-node. Call that node
       engine-node
    4. Write 200MB data to volume(1)
    5. Use a retry loop to verify that Longhorn does not create a replica
       on the engine-node
    6. Update dataLocality to best-effort for volume(1)
    7. Use a retry loop to verify that Longhorn creates and rebuilds a
       replica on the engine-node and removes the other replica
    8. Detach volume(1) and attach it to a different node. Call the new
       node new-engine-node and the old one old-engine-node
    9. Wait for volume(1) to finish attaching
    10. Use a retry loop to verify that Longhorn creates and rebuilds a
        replica on the new-engine-node and removes the replica on
        old-engine-node

    Case 2: Test that Longhorn prioritizes deleting replicas on the same node

    1. Add the tag AVAIL to node-1 and node-2
    2. Set node soft anti-affinity to `true`.
    3. Create a volume(2) with 3 replicas and dataLocality set to best-effort
    4. Use a retry loop to verify that all 3 replicas are on node-1 and
       node-2, and no replica is on node-3
    5. Attach volume(2) to node-3
    6. Use a retry loop to verify that there is no replica on node-3 and we
       can still read/write to volume(2)
    7. Find the node which contains 2 replicas. Call it most-replica-node
    8. Set the replica count to 2 for volume(2)
    9. Verify that Longhorn removes one replica from most-replica-node

    Case 3: Test that the volume is not corrupted if there is an unexpected
    detachment while building the local replica

    1. Remove the tag AVAIL from node-1 and node-2.
       Set node soft anti-affinity to `false`.
    2. Create a volume(3) with 1 replica and dataLocality set to best-effort
    3. Attach volume(3) to node-3.
    4. Use a retry loop to verify that volume(3) has only 1 replica on node-3
    5. Write 800MB data to volume(3)
    6. Detach volume(3)
    7. Attach volume(3) to node-1
    8. Use a retry loop to:
       Wait until volume(3) finishes attaching.
       Wait until Longhorn starts rebuilding a replica on node-1.
       Immediately detach volume(3)
    9. Verify that the replica on node-1 is in ERR state.
    10. Attach volume(3) to node-1
    11. Wait until volume(3) finishes attaching.
    12. Use a retry loop to verify that Longhorn cleans up the ERR replica,
        rebuilds a new replica on node-1, and removes the replica on node-3

    Case 4: Make sure a local replica that fails to schedule doesn't block
    the creation of other replicas.

    1. Disable scheduling for node-3
    2. Create a volume with 1 replica and `dataLocality = best-effort`.
       The replica is scheduled on a node (say node-1)
    3. Attach the volume to node-3. There is a fail-to-schedule replica
       with Spec.HardNodeAffinity=node-3
    4. Increase numberOfReplicas to 3. Verify that the replica set contains:
       one on node-1, one on node-2, one failed replica with
       Spec.HardNodeAffinity=node-3.
    5. Decrease numberOfReplicas to 2.
       Verify that the replica set contains: one on node-1, one on node-2,
       one failed replica with Spec.HardNodeAffinity=node-3.
    6. Decrease numberOfReplicas to 1.
       Verify that the replica set contains: one on node-1 or node-2, one
       failed replica with Spec.HardNodeAffinity=node-3.
    7. Increase numberOfReplicas back to 2.
       Verify that the replica set contains: one on node-1, one on node-2,
       one failed replica with Spec.HardNodeAffinity=node-3.
    8. Turn off data locality by setting `dataLocality=disabled` for the
       volume.
       Verify that the replica set contains: one on node-1, one on node-2
    9. Clean up
    """
    # Case 1: Test that Longhorn builds a local replica on the engine node
    nodes = client.list_node()

    default_data_locality_setting = \
        client.by_id_setting(SETTING_DEFAULT_DATA_LOCALITY)
    try:
        client.update(default_data_locality_setting, value="disabled")
    except Exception as e:
        print("Exception when updating Default Data Locality setting",
              default_data_locality_setting, e)

    volume1_name = volume_name + "-1"
    volume1_size = str(500 * Mi)
    volume1_data_path = "/data/test"
    pv1_name = volume1_name + "-pv"
    pvc1_name = volume1_name + "-pvc"
    pod1_name = volume1_name + "-pod"
    pod1 = pod
    pod1['metadata']['name'] = pod1_name

    volume1 = create_and_check_volume(client, volume1_name,
                                      num_of_replicas=1, size=volume1_size)
    volume1 = client.by_id_volume(volume1_name)
    create_pv_for_volume(client, core_api, volume1, pv1_name)
    create_pvc_for_volume(client, core_api, volume1, pvc1_name)

    volume1 = client.by_id_volume(volume1_name)
    volume1_replica_node = volume1.replicas[0]['hostId']

    volume1_attached_node = None
    for node in nodes:
        if node.name != volume1_replica_node:
            volume1_attached_node = node.name
            break
    assert volume1_attached_node is not None

    pod1['spec']['volumes'] = [{
        "name": "pod-data",
        "persistentVolumeClaim": {
            "claimName": pvc1_name
        }
    }]
    pod1['spec']['nodeSelector'] = \
        {"kubernetes.io/hostname": volume1_attached_node}
    create_and_wait_pod(core_api, pod1)

    write_pod_volume_random_data(core_api, pod1_name,
                                 volume1_data_path, DATA_SIZE_IN_MB_2)

    for i in range(10):
        volume1 = client.by_id_volume(volume1_name)
        assert len(volume1.replicas) == 1
        assert volume1.replicas[0]['hostId'] != volume1_attached_node
        time.sleep(1)

    volume1 = client.by_id_volume(volume1_name)
    volume1.updateDataLocality(dataLocality="best-effort")

    for _ in range(RETRY_COUNTS):
        volume1 = client.by_id_volume(volume1_name)
        assert volume1[VOLUME_FIELD_ROBUSTNESS] == VOLUME_ROBUSTNESS_HEALTHY
        if len(volume1.replicas) == 1 and \
                volume1.replicas[0]['hostId'] == volume1_attached_node:
            break
        time.sleep(RETRY_INTERVAL)
    assert len(volume1.replicas) == 1
    assert volume1.replicas[0]['hostId'] == volume1_attached_node

    delete_and_wait_pod(core_api, pod1_name)
    volume1 = wait_for_volume_detached(client, volume1_name)

    volume1_replica_node = volume1.replicas[0]['hostId']
    volume1_attached_node = None
    for node in nodes:
        if node.name != volume1_replica_node:
            volume1_attached_node = node.name
            break
    assert volume1_attached_node is not None

    pod1['spec']['nodeSelector'] = \
        {"kubernetes.io/hostname": volume1_attached_node}
    create_and_wait_pod(core_api, pod1)
    for _ in range(RETRY_COUNTS):
        volume1 = client.by_id_volume(volume1_name)
        assert volume1[VOLUME_FIELD_ROBUSTNESS] == VOLUME_ROBUSTNESS_HEALTHY
        if len(volume1.replicas) == 1 and \
                volume1.replicas[0]['hostId'] == volume1_attached_node:
            break
        time.sleep(RETRY_INTERVAL)
    assert len(volume1.replicas) == 1
    assert volume1.replicas[0]['hostId'] == volume1_attached_node
    delete_and_wait_pod(core_api, pod1_name)
    wait_for_volume_detached(client, volume1_name)

    # Case 2: Test that Longhorn prioritizes deleting replicas on the same
    # node
    node1 = nodes[0]
    node2 = nodes[1]
    node3 = nodes[2]

    client.update(node1, allowScheduling=True, tags=["AVAIL"])
    client.update(node2, allowScheduling=True, tags=["AVAIL"])

    replica_node_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    try:
        client.update(replica_node_soft_anti_affinity_setting, value="true")
    except Exception as e:
        print("Exception when updating "
              "Replica Node Level Soft Anti-Affinity setting",
              replica_node_soft_anti_affinity_setting, e)

    volume2_name = volume_name + "-2"
    volume2_size = str(500 * Mi)
    pv2_name = volume2_name + "-pv"
    pvc2_name = volume2_name + "-pvc"
    pod2_name = volume2_name + "-pod"
    pod2 = pod
    pod2['metadata']['name'] = pod2_name

    volume2 = client.create_volume(name=volume2_name,
                                   size=volume2_size,
                                   numberOfReplicas=3,
                                   nodeSelector=["AVAIL"],
                                   dataLocality="best-effort")
    volume2 = wait_for_volume_detached(client, volume2_name)
    volume2 = client.by_id_volume(volume2_name)
    create_pv_for_volume(client, core_api, volume2, pv2_name)
    create_pvc_for_volume(client, core_api, volume2, pvc2_name)
    volume2 = client.by_id_volume(volume2_name)

    pod2['spec']['volumes'] = [{
        "name": "pod-data",
        "persistentVolumeClaim": {
            "claimName": pvc2_name
        }
    }]
    pod2['spec']['nodeSelector'] = {"kubernetes.io/hostname": node3.name}
    create_and_wait_pod(core_api, pod2)
    volume2 = wait_for_volume_healthy(client, volume2_name)

    for replica in volume2.replicas:
        assert replica["hostId"] != node3.name

    volume2.updateReplicaCount(replicaCount=2)

    # 2 healthy replicas and 1 replica that failed to schedule;
    # the failed-to-schedule replica is the local replica on node3
    volume2 = wait_for_volume_replica_count(client, volume2_name, 3)
    volume2 = client.by_id_volume(volume2_name)

    volume2_healthy_replicas = []
    for replica in volume2.replicas:
        if replica.running is True:
            volume2_healthy_replicas.append(replica)
    assert len(volume2_healthy_replicas) == 2

    volume2_rep1 = volume2_healthy_replicas[0]
    volume2_rep2 = volume2_healthy_replicas[1]
    assert volume2_rep1["hostId"] != volume2_rep2["hostId"]
    delete_and_wait_pod(core_api, pod2_name)
    wait_for_volume_detached(client, volume2_name)

    # Case 3: Test that the volume is not corrupted if there is an
    # unexpected detachment while building the local replica
    client.update(node1, allowScheduling=True, tags=[])
    client.update(node2, allowScheduling=True, tags=[])

    replica_node_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    try:
        client.update(replica_node_soft_anti_affinity_setting, value="false")
    except Exception as e:
        print("Exception when updating "
              "Replica Node Level Soft Anti-Affinity setting",
              replica_node_soft_anti_affinity_setting, e)

    volume3_name = volume_name + "-3"
    volume3_size = str(1 * Gi)
    volume3_data_path = "/data/test"
    pv3_name = volume3_name + "-pv"
    pvc3_name = volume3_name + "-pvc"
    pod3_name = volume3_name + "-pod"
    pod3 = pod
    pod3['metadata']['name'] = pod3_name

    volume3 = client.create_volume(name=volume3_name,
                                   size=volume3_size,
                                   numberOfReplicas=1)
    volume3 = wait_for_volume_detached(client, volume3_name)
    volume3 = client.by_id_volume(volume3_name)
    create_pv_for_volume(client, core_api, volume3, pv3_name)
    create_pvc_for_volume(client, core_api, volume3, pvc3_name)
    volume3 = client.by_id_volume(volume3_name)

    pod3['spec']['volumes'] = [{
        "name": "pod-data",
        "persistentVolumeClaim": {
            "claimName": pvc3_name
        }
    }]
    pod3['spec']['nodeSelector'] = {"kubernetes.io/hostname": node3.name}
    create_and_wait_pod(core_api, pod3)
    volume3 = wait_for_volume_healthy(client, volume3_name)

    write_pod_volume_random_data(core_api, pod3_name,
                                 volume3_data_path, DATA_SIZE_IN_MB_4)

    volume3.updateDataLocality(dataLocality="best-effort")
    volume3 = client.by_id_volume(volume3_name)

    if volume3.replicas[0]['hostId'] != node3.name:
        wait_for_rebuild_start(client, volume3_name)
        volume3 = client.by_id_volume(volume3_name)
        assert len(volume3.replicas) == 2
        wait_for_rebuild_complete(client, volume3_name)
    volume3 = wait_for_volume_replica_count(client, volume3_name, 1)
    assert volume3.replicas[0]["hostId"] == node3.name

    delete_and_wait_pod(core_api, pod3_name)

    pod3['spec']['nodeSelector'] = {"kubernetes.io/hostname": node1.name}
    create_and_wait_pod(core_api, pod3)
    wait_for_rebuild_start(client, volume3_name)
    crash_engine_process_with_sigkill(client, core_api, volume3_name)
    delete_and_wait_pod(core_api, pod3_name)
    wait_for_volume_detached(client, volume3_name)
    volume3 = client.by_id_volume(volume3_name)
    assert len(volume3.replicas) == 1
    assert volume3.replicas[0]["hostId"] == node3.name

    create_and_wait_pod(core_api, pod3)
    wait_for_rebuild_start(client, volume3_name)
    volume3 = client.by_id_volume(volume3_name)
    assert len(volume3.replicas) == 2
    wait_for_rebuild_complete(client, volume3_name)

    # wait for deletion of the extra replica
    volume3 = wait_for_volume_replica_count(client, volume3_name, 1)
    assert volume3.replicas[0]["hostId"] == node1.name
    assert volume3.replicas[0]["mode"] == "RW"
    assert volume3.replicas[0]["running"] is True

    delete_and_wait_pod(core_api, pod3_name)
    wait_for_volume_detached(client, volume3_name)

    # Case 4: Make sure a local replica that fails to schedule doesn't
    # block the creation of other replicas
    replica_node_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    try:
        client.update(replica_node_soft_anti_affinity_setting, value="false")
    except Exception as e:
        print("Exception when updating "
              "Replica Node Level Soft Anti-Affinity setting",
              replica_node_soft_anti_affinity_setting, e)

    client.update(node3, allowScheduling=False)

    volume4_name = volume_name + "-4"
    volume4_size = str(1 * Gi)

    volume4 = client.create_volume(name=volume4_name,
                                   size=volume4_size,
                                   numberOfReplicas=1,
                                   dataLocality="best-effort")
    volume4 = wait_for_volume_detached(client, volume4_name)
    volume4 = client.by_id_volume(volume4_name)

    volume4_replica_name = volume4.replicas[0]["name"]

    volume4.attach(hostId=node3.name)
    wait_for_volume_healthy(client, volume4_name)

    volume4 = client.by_id_volume(volume4_name)
    assert len(volume4.replicas) == 2

    for replica in volume4.replicas:
        if replica["name"] == volume4_replica_name:
            assert replica["running"] is True
            assert replica["mode"] == "RW"
        else:
            assert replica["running"] is False
            assert replica["mode"] == ""

    assert volume4.conditions.scheduled.reason == \
        "LocalReplicaSchedulingFailure"

    volume4 = volume4.updateReplicaCount(replicaCount=3)
    volume4 = wait_for_volume_degraded(client, volume4_name)

    v4_node1_replica_count = 0
    v4_node2_replica_count = 0
    v4_failed_replica_count = 0
    for replica in volume4.replicas:
        if replica["hostId"] == node1.name:
            v4_node1_replica_count += 1
        elif replica["hostId"] == node2.name:
            v4_node2_replica_count += 1
        elif replica["hostId"] == "":
            v4_failed_replica_count += 1
    assert v4_node1_replica_count == 1
    assert v4_node2_replica_count == 1
    assert v4_failed_replica_count > 0

    volume4 = volume4.updateReplicaCount(replicaCount=2)
    volume4 = wait_for_volume_replica_count(client, volume4_name, 3)

    v4_node1_replica_count = 0
    v4_node2_replica_count = 0
    v4_failed_replica_count = 0
    for replica in volume4.replicas:
        if replica["hostId"] == node1.name:
            v4_node1_replica_count += 1
        elif replica["hostId"] == node2.name:
            v4_node2_replica_count += 1
        elif replica["hostId"] == "":
            v4_failed_replica_count += 1
    assert v4_node1_replica_count == 1
    assert v4_node2_replica_count == 1
    assert v4_failed_replica_count > 0

    volume4 = volume4.updateReplicaCount(replicaCount=1)
    volume4 = wait_for_volume_replica_count(client, volume4_name, 2)

    v4_node1_replica_count = 0
    v4_node2_replica_count = 0
    v4_failed_replica_count = 0
    for replica in volume4.replicas:
        if replica["hostId"] == node1.name:
            v4_node1_replica_count += 1
        elif replica["hostId"] == node2.name:
            v4_node2_replica_count += 1
        elif replica["hostId"] == "":
            v4_failed_replica_count += 1
    assert v4_node1_replica_count + v4_node2_replica_count == 1
    assert v4_failed_replica_count == 1

    volume4 = volume4.updateDataLocality(dataLocality="disabled")
    volume4 = volume4.updateReplicaCount(replicaCount=2)

    running_replica_count = 0
    for _ in range(RETRY_COUNTS):
        volume4 = client.by_id_volume(volume4_name)
        running_replica_count = 0
        for r in volume4.replicas:
            if r.failedAt == "" and r.running is True:
                running_replica_count += 1
        if running_replica_count == 2:
            break
        time.sleep(RETRY_INTERVAL)
    assert running_replica_count == 2

    v4_node1_replica_count = 0
    v4_node2_replica_count = 0
    v4_node3_replica_count = 0
    for replica in volume4.replicas:
        wait_for_replica_running(client, volume4_name, replica["name"])
        if replica["hostId"] == node1.name:
            v4_node1_replica_count += 1
        elif replica["hostId"] == node2.name:
            v4_node2_replica_count += 1
        elif replica["hostId"] == node3.name:
            v4_node3_replica_count += 1
    assert v4_node1_replica_count == 1
    assert v4_node2_replica_count == 1
    assert v4_node3_replica_count == 0
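
# The retry loops above all poll for the same condition: the volume's sole
# replica landing on a given node. A hedged sketch of a reusable helper
# that could fold those loops together (wait_for_sole_replica_on_node is
# hypothetical, not part of common.py):
def wait_for_sole_replica_on_node(client, volume_name, node_name):
    # poll until the volume has exactly one replica on the expected node
    for _ in range(RETRY_COUNTS):
        v = client.by_id_volume(volume_name)
        if len(v.replicas) == 1 and v.replicas[0]['hostId'] == node_name:
            return v
        time.sleep(RETRY_INTERVAL)
    raise AssertionError("sole replica never moved to " + node_name)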
def test_setting_toleration():
    """
    Test toleration setting

    1. Verify that Kubernetes default tolerations cannot be used in the
       Longhorn toleration setting
    2. Use "key1=value1:NoSchedule; key2:NoExecute" as the toleration.
    3. Create a volume and attach it.
    4. Verify that the toleration setting cannot be updated while any
       volume is attached
    5. Generate and write `data1` into the volume
    6. Detach the volume.
    7. Update the `toleration` setting to the value from step 2.
    8. Wait for all the Longhorn components to restart with the new
       toleration
    9. Attach the volume again and verify the volume `data1`.
    10. Generate and write `data2` to the volume.
    11. Detach the volume.
    12. Clear the `toleration` setting.
    13. Wait for all the Longhorn components to restart with no toleration
    14. Attach the volume and validate `data2`.
    15. Generate and write `data3` to the volume.
    """
    client = get_longhorn_api_client()  # NOQA
    apps_api = get_apps_api_client()  # NOQA
    core_api = get_core_api_client()  # NOQA
    count = len(client.list_node())

    setting = client.by_id_setting(SETTING_TAINT_TOLERATION)

    with pytest.raises(Exception) as e:
        client.update(setting,
                      value=KUBERNETES_DEFAULT_TOLERATION + ":NoSchedule")
    assert "is considered as the key of Kubernetes default tolerations" \
        in str(e.value)
    with pytest.raises(Exception) as e:
        client.update(setting,
                      value="key1=value1:NoSchedule; key2:InvalidEffect")
    assert 'invalid effect' in str(e.value)

    setting_value_str = "key1=value1:NoSchedule; key2:NoExecute"
    setting_value_dict = \
        {"key1": {"key": "key1", "value": "value1",
                  "operator": "Equal", "effect": "NoSchedule"},
         "key2": {"key": "key2", "value": None,
                  "operator": "Exists", "effect": "NoExecute"}}

    volume_name = "test-toleration-vol"  # NOQA
    volume = create_and_check_volume(client, volume_name)
    volume.attach(hostId=get_self_host_id())
    volume = wait_for_volume_healthy(client, volume_name)
    with pytest.raises(Exception) as e:
        client.update(setting, value=setting_value_str)
    assert 'cannot modify toleration setting before all volumes are ' \
        'detached' in str(e.value)

    data1 = write_volume_random_data(volume)
    check_volume_data(volume, data1)

    volume.detach()
    wait_for_volume_detached(client, volume_name)

    setting = client.update(setting, value=setting_value_str)
    assert setting.value == setting_value_str
    wait_for_toleration_update(core_api, apps_api, count, setting_value_dict)

    client, node = wait_for_longhorn_node_ready()

    volume = client.by_id_volume(volume_name)
    volume.attach(hostId=node)
    volume = wait_for_volume_healthy(client, volume_name)
    check_volume_data(volume, data1)
    data2 = write_volume_random_data(volume)
    check_volume_data(volume, data2)
    volume.detach()
    wait_for_volume_detached(client, volume_name)

    # cleanup
    setting_value_str = ""
    setting_value_dict = {}
    setting = client.by_id_setting(SETTING_TAINT_TOLERATION)
    setting = client.update(setting, value=setting_value_str)
    assert setting.value == setting_value_str
    wait_for_toleration_update(core_api, apps_api, count, setting_value_dict)

    client, node = wait_for_longhorn_node_ready()

    volume = client.by_id_volume(volume_name)
    volume.attach(hostId=node)
    volume = wait_for_volume_healthy(client, volume_name)
    check_volume_data(volume, data2)
    data3 = write_volume_random_data(volume)
    check_volume_data(volume, data3)

    cleanup_volume(client, volume)
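
# The setting_value_dict above mirrors how the toleration string is parsed.
# A rough sketch of that parsing, assuming "key[=value]:Effect" entries
# separated by ';', with operator Equal when a value is present and Exists
# otherwise (_parse_toleration_setting is a hypothetical helper):
def _parse_toleration_setting(setting_str):
    tolerations = {}
    for entry in [s.strip() for s in setting_str.split(";") if s.strip()]:
        key_value, effect = entry.split(":")
        if "=" in key_value:
            key, value = key_value.split("=")
            operator = "Equal"
        else:
            key, value, operator = key_value, None, "Exists"
        tolerations[key] = {"key": key, "value": value,
                            "operator": operator, "effect": effect}
    return tolerations


# e.g. _parse_toleration_setting("key1=value1:NoSchedule; key2:NoExecute")
# yields the same dict as setting_value_dict above, and "" yields {}.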
def test_tag_scheduling_on_update(client, node_default_tags, volume_name):  # NOQA
    """
    Test that replicas get scheduled if a node/disk is updated with the
    proper tags.

    Test prerequisites:
    - set Replica Node Level Soft Anti-Affinity enabled

    1. Create a volume with tags that cannot be satisfied
    2. Wait for the volume to fail scheduling
    3. Update the node and disk with extra tags to satisfy the volume
    4. Verify that the volume has now been scheduled
    5. Attach the volume and check that the replicas have been scheduled
       properly
    """
    replica_node_soft_anti_affinity_setting = \
        client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    client.update(replica_node_soft_anti_affinity_setting, value="true")

    tag_spec = {
        "disk": ["ssd", "m2"],
        "expected": 1,
        "node": ["main", "fallback"]
    }
    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=3,
                         diskSelector=tag_spec["disk"],
                         nodeSelector=tag_spec["node"])
    volume = wait_for_volume_detached(client, volume_name)
    assert volume.diskSelector == tag_spec["disk"]
    assert volume.nodeSelector == tag_spec["node"]
    wait_scheduling_failure(client, volume_name)

    host_id = get_self_host_id()
    node = client.by_id_node(host_id)
    update_disks = get_update_disks(node.disks)
    update_disks[list(update_disks)[0]].tags = tag_spec["disk"]
    node = update_node_disks(client, node.name, disks=update_disks)
    set_node_tags(client, node, tag_spec["node"])
    scheduled = False
    for i in range(RETRY_COUNTS):
        v = client.by_id_volume(volume_name)
        if v.conditions.scheduled.status == "True":
            scheduled = True
        if scheduled:
            break
        sleep(RETRY_INTERVAL)
    assert scheduled

    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    nodes = client.list_node()
    node_mapping = {node.id: {
        "disk": node.disks[list(node.disks)[0]].tags,
        "node": node.tags
    } for node in nodes}
    assert len(volume.replicas) == 3
    check_volume_replicas(volume, tag_spec, node_mapping)

    cleanup_volume(client, volume)
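
# check_volume_replicas verifies tag-based placement; the underlying rule
# is a plain superset check. A sketch under the assumption that a replica
# may only land where the node carries every requested node tag and the
# disk every requested disk tag (_tags_satisfied is hypothetical):
def _tags_satisfied(tag_spec, node_tags, disk_tags):
    return set(tag_spec["node"]).issubset(set(node_tags or [])) and \
        set(tag_spec["disk"]).issubset(set(disk_tags or []))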
def test_node_delete_umount_disks(client):  # NOQA
    # create a test disk for the node
    disk_volume_name = 'vol-disk-1'
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk_path1 = create_host_disk(client, disk_volume_name,
                                  str(Gi), lht_hostId)
    disk1 = {"path": disk_path1, "allowScheduling": True,
             "storageReserved": SMALL_DISK_SIZE}

    update_disk = get_update_disks(disks)
    for disk in update_disk:
        disk["allowScheduling"] = False
    # add the new disk to the node
    update_disk.append(disk1)
    # save the disks to the node
    node = node.diskUpdate(disks=update_disk)
    node = common.wait_for_disk_update(client, lht_hostId, len(update_disk))
    assert len(node["disks"]) == len(update_disk)
    node = client.by_id_node(lht_hostId)
    assert len(node["disks"]) == len(update_disk)

    disks = node["disks"]
    # wait for the node controller to update the disk status
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", True)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageReserved", SMALL_DISK_SIZE)
            free, total = common.get_host_disk_size(disk_path1)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageAvailable", free)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageMaximum", total)

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for key, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            assert disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path1)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            assert not disk["allowScheduling"]

    # create a volume
    nodes = client.list_node()
    vol_name = common.generate_volume_name()
    volume = create_volume(client, vol_name, str(SMALL_DISK_SIZE),
                           lht_hostId, len(nodes))
    replicas = volume["replicas"]
    for replica in replicas:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        if id == lht_hostId:
            assert replica["dataPath"].startswith(disk_path1)

    # umount the disk
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for the node status to update
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", False)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageMaximum", 0)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_FALSE)

    # check the result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    update_disks = []
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == 0
            assert disk["storageAvailable"] == 0
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_FALSE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_FALSE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
            update_disks.append(disk)

    # deleting the umounted disk should raise an exception
    with pytest.raises(Exception) as e:
        node.diskUpdate(disks=update_disks)
    assert "disable the disk" in str(e.value)

    # update the other disks
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] != disk_path1:
            disk["allowScheduling"] = True
    test_update = get_update_disks(disks)
    node = node.diskUpdate(disks=test_update)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] != disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", True)
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] != disk_path1:
            assert disk["allowScheduling"]

    # mount the disk back
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    disk_volume = client.by_id_volume(disk_volume_name)
    dev = get_volume_endpoint(disk_volume)
    common.mount_disk(dev, mount_path)

    # wait for the node status to update
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", False)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_TRUE)

    # check the result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            free, total = common.get_host_disk_size(disk_path1)
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE

    # delete the volume and umount the disk
    cleanup_volume(client, vol_name)
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for the node status to update
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.iteritems():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", False)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageScheduled", 0)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageMaximum", 0)

    # test deleting the umounted disk
    node = client.by_id_node(lht_hostId)
    node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    cmd = ['rm', '-r', mount_path]
    subprocess.check_call(cmd)
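
# The test relies on the node controller noticing that the mount is gone.
# While debugging, the umount can be confirmed from the host side with a
# quick look at /proc/mounts (a hypothetical helper, not used by the suite):
def _is_mounted(mount_path):
    # each /proc/mounts line is "device mountpoint fstype options ..."
    with open("/proc/mounts") as f:
        return any(line.split()[1] == mount_path for line in f)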
def test_replica_auto_balance_zone_best_effort_with_data_locality(
        client, core_api, volume_name, pod):  # NOQA
    """
    Background:

    Given set `replica-soft-anti-affinity` to `true`.
    And set `replica-zone-soft-anti-affinity` to `true`.
    And set `default-data-locality` to `best-effort`.
    And set `replicaAutoBalance` to `best-effort`.
    And set node-1 to zone-1.
        set node-2 to zone-1.
        set node-3 to zone-2.
    And create volume with 2 replicas.
    And create pv for volume.
    And create pvc for volume.

    Scenario Outline: replica auto-balance zones with best-effort should
                      not remove pod-local replicas when data locality is
                      enabled (best-effort).

    Given create and wait pod on <pod-node>.
    And disable scheduling and evict node-3.
    And count replicas on each node.
    And 1 replica running on <pod-node>.
        1 replica running on <duplicate-node>.
        0 replicas running on node-3.

    When enable scheduling for node-3.

    Then count replicas on each node.
    And 1 replica running on <pod-node>.
        0 replicas running on <duplicate-node>.
        1 replica running on node-3.
    And count replicas in each zone.
    And 1 replica running in zone-1.
        1 replica running in zone-2.
    And loop 3 times, waiting 5 seconds each, and count replicas on each
        node, to ensure no additional scheduling is happening.
        1 replica running on <pod-node>.
        0 replicas running on <duplicate-node>.
        1 replica running on node-3.
    And delete pod.

    Examples:
        | pod-node | duplicate-node |
        | node-1   | node-2         |
        | node-2   | node-1         |
        | node-1   | node-2         |
    """
    common.update_setting(client,
                          SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY, "true")
    common.update_setting(client,
                          SETTING_REPLICA_ZONE_SOFT_ANTI_AFFINITY, "true")
    common.update_setting(client,
                          SETTING_DEFAULT_DATA_LOCALITY, "best-effort")
    common.update_setting(client,
                          SETTING_REPLICA_AUTO_BALANCE, "best-effort")

    n1, n2, n3 = client.list_node()

    set_k8s_node_zone_label(core_api, n1.name, ZONE1)
    set_k8s_node_zone_label(core_api, n2.name, ZONE1)
    set_k8s_node_zone_label(core_api, n3.name, ZONE2)
    wait_longhorn_node_zone_updated(client)

    n_replicas = 2
    volume = create_and_check_volume(client, volume_name,
                                     num_of_replicas=n_replicas)
    common.create_pv_for_volume(client, core_api, volume, volume_name)
    common.create_pvc_for_volume(client, core_api, volume, volume_name)

    pod['spec']['volumes'] = [{
        "name": "pod-data",
        "persistentVolumeClaim": {
            "claimName": volume_name
        }
    }]

    for i in range(1, 4):
        pod_node_name = n2.name if i % 2 == 0 else n1.name
        pod['spec']['nodeSelector'] = {
            "kubernetes.io/hostname": pod_node_name
        }
        common.create_and_wait_pod(core_api, pod)

        client.update(n3, allowScheduling=False, evictionRequested=True)

        duplicate_node = [n1.name, n2.name]
        duplicate_node.remove(pod_node_name)
        for _ in range(RETRY_COUNTS):
            pod_node_r_count = common.get_host_replica_count(
                client, volume_name, pod_node_name, chk_running=True)
            duplicate_node_r_count = common.get_host_replica_count(
                client, volume_name, duplicate_node[0], chk_running=True)
            balance_node_r_count = common.get_host_replica_count(
                client, volume_name, n3.name, chk_running=False)

            if pod_node_r_count == duplicate_node_r_count == 1 and \
                    balance_node_r_count == 0:
                break

            time.sleep(RETRY_INTERVAL)
        assert pod_node_r_count == 1
        assert duplicate_node_r_count == 1
        assert balance_node_r_count == 0

        client.update(n3, allowScheduling=True)

        for _ in range(RETRY_COUNTS):
            pod_node_r_count = common.get_host_replica_count(
                client, volume_name, pod_node_name, chk_running=True)
            duplicate_node_r_count = common.get_host_replica_count(
                client, volume_name, duplicate_node[0], chk_running=False)
            balance_node_r_count = common.get_host_replica_count(
                client, volume_name, n3.name, chk_running=True)

            if pod_node_r_count == balance_node_r_count == 1 and \
                    duplicate_node_r_count == 0:
                break

            time.sleep(RETRY_INTERVAL)
        assert pod_node_r_count == 1
        assert duplicate_node_r_count == 0
        assert balance_node_r_count == 1

        z1_r_count = get_zone_replica_count(client, volume_name, ZONE1,
                                            chk_running=True)
        z2_r_count = get_zone_replica_count(client, volume_name, ZONE2,
                                            chk_running=True)
        assert z1_r_count == z2_r_count == 1

        # loop 3 times, waiting 5 seconds each, to ensure there is no
        # re-scheduling happening
        for _ in range(3):
            time.sleep(5)
            assert pod_node_r_count == common.get_host_replica_count(
                client, volume_name, pod_node_name, chk_running=True)
            assert duplicate_node_r_count == common.get_host_replica_count(
                client, volume_name, duplicate_node[0], chk_running=False)
            assert balance_node_r_count == common.get_host_replica_count(
                client, volume_name, n3.name, chk_running=True)

        common.delete_and_wait_pod(core_api, pod['metadata']['name'])
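
# get_zone_replica_count tallies replicas per zone. A sketch of the idea,
# assuming each Longhorn node reports its zone and each replica its hostId
# (_zone_replica_count is hypothetical; the real helper lives in common.py):
def _zone_replica_count(client, volume_name, zone):
    node_zone = {n.name: n.zone for n in client.list_node()}
    volume = client.by_id_volume(volume_name)
    return sum(1 for r in volume.replicas
               if node_zone.get(r.hostId) == zone)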
def test_replica_scheduler_update_minimal_available(client):  # NOQA
    minimal_available_setting = client.by_id_setting(
        SETTING_STORAGE_MINIMAL_AVAILABLE_PERCENTAGE)
    old_minimal_setting = minimal_available_setting["value"]

    nodes = client.list_node()
    expect_node_disk = {}
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk

    # set the storage minimal available percentage to 100,
    # so that no replica can be scheduled
    minimal_available_setting = client.update(minimal_available_setting,
                                              value="100")
    # wait for disk state
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_conditions(client, node["name"], fsid,
                                     DISK_CONDITION_SCHEDULABLE,
                                     CONDITION_STATUS_FALSE)

    lht_hostId = get_self_host_id()
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=SIZE,
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)

    # set the storage minimal available percentage back to the default
    # value (10)
    minimal_available_setting = client.update(minimal_available_setting,
                                              value=old_minimal_setting)
    # wait for disk state
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_conditions(client, node["name"], fsid,
                                     DISK_CONDITION_SCHEDULABLE,
                                     CONDITION_STATUS_TRUE)

    # check volume status
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check that all replicas are scheduled to the default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean up volume and disk
    cleanup_volume(client, vol_name)
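
# The Schedulable condition above flips with the minimal-available rule.
# A sketch of the check this test exercises, assuming a disk stays
# schedulable while its available storage exceeds
# storageMaximum * minimalAvailablePercentage / 100 (with 100 nothing
# qualifies, with the default 10 most disks do; hypothetical helper):
def _disk_schedulable(storage_available, storage_maximum, minimal_pct):
    return storage_available > storage_maximum * minimal_pct / 100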
def test_setting_toleration_extra(core_api, apps_api):  # NOQA
    """
    Steps:
    1. Set Kubernetes Taint Toleration to:
       `ex.com/foobar:NoExecute;ex.com/foobar:NoSchedule`.
    2. Verify that all system components have the 2 tolerations
       `ex.com/foobar:NoExecute; ex.com/foobar:NoSchedule`.
       Verify that UI, manager, and driver deployer don't restart and
       don't have the tolerations.
    3. Set Kubernetes Taint Toleration to:
       `node-role.kubernetes.io/controlplane=true:NoSchedule`.
    4. Verify that all system components have the toleration
       `node-role.kubernetes.io/controlplane=true:NoSchedule`,
       and don't have the 2 tolerations
       `ex.com/foobar:NoExecute;ex.com/foobar:NoSchedule`.
       Verify that UI, manager, and driver deployer don't restart and
       don't have the toleration.
    5. Set Kubernetes Taint Toleration to the special value: `:`.
    6. Verify that all system components have the toleration with
       `operator: Exists` and the other fields of the toleration empty.
       Verify that all system components don't have the toleration
       `node-role.kubernetes.io/controlplane=true:NoSchedule`.
       Verify that UI, manager, and driver deployer don't restart and
       don't have the toleration.
    7. Clear the Kubernetes Taint Toleration setting.

    Note: system components are workloads other than UI, manager, and
    driver deployer
    """
    settings = [
        {
            "value": "ex.com/foobar:NoExecute;ex.com/foobar:NoSchedule",
            "expect": [
                {
                    "key": "ex.com/foobar",
                    "value": None,
                    "operator": "Exists",
                    "effect": "NoExecute"
                },
                {
                    "key": "ex.com/foobar",
                    "value": None,
                    "operator": "Exists",
                    "effect": "NoSchedule"
                },
            ],
        },
        {
            "value": "node-role.kubernetes.io/controlplane=true:NoSchedule",
            "expect": [
                {
                    "key": "node-role.kubernetes.io/controlplane",
                    "value": "true",
                    "operator": "Equal",
                    "effect": "NoSchedule"
                },
            ],
        },
        # Skip this special toleration for now because it makes Longhorn
        # deploy manager pods on control/etcd nodes, and the control/etcd
        # nodes become "down" after the test clears this toleration.
        # We will enable this case once we implement logic for deleting
        # failed nodes.
        # {
        #     "value": ":",
        #     "expect": [
        #         {
        #             "key": None,
        #             "value": None,
        #             "operator": "Exists",
        #             "effect": None,
        #         },
        #     ]
        # },
        {
            "value": "",
            "expect": [],
        },
    ]

    chk_removed_tolerations = []
    for setting in settings:
        client = get_longhorn_api_client()  # NOQA
        taint_toleration = client.by_id_setting(SETTING_TAINT_TOLERATION)
        updated = client.update(taint_toleration, value=setting["value"])
        assert updated.value == setting["value"]

        node_count = len(client.list_node())
        wait_for_toleration_update(core_api, apps_api, node_count,
                                   setting["expect"],
                                   chk_removed_tolerations)
        chk_removed_tolerations = setting["expect"]
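
# wait_for_toleration_update ultimately compares the expected tolerations
# with what the pods actually carry. A sketch of that comparison for a
# single pod, assuming pod_tolerations is the list of V1Toleration objects
# returned by the Kubernetes API (_pod_has_tolerations is hypothetical):
def _pod_has_tolerations(pod_tolerations, expected):
    actual = {(t.key, t.value, t.operator, t.effect)
              for t in pod_tolerations or []}
    return all((e["key"], e["value"], e["operator"], e["effect"]) in actual
               for e in expected)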
def test_allow_volume_creation_with_degraded_availability_csi(
        client, core_api, apps_api, make_deployment_with_pvc):  # NOQA
    """
    Test Allow Volume Creation with Degraded Availability (CSI)

    Requirement:
    1. Set `allow-volume-creation-with-degraded-availability` to true.
    2. Set `node-level-soft-anti-affinity` to false.

    Steps:
    1. Disable scheduling for node 3.
    2. Create a Deployment Pod with a volume and 3 replicas.
        1. After the volume is attached, a scheduling error should be seen.
    3. Write data to the Pod.
    4. Scale down the deployment to 0 to detach the volume.
        1. The scheduled condition should become true.
    5. Scale the deployment back up to 1 and verify the data.
        1. The scheduled condition should become false.
    6. Enable scheduling for node 3.
        1. The volume should start rebuilding on node 3 soon.
        2. Once the rebuilding starts, the scheduled condition should
           become true.
    7. Once the rebuild finishes, scale the deployment down and back up to
       verify the data.
    """
    setting = client.by_id_setting(common.SETTING_DEGRADED_AVAILABILITY)
    client.update(setting, value="true")
    setting = client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    client.update(setting, value="false")

    nodes = client.list_node()
    node3 = nodes[2]
    client.update(node3, allowScheduling=False)

    vol = common.create_and_check_volume(client, generate_volume_name(),
                                         size=str(500 * Mi))

    pv_name = vol.name + "-pv"
    common.create_pv_for_volume(client, core_api, vol, pv_name)
    pvc_name = vol.name + "-pvc"
    common.create_pvc_for_volume(client, core_api, vol, pvc_name)

    deployment_name = vol.name + "-dep"
    deployment = make_deployment_with_pvc(deployment_name, pvc_name)
    deployment["spec"]["replicas"] = 3
    apps_api.create_namespaced_deployment(body=deployment,
                                          namespace='default')
    common.wait_for_volume_status(client, vol.name,
                                  common.VOLUME_FIELD_STATE,
                                  common.VOLUME_STATE_ATTACHED)
    common.wait_scheduling_failure(client, vol.name)

    data_path = "/data/test"
    pod = common.wait_and_get_any_deployment_pod(core_api, deployment_name)
    common.write_pod_volume_random_data(core_api, pod.metadata.name,
                                        data_path, common.DATA_SIZE_IN_MB_2)
    created_md5sum = get_pod_data_md5sum(core_api, pod.metadata.name,
                                         data_path)

    deployment['spec']['replicas'] = 0
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment_name)
    vol = common.wait_for_volume_detached(client, vol.name)
    assert vol.conditions[VOLUME_CONDITION_SCHEDULED]['status'] == "True"

    deployment['spec']['replicas'] = 1
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment_name)
    common.wait_for_volume_status(client, vol.name,
                                  common.VOLUME_FIELD_STATE,
                                  common.VOLUME_STATE_ATTACHED)
    common.wait_for_volume_condition_scheduled(
        client, vol.name, "status", common.CONDITION_STATUS_FALSE)

    pod = common.wait_and_get_any_deployment_pod(core_api, deployment_name)
    assert created_md5sum == get_pod_data_md5sum(core_api,
                                                 pod.metadata.name,
                                                 data_path)

    client.update(node3, allowScheduling=True)
    common.wait_for_rebuild_start(client, vol.name)
    vol = client.by_id_volume(vol.name)
    assert vol.conditions[VOLUME_CONDITION_SCHEDULED]['status'] == "True"
    common.wait_for_rebuild_complete(client, vol.name)

    deployment['spec']['replicas'] = 0
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment_name)
    common.wait_for_volume_detached(client, vol.name)

    deployment['spec']['replicas'] = 1
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment_name)
    common.wait_for_volume_status(client, vol.name,
                                  common.VOLUME_FIELD_STATE,
                                  common.VOLUME_STATE_ATTACHED)

    pod = common.wait_and_get_any_deployment_pod(core_api, deployment_name)
    assert created_md5sum == get_pod_data_md5sum(core_api,
                                                 pod.metadata.name,
                                                 data_path)
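
# Scaling the deployment between 0 and 1 is repeated several times above.
# A hedged sketch of a small helper that could fold those steps together
# (_scale_deployment is hypothetical, not part of the suite; it assumes
# the deployment dict carries its name under metadata):
def _scale_deployment(apps_api, deployment, replicas, namespace='default'):
    deployment['spec']['replicas'] = replicas
    apps_api.patch_namespaced_deployment(
        body=deployment, namespace=namespace,
        name=deployment['metadata']['name'])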
def test_replica_cleanup(client):  # NOQA
    nodes = client.list_node()
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    extra_disk_path = create_host_disk(client, "extra-disk",
                                       "10G", lht_hostId)
    extra_disk = {"path": extra_disk_path, "allowScheduling": True}
    update_disks = get_update_disks(node["disks"])
    update_disks.append(extra_disk)
    node = node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    extra_disk_fsid = ""
    for fsid, disk in node["disks"].iteritems():
        if disk["path"] == extra_disk_path:
            extra_disk_fsid = fsid
            break

    for node in nodes:
        # disable all the disks except the ones on the current node
        if node["name"] == lht_hostId:
            continue
        for fsid, disk in node["disks"].iteritems():
            break
        disk["allowScheduling"] = False
        update_disks = get_update_disks(node["disks"])
        node.diskUpdate(disks=update_disks)
        node = wait_for_disk_status(client, node["name"], fsid,
                                    "allowScheduling", False)

    vol_name = common.generate_volume_name()
    # more replicas, make sure both default and extra disk will get one
    volume = create_volume(client, vol_name, str(Gi), lht_hostId, 5)
    data_paths = []
    for replica in volume["replicas"]:
        data_paths.append(replica["dataPath"])

    # data path should exist now
    for data_path in data_paths:
        assert exec_nsenter("ls {}".format(data_path))

    cleanup_volume(client, vol_name)

    # data path should be gone due to the cleanup of replica
    for data_path in data_paths:
        with pytest.raises(subprocess.CalledProcessError):
            exec_nsenter("ls {}".format(data_path))

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk = disks[extra_disk_fsid]
    disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_status(client, lht_hostId,
                                extra_disk_fsid,
                                "allowScheduling", False)
    wait_for_disk_status(client, lht_hostId, extra_disk_fsid,
                         "storageScheduled", 0)

    disks = node["disks"]
    disk = disks[extra_disk_fsid]
    assert not disk["allowScheduling"]
    disks.pop(extra_disk_fsid)
    update_disks = get_update_disks(disks)
    node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))

    cleanup_host_disk(client, 'extra-disk')