def test_node_controller_sync_storage_available(client):  # NOQA
    """
    Verify that the node controller keeps a disk's storageAvailable field
    in sync with the actual free space on the backing filesystem.
    """
    lht_hostId = get_self_host_id()

    # create a disk to test storageAvailable
    node = client.by_id_node(lht_hostId)
    test_disk_path = create_host_disk(client, "vol-test", SIZE, lht_hostId)
    test_disk = {"path": test_disk_path, "allowScheduling": True}
    update_disks = get_update_disks(node["disks"])
    update_disks.append(test_disk)
    node = node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))
    assert len(node["disks"]) == len(update_disks)

    # write a fixed amount of data into the disk to change its free space
    test_file_path = os.path.join(test_disk_path, TEST_FILE)
    if os.path.exists(test_file_path):
        os.remove(test_file_path)
    cmd = ['dd', 'if=/dev/zero', 'of=' + test_file_path,
           'bs=1M', 'count=1']
    subprocess.check_call(cmd)
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]

    # wait for the node controller to update the disk status
    expect_disk = {}
    free, total = common.get_host_disk_size(test_disk_path)
    for fsid, disk in disks.items():
        if disk["path"] == test_disk_path:
            node = wait_for_disk_status(client, lht_hostId, fsid,
                                        "storageAvailable", free)
            expect_disk = node["disks"][fsid]
            break
    assert expect_disk["storageAvailable"] == free

    os.remove(test_file_path)

    # cleanup test disks: disable scheduling first, then delete the disk
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    wait_fsid = ''
    for fsid, disk in disks.items():
        if disk["path"] == test_disk_path:
            wait_fsid = fsid
            disk["allowScheduling"] = False
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_status(client, lht_hostId, wait_fsid,
                                "allowScheduling", False)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] == test_disk_path:
            # safe to mutate here: we break before the next iteration
            disks.pop(fsid)
            break
    update_disks = get_update_disks(disks)
    node = node.diskUpdate(disks=update_disks)
    node = wait_for_disk_update(client, lht_hostId, len(update_disks))
    assert len(node["disks"]) == len(update_disks)
    cleanup_host_disk(client, 'vol-test')
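# The tests in this file poll node state instead of asserting immediately,
# because the node controller syncs disk fields asynchronously. The real
# wait_for_disk_status helper lives in common.py; the sketch below is a
# hypothetical illustration of the assumed polling pattern (the name, the
# retry constants, and the timeout behavior are assumptions, not the
# verified implementation).
import time

SKETCH_RETRY_COUNTS = 180    # assumed polling budget
SKETCH_RETRY_INTERVAL = 1    # assumed seconds between polls


def _sketch_wait_for_disk_field(client, node_name, fsid, field, value):
    # Re-fetch the node until the given disk field reaches the expected
    # value, then return the refreshed node object.
    node = client.by_id_node(node_name)
    for _ in range(SKETCH_RETRY_COUNTS):
        node = client.by_id_node(node_name)
        disks = node["disks"]
        if fsid in disks and disks[fsid][field] == value:
            return node
        time.sleep(SKETCH_RETRY_INTERVAL)
    # On timeout, fail loudly with the last observed state.
    assert fsid in node["disks"] and node["disks"][fsid][field] == value
    return node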
def test_node_delete_umount_disks(client):  # NOQA
    """
    Verify disk handling when the backing filesystem is unmounted: the
    disk becomes unschedulable and not-ready, it cannot be deleted while
    it still holds replicas, and it recovers once mounted back.
    """
    # create test disks for node
    disk_volume_name = 'vol-disk-1'
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk_path1 = create_host_disk(client, disk_volume_name,
                                  str(Gi), lht_hostId)
    disk1 = {"path": disk_path1, "allowScheduling": True,
             "storageReserved": SMALL_DISK_SIZE}

    update_disk = get_update_disks(disks)
    for disk in update_disk:
        disk["allowScheduling"] = False
    # add new disk for node
    update_disk.append(disk1)

    # save disks to node
    node = node.diskUpdate(disks=update_disk)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disk))
    assert len(node["disks"]) == len(update_disk)
    node = client.by_id_node(lht_hostId)
    assert len(node["disks"]) == len(update_disk)
    disks = node["disks"]

    # wait for the node controller to update the disk status
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", True)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageReserved", SMALL_DISK_SIZE)
            free, total = common.get_host_disk_size(disk_path1)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageAvailable", free)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageMaximum", total)

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for key, disk in disks.items():
        if disk["path"] == disk_path1:
            assert disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path1)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            assert not disk["allowScheduling"]

    # create a volume; its replica on this host must land on the new disk
    nodes = client.list_node()
    vol_name = common.generate_volume_name()
    volume = create_volume(client, vol_name, str(SMALL_DISK_SIZE),
                           lht_hostId, len(nodes))
    replicas = volume["replicas"]
    for replica in replicas:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        if id == lht_hostId:
            assert replica["dataPath"].startswith(disk_path1)

    # umount the disk
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for the node status to be updated
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", False)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageMaximum", 0)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_FALSE)

    # check result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    update_disks = []
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == 0
            assert disk["storageAvailable"] == 0
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_FALSE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_FALSE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
            update_disks.append(disk)

    # deleting the unmounted disk should raise an exception while it
    # still holds a scheduled replica
    with pytest.raises(Exception) as e:
        node.diskUpdate(disks=update_disks)
    assert "disable the disk" in str(e.value)

    # update other disks
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] != disk_path1:
            disk["allowScheduling"] = True
    test_update = get_update_disks(disks)
    node = node.diskUpdate(disks=test_update)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] != disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", True)
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] != disk_path1:
            assert disk["allowScheduling"]

    # mount the disk back
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    disk_volume = client.by_id_volume(disk_volume_name)
    dev = get_volume_endpoint(disk_volume)
    common.mount_disk(dev, mount_path)

    # wait for the node status to be updated
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", False)
            wait_for_disk_conditions(client, lht_hostId, fsid,
                                     DISK_CONDITION_READY,
                                     CONDITION_STATUS_TRUE)

    # check result
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            free, total = common.get_host_disk_size(disk_path1)
            assert not disk["allowScheduling"]
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == SMALL_DISK_SIZE
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE
        else:
            conditions = disk["conditions"]
            assert conditions[DISK_CONDITION_READY]["status"] == \
                CONDITION_STATUS_TRUE
            assert conditions[DISK_CONDITION_SCHEDULABLE]["status"] == \
                CONDITION_STATUS_TRUE

    # delete the volume and umount the disk
    cleanup_volume(client, vol_name)
    mount_path = os.path.join(DIRECTORY_PATH, disk_volume_name)
    common.umount_disk(mount_path)

    # wait for the node status to be updated
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", False)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageScheduled", 0)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageMaximum", 0)

    # deleting the unmounted disk should now succeed
    node = client.by_id_node(lht_hostId)
    node.diskUpdate(disks=update_disks)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disks))
    assert len(node["disks"]) == len(update_disks)
    cmd = ['rm', '-r', mount_path]
    subprocess.check_call(cmd)
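# common.umount_disk and common.mount_disk are used above to simulate a
# disk disappearing from and returning to the node. The sketches below are
# hypothetical thin subprocess wrappers under distinct names; the real
# helpers in common.py may use different flags or extra validation.
def _sketch_umount_disk(mount_path):
    # Detach the filesystem backing the disk under test.
    subprocess.check_call(['umount', mount_path])


def _sketch_mount_disk(dev, mount_path):
    # Ensure the mount point exists, then mount the device back.
    subprocess.check_call(['mkdir', '-p', mount_path])
    subprocess.check_call(['mount', dev, mount_path])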
def test_node_disk_update(client):  # NOQA
    """
    Verify diskUpdate validation (duplicate filesystem, deleting disks in
    use, invalid storageReserved) and normal add/update/delete of disks.
    """
    lht_hostId = get_self_host_id()
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]

    # adding the same disk under a different mount path should raise an
    # exception
    with pytest.raises(Exception) as e:
        disk = {"path": "/var/lib", "allowScheduling": True,
                "storageReserved": 2 * Gi}
        update_disk = get_update_disks(disks)
        update_disk.append(disk)
        node = node.diskUpdate(disks=update_disk)
    assert "the same file system" in str(e.value)

    # deleting a disk that is still enabled should raise an exception
    with pytest.raises(Exception) as e:
        node.diskUpdate(disks=[])
    assert "disable the disk" in str(e.value)

    # an invalid storageReserved value should raise an exception
    with pytest.raises(Exception) as e:
        for fsid, disk in disks.items():
            disk["storageReserved"] = disk["storageMaximum"] + 1 * Gi
        update_disk = get_update_disks(disks)
        node.diskUpdate(disks=update_disk)
    assert "storageReserved setting of disk" in str(e.value)

    # create multiple disks for node
    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    disk_path1 = create_host_disk(client, 'vol-disk-1',
                                  str(Gi), lht_hostId)
    disk1 = {"path": disk_path1, "allowScheduling": True}
    disk_path2 = create_host_disk(client, 'vol-disk-2',
                                  str(Gi), lht_hostId)
    disk2 = {"path": disk_path2, "allowScheduling": True}

    update_disk = get_update_disks(disks)
    # add new disks for node
    update_disk.append(disk1)
    update_disk.append(disk2)

    # save disks to node
    node = node.diskUpdate(disks=update_disk)
    node = common.wait_for_disk_update(client, lht_hostId,
                                       len(update_disk))
    assert len(node["disks"]) == len(update_disk)
    node = client.by_id_node(lht_hostId)
    assert len(node["disks"]) == len(update_disk)

    # update disks
    disks = node["disks"]
    update_disk = get_update_disks(disks)
    for disk in update_disk:
        # keep the default disk unchanged for other tests
        if disk["path"] == disk_path1 or disk["path"] == disk_path2:
            disk["allowScheduling"] = False
            disk["storageReserved"] = SMALL_DISK_SIZE
    node = node.diskUpdate(disks=update_disk)
    disks = node["disks"]

    # wait for the node controller to update the disk status
    for fsid, disk in disks.items():
        if disk["path"] == disk_path1 or disk["path"] == disk_path2:
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "allowScheduling", False)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageReserved", SMALL_DISK_SIZE)
            free, total = common.get_host_disk_size(disk_path1)
            wait_for_disk_status(client, lht_hostId, fsid,
                                 "storageAvailable", free)

    node = client.by_id_node(lht_hostId)
    disks = node["disks"]
    for key, disk in disks.items():
        if disk["path"] == disk_path1:
            assert not disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path1)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free
        elif disk["path"] == disk_path2:
            assert not disk["allowScheduling"]
            assert disk["storageReserved"] == SMALL_DISK_SIZE
            assert disk["storageScheduled"] == 0
            free, total = common.get_host_disk_size(disk_path2)
            assert disk["storageMaximum"] == total
            assert disk["storageAvailable"] == free

    # delete the other disks so that only the default disk remains
    update_disk = get_update_disks(disks)
    remain_disk = []
    for disk in update_disk:
        if disk["path"] != disk_path1 and disk["path"] != disk_path2:
            remain_disk.append(disk)
    node = node.diskUpdate(disks=remain_disk)
    node = wait_for_disk_update(client, lht_hostId, len(remain_disk))
    assert len(node["disks"]) == len(remain_disk)

    # cleanup disks
    cleanup_host_disk(client, 'vol-disk-1', 'vol-disk-2')
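# get_update_disks converts the node's disk map (keyed by filesystem id)
# into the plain list of disk specs that diskUpdate accepts. A minimal
# hypothetical sketch of that conversion, using only the fields exercised
# by the tests above (the real helper may carry additional fields):
def _sketch_get_update_disks(disks):
    update_disks = []
    for fsid, disk in disks.items():
        update_disks.append({
            "path": disk["path"],
            "allowScheduling": disk["allowScheduling"],
            "storageReserved": disk["storageReserved"],
        })
    return update_disks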