def csi_io_test(client, core_api, csi_pv, pvc, pod_make, base_image=""):  # NOQA
    """Write data through a named CSI PV from one pod, then mount the same
    Longhorn volume from a second pod and verify the data survived."""
    volume_name = generate_volume_name()
    writer_pod = 'csi-io-test'
    create_and_wait_csi_pod_named_pv(volume_name, writer_pod, client,
                                     core_api, csi_pv, pvc, pod_make,
                                     base_image, "")
    expected = generate_random_data(VOLUME_RWTEST_SIZE)
    write_volume_data(core_api, writer_pod, expected)

    # Tear down the writer pod; Longhorn should detach the volume.
    delete_and_wait_pod(core_api, writer_pod)
    common.wait_for_volume_detached(client, csi_pv['metadata']['name'])

    # Re-point the PV/PVC specs at the same underlying volume and mount
    # it from a brand-new pod.
    reader_pod = 'csi-io-test-2'
    pod = pod_make(name=reader_pod)
    pod['spec']['volumes'] = [create_pvc_spec(volume_name)]
    csi_pv['metadata']['name'] = volume_name
    csi_pv['spec']['csi']['volumeHandle'] = volume_name
    pvc['metadata']['name'] = volume_name
    pvc['spec']['volumeName'] = volume_name
    update_storageclass_references(CSI_PV_TEST_STORAGE_NAME, csi_pv, pvc)
    create_and_wait_pod(core_api, pod)

    # Data written by the first pod must be visible after the remount.
    assert read_volume_data(core_api, reader_pod) == expected
def migration_rollback_test(clients, volume_name, base_image=""):  # NOQA
    """Start a volume migration between two nodes, then roll it back and
    verify the volume stays attached to the original node."""
    client = get_random_client(clients)
    hosts = clients.keys()
    source_host, target_host = hosts[0], hosts[1]

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=REPLICA_COUNT,
                                  baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    volume.attach(hostId=source_host)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # Kick off a migration towards the second node.
    volume = volume.migrationStart(nodeId=target_host)
    attached_nodes = get_volume_attached_nodes(volume)
    assert source_host in attached_nodes
    assert volume["migrationNodeID"] == target_host

    # Once the migration is ready, abort it instead of confirming.
    volume = common.wait_for_volume_migration_ready(client, volume_name)
    volume = volume.migrationRollback()
    volume = common.wait_for_volume_migration_node(client, volume_name,
                                                   source_host)
    assert volume["migrationNodeID"] == ""

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
def test_recurring_job_in_volume_creation(clients, volume_name):  # NOQA
    """Create a volume with recurring jobs attached at creation time,
    rejecting duplicate jobs, and verify the jobs ran after 5 minutes."""
    # Grab an arbitrary (host_id, client) pair from the cluster map.
    for host_id, client in clients.iteritems():  # NOQA
        break

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name, size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2, recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def csi_io_test(client, core_api, csi_pv, pvc, pod_make, base_image=""):  # NOQA
    """Write data into a named CSI PV from one pod, re-create the PV/PVC
    objects around the same Longhorn volume, and read the data back from
    a second pod."""
    pv_name = generate_volume_name()
    pod_name = 'csi-io-test'
    create_and_wait_csi_pod_named_pv(pv_name, pod_name, client, core_api,
                                     csi_pv, pvc, pod_make, base_image, "")

    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    # Pod deletion should release the volume.
    common.wait_for_volume_detached(client, csi_pv['metadata']['name'])

    pod_name = 'csi-io-test-2'
    pod = pod_make(name=pod_name)
    pod['spec']['volumes'] = [
        create_pvc_spec(pv_name)
    ]
    # Re-target the PV/PVC specs at the already existing volume.
    csi_pv['metadata']['name'] = pv_name
    csi_pv['spec']['csi']['volumeHandle'] = pv_name
    pvc['metadata']['name'] = pv_name
    pvc['spec']['volumeName'] = pv_name
    update_storageclass_references(CSI_PV_TEST_STORAGE_NAME, csi_pv, pvc)

    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
def migration_confirm_test(clients, volume_name, base_image=""):  # NOQA
    """Migrate a volume from one node to another, confirm the migration,
    and verify the volume ends up attached to the new node."""
    client = get_random_client(clients)
    hosts = clients.keys()
    host1 = hosts[0]
    host2 = hosts[1]

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=REPLICA_COUNT,
                                  baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    volume.attach(hostId=host1)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = volume.migrationStart(nodeId=host2)
    attached_nodes = get_volume_attached_nodes(volume)
    assert host1 in attached_nodes
    assert volume["migrationNodeID"] == host2

    # Confirming before the migration is ready must be rejected.
    with pytest.raises(Exception) as e:
        volume.migrationConfirm()
    assert "migration is not ready" in str(e.value)

    volume = common.wait_for_volume_migration_ready(client, volume_name)
    volume = volume.migrationConfirm()
    volume = common.wait_for_volume_migration_node(client, volume_name,
                                                   host2)
    assert volume["migrationNodeID"] == ""

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
def test_ha_prohibit_deleting_last_replica(client, volume_name):  # NOQA
    """Removing the only replica of an attached volume must be refused."""
    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=1)
    volume = common.wait_for_volume_detached(client, volume_name)

    # Sanity-check the freshly created volume.
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 1
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert len(volume["replicas"]) == 1
    last_replica = volume["replicas"][0]

    # Deleting the sole healthy replica would lose all data, so the API
    # must reject the request.
    with pytest.raises(Exception) as e:
        volume.replicaRemove(name=last_replica["name"])
    assert "no other healthy replica available" in str(e.value)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    assert len(client.list_volume()) == 0
def test_csi_io(client, core_api, csi_pv, pvc, pod):  # NOQA
    """
    Test that input and output on a statically defined CSI volume works as
    expected.

    Fixtures are torn down here in reverse order that they are specified as a
    parameter. Take caution when reordering test fixtures.
    """
    pod_name = 'csi-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['volumeName'] = csi_pv['metadata']['name']
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_pv_storage(core_api, client, csi_pv, pvc)
    create_and_wait_pod(core_api, pod)
    write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    # The volume should detach once no pod is using it.
    common.wait_for_volume_detached(client, csi_pv['metadata']['name'])

    # Remount the same volume from a second pod and verify the data.
    pod_name = 'csi-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
def test_flexvolume_io(client, core_api, flexvolume, pod):  # NOQA
    """
    Test that input and output on a statically defined volume works as
    expected.

    Fixtures are torn down here in reverse order that they are specified as a
    parameter. Take caution when reordering test fixtures.
    """
    pod_name = 'flexvolume-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['containers'][0]['volumeMounts'][0]['name'] = \
        flexvolume['name']
    pod['spec']['volumes'] = [
        flexvolume
    ]
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_and_wait_pod(core_api, pod)
    common.write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    # The volume should detach once the pod is gone.
    wait_for_volume_detached(client, flexvolume["name"])

    # Mount the same volume from a new pod and verify the data survived.
    pod_name = 'volume-driver-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
def test_provisioner_io(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that input and output on a StorageClass provisioned
    PersistentVolumeClaim works as expected.

    Fixtures are torn down here in reverse order that they are specified as a
    parameter. Take caution when reordering test fixtures.
    """
    # Prepare pod and volume specs.
    pod_name = 'provisioner-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [create_pvc_spec(pvc['metadata']['name'])]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    # Resolve the Longhorn volume the provisioner created for the PVC.
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])
    write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    common.wait_for_volume_detached(client, pvc_volume_name)

    # Remount the provisioned volume from a second pod and verify.
    pod_name = 'flexvolume-provisioner-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
def test_provisioner_io(client, core_api, storage_class, pvc, pod):  # NOQA
    """
    Test that input and output on a StorageClass provisioned
    PersistentVolumeClaim works as expected.

    Fixtures are torn down here in reverse order that they are specified as a
    parameter. Take caution when reordering test fixtures.
    """
    # Prepare pod and volume specs.
    pod_name = 'provisioner-io-test'
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [
        create_pvc_spec(pvc['metadata']['name'])
    ]
    pvc['spec']['storageClassName'] = DEFAULT_STORAGECLASS_NAME
    storage_class['metadata']['name'] = DEFAULT_STORAGECLASS_NAME
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)

    create_storage(core_api, storage_class, pvc)
    create_and_wait_pod(core_api, pod)
    # Resolve the Longhorn volume the provisioner created for the PVC.
    pvc_volume_name = get_volume_name(core_api, pvc['metadata']['name'])
    write_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)
    common.wait_for_volume_detached(client, pvc_volume_name)

    # Remount the provisioned volume from a second pod and verify.
    pod_name = 'flexvolume-provisioner-io-test-2'
    pod['metadata']['name'] = pod_name
    create_and_wait_pod(core_api, pod)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
def test_snapshot(clients, volume_name):  # NOQA
    """Exercise the snapshot scenarios on a freshly created volume."""
    # Pick any client from the cluster map.
    for host_id, client in clients.iteritems():
        break

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    # The new volume must come back with the requested spec.
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"

    self_host = get_self_host_id()
    volume = volume.attach(hostId=self_host)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # The actual snapshot checks live in the shared helper.
    snapshot_test(client, volume_name)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)
    volume = wait_for_volume_delete(client, volume_name)

    assert len(client.list_volume()) == 0
def ha_simple_recovery_test(client, volume_name, size, base_image=""):  # NOQA
    """Create a two-replica volume and run the replica rebuild scenario."""
    volume = client.create_volume(name=volume_name, size=size,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)

    # Verify the volume was created with the requested spec.
    assert volume["name"] == volume_name
    assert volume["size"] == size
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""
    assert volume["baseImage"] == base_image

    this_host = get_self_host_id()
    volume = volume.attach(hostId=this_host)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # Shared helper kills a replica and verifies it is rebuilt.
    ha_rebuild_replica_test(client, volume_name)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    assert len(client.list_volume()) == 0
def volume_iscsi_basic_test(clients, volume_name, base_image=""):  # NOQA
    """Create an iSCSI-frontend volume, log into the exported target, and
    run a basic read/write check against the device."""
    # get a random client
    for host_id, client in clients.iteritems():
        break

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=3, frontend="iscsi",
                                  baseImage=base_image)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 3
    assert volume["frontend"] == "iscsi"
    assert volume["baseImage"] == base_image

    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 3
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # The listing must agree with the volume object.
    volumes = client.list_volume()
    assert len(volumes) == 1
    assert volumes[0]["name"] == volume["name"]
    assert volumes[0]["size"] == volume["size"]
    assert volumes[0]["numberOfReplicas"] == volume["numberOfReplicas"]
    assert volumes[0]["state"] == volume["state"]
    assert volumes[0]["created"] == volume["created"]
    assert volumes[0]["frontend"] == "iscsi"
    endpoint = get_volume_endpoint(volumes[0])
    assert endpoint.startswith("iscsi://")

    try:
        dev = iscsi_login(endpoint)
        volume_rw_test(dev)
    finally:
        # Always log out of the target, even if the I/O check fails.
        iscsi_logout(endpoint)

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def test_ha_salvage(client, volume_name):  # NOQA
    """After all replicas fail, salvaging them should bring the volume
    back with the original data intact."""
    # get a random client
    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert len(volume["replicas"]) == 2
    replica0_name = volume["replicas"][0]["name"]
    replica1_name = volume["replicas"][1]["name"]

    data = write_random_data(volume["endpoint"])

    # Kill every replica pod; the volume should go faulted with both
    # replicas marked as failed.
    common.k8s_delete_replica_pods_for_volume(volume_name)

    volume = common.wait_for_volume_faulted(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] != ""
    assert volume["replicas"][1]["failedAt"] != ""

    # Salvage clears the failure timestamps and detaches the volume.
    volume.salvage(names=[replica0_name, replica1_name])
    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] == ""
    assert volume["replicas"][1]["failedAt"] == ""

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # Data written before the failure must still be readable.
    check_data(volume["endpoint"], data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def create_pv_storage(api, cli, pv, claim):
    """
    Manually create a new PV and PVC for testing.
    """
    # Create the backing Longhorn volume first, sized and replicated
    # according to the PV spec.
    volume_name = pv['metadata']['name']
    csi_attributes = pv['spec']['csi']['volumeAttributes']
    cli.create_volume(
        name=volume_name,
        size=pv['spec']['capacity']['storage'],
        numberOfReplicas=int(csi_attributes['numberOfReplicas']))
    common.wait_for_volume_detached(cli, volume_name)

    # Then register the Kubernetes objects that point at it.
    api.create_persistent_volume(pv)
    api.create_namespaced_persistent_volume_claim(body=claim,
                                                  namespace='default')
def test_csi_expansion_with_size_round_up(client, core_api):  # NOQA
    """
    test expand longhorn volume

    1. Create longhorn volume with size '1Gi'
    2. Attach, write data, and detach
    3. Expand volume size to '2000000000/2G' and
       check if size round up '2000683008'
    4. Attach, write data, and detach
    5. Expand volume size to '2Gi' and check if size is '2147483648'
    6. Attach, write data, and detach
    """
    volume_name = generate_volume_name()
    volume = create_and_check_volume(client, volume_name, 2, str(1 * Gi))

    self_hostId = get_self_host_id()
    volume.attach(hostId=self_hostId, disableFrontend=False)
    volume = wait_for_volume_healthy(client, volume_name)
    test_data = write_volume_random_data(volume)
    volume.detach(hostId="")
    volume = wait_for_volume_detached(client, volume_name)

    # Expansion to a non-aligned size must round up.
    volume.expand(size="2000000000")
    wait_for_volume_expansion(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume.size == "2000683008"

    self_hostId = get_self_host_id()
    volume.attach(hostId=self_hostId, disableFrontend=False)
    volume = wait_for_volume_healthy(client, volume_name)
    # Previously written data must survive the expansion.
    check_volume_data(volume, test_data, False)
    test_data = write_volume_random_data(volume)
    volume.detach(hostId="")
    volume = wait_for_volume_detached(client, volume_name)

    volume.expand(size=str(2 * Gi))
    wait_for_volume_expansion(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume.size == "2147483648"

    self_hostId = get_self_host_id()
    volume.attach(hostId=self_hostId, disableFrontend=False)
    volume = wait_for_volume_healthy(client, volume_name)
    check_volume_data(volume, test_data, False)
    volume.detach(hostId="")
    volume = wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
def backup_test(clients, volume_name, size, base_image=""):  # NOQA
    """Run the backupstore scenario once per configured backup target,
    switching the backup-target settings between iterations."""
    # Pick an arbitrary client from the cluster map.
    for host_id, client in clients.iteritems():
        break

    volume = client.create_volume(name=volume_name, size=size,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == size
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["baseImage"] == base_image

    lht_hostId = get_self_host_id()
    volume = volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, volume_name)

    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)

    # test backupTarget for multiple settings
    backupstores = common.get_backupstore_url()
    for backupstore in backupstores:
        if common.is_backupTarget_s3(backupstore):
            # S3 URLs carry the credential secret after a "$" separator.
            backupsettings = backupstore.split("$")
            setting = client.update(setting, value=backupsettings[0])
            assert setting["value"] == backupsettings[0]

            credential = client.by_id_setting(
                common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value=backupsettings[1])
            assert credential["value"] == backupsettings[1]
        else:
            # Non-S3 targets need no credential secret.
            setting = client.update(setting, value=backupstore)
            assert setting["value"] == backupstore

            credential = client.by_id_setting(
                common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value="")
            assert credential["value"] == ""

        backupstore_test(client, lht_hostId, volume_name, size)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    volume = wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def backupstore_test(client, host_id, volname, size):
    """Create a backup from a snapshot, verify its metadata, restore it
    into a new volume, check the data, then delete the backup."""
    volume = client.by_id_volume(volname)
    volume.snapshotCreate()
    data = write_volume_random_data(volume)
    snap2 = volume.snapshotCreate()
    volume.snapshotCreate()

    # Back up the middle snapshot, which contains the written data.
    volume.snapshotBackup(name=snap2["name"])

    bv, b = common.find_backup(client, volname, snap2["name"])

    # Fetching the backup by name must return identical metadata.
    new_b = bv.backupGet(name=b["name"])
    assert new_b["name"] == b["name"]
    assert new_b["url"] == b["url"]
    assert new_b["snapshotName"] == b["snapshotName"]
    assert new_b["snapshotCreated"] == b["snapshotCreated"]
    assert new_b["created"] == b["created"]
    assert new_b["volumeName"] == b["volumeName"]
    assert new_b["volumeSize"] == b["volumeSize"]
    assert new_b["volumeCreated"] == b["volumeCreated"]

    # test restore
    restoreName = generate_volume_name()
    volume = client.create_volume(name=restoreName, size=size,
                                  numberOfReplicas=2,
                                  fromBackup=b["url"])
    volume = common.wait_for_volume_detached(client, restoreName)
    assert volume["name"] == restoreName
    assert volume["size"] == size
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, restoreName)
    # The restored volume must contain the backed-up data.
    check_volume_data(volume, data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, restoreName)
    client.delete(volume)
    volume = wait_for_volume_delete(client, restoreName)

    bv.backupDelete(name=b["name"])

    # The deleted backup must no longer appear in the listing.
    backups = bv.backupList()
    found = False
    for b in backups:
        if b["snapshotName"] == snap2["name"]:
            found = True
            break
    assert not found
def create_pv_storage(api, cli, pv, claim, base_image, from_backup):
    """
    Manually create a new PV and PVC for testing.
    """
    # Create the backing Longhorn volume first, optionally restoring it
    # from a backup and/or layering it on a base image.
    cli.create_volume(
        name=pv['metadata']['name'], size=pv['spec']['capacity']['storage'],
        numberOfReplicas=int(pv['spec']['csi']['volumeAttributes']
                             ['numberOfReplicas']),
        baseImage=base_image, fromBackup=from_backup)
    common.wait_for_volume_detached(cli, pv['metadata']['name'])

    # Then register the Kubernetes PV and PVC that reference it.
    api.create_persistent_volume(pv)

    api.create_namespaced_persistent_volume_claim(
        body=claim, namespace='default')
def ha_salvage_test(client, volume_name, base_image=""):  # NOQA
    """After every replica fails, salvaging them should recover the
    volume with its data intact."""
    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    assert volume["name"] == volume_name
    assert volume["size"] == SIZE
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["created"] != ""
    assert volume["baseImage"] == base_image

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert len(volume["replicas"]) == 2
    replica0_name = volume["replicas"][0]["name"]
    replica1_name = volume["replicas"][1]["name"]

    data = write_volume_random_data(volume)

    # Kill all replica pods to fault the volume.
    common.k8s_delete_replica_pods_for_volume(volume_name)

    volume = common.wait_for_volume_faulted(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] != ""
    assert volume["replicas"][1]["failedAt"] != ""

    # Salvage resets the failure markers and leaves the volume detached.
    volume.salvage(names=[replica0_name, replica1_name])
    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 2
    assert volume["replicas"][0]["failedAt"] == ""
    assert volume["replicas"][1]["failedAt"] == ""

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # Data written before the failure must still be readable.
    check_volume_data(volume, data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def test_recurring_job_in_volume_creation(set_random_backupstore, client, volume_name):  # NOQA
    """
    Test create volume with recurring jobs

    1. Create volume with recurring jobs though Longhorn API
    2. Verify the recurring jobs run correctly
    """
    host_id = get_self_host_id()

    # error when creating volume with duplicate jobs
    with pytest.raises(Exception) as e:
        client.create_volume(name=volume_name, size=SIZE,
                             numberOfReplicas=2,
                             recurringJobs=create_jobs1() + create_jobs1())
    assert "duplicate job" in str(e.value)

    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2, recurringJobs=create_jobs1())
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)

    # wait until the beginning of an even minute
    wait_until_begin_of_an_even_minute()
    # wait until the 10th second of an even minute
    # to avoid writing data at the same time backup is taking
    time.sleep(10)

    write_volume_random_data(volume)
    time.sleep(150)  # 2.5 minutes
    write_volume_random_data(volume)
    time.sleep(150)  # 2.5 minutes

    check_jobs1_result(volume)

    volume = volume.detach(hostId="")
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def test_attach_without_frontend(clients, volume_name):  # NOQA
    """Attach a volume in maintenance mode (frontend disabled), revert a
    snapshot there, and verify the snapshot data after reattaching with
    the frontend enabled."""
    # Pick an arbitrary client from the cluster map.
    for host_id, client in clients.iteritems():
        break

    volume = create_and_check_volume(client, volume_name)

    lht_hostId = get_self_host_id()
    volume.attach(hostId=lht_hostId, disableFrontend=False)
    common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert volume["disableFrontend"] is False
    assert volume["frontend"] == "blockdev"

    snap1_data = write_volume_random_data(volume)
    snap1 = volume.snapshotCreate()

    write_volume_random_data(volume)
    volume.snapshotCreate()

    volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    # Maintenance-mode attach: no frontend, so no block-device endpoint.
    volume.attach(hostId=lht_hostId, disableFrontend=True)
    common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    engine = get_volume_engine(volume)
    assert volume["disableFrontend"] is True
    assert volume["frontend"] == "blockdev"
    assert engine["endpoint"] == ""

    # Revert to the first snapshot while the frontend is down.
    volume.snapshotRevert(name=snap1["name"])

    volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.attach(hostId=lht_hostId, disableFrontend=False)
    common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert volume["disableFrontend"] is False
    assert volume["frontend"] == "blockdev"

    # The data must match the first snapshot after the revert.
    check_volume_data(volume, snap1_data)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
def test_recurring_job(clients, volume_name):  # NOQA
    """Verify recurring snapshot/backup jobs fire on schedule and honor
    their retain counts, including after a job-list update."""
    # Pick an arbitrary client from the cluster map.
    for host_id, client in clients.iteritems():  # NOQA
        break

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    jobs = create_jobs1()
    volume.recurringUpdate(jobs=jobs)

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    job_backup2 = {"name": "backup2", "cron": "* * * * *",
                   "task": "backup", "retain": 2}
    # Keep only the first original job and add a new every-minute backup.
    volume.recurringUpdate(jobs=[jobs[0], job_backup2])

    # 5 minutes
    time.sleep(300)

    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot["removed"] is False:
            count += 1
    # 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1 volume-head
    assert count == 6

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def test_replica_scheduler_update_over_provisioning(client):  # NOQA
    """Verify replica scheduling honors the storage-over-provisioning
    setting: at 0% no replica can be scheduled; at 100% the volume
    schedules one replica per node on the default disk."""
    nodes = client.list_node()
    lht_hostId = get_self_host_id()
    # Remember each node's default disk so replica placement can be
    # checked later.
    expect_node_disk = {}
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk

    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]

    # set storage over provisioning percentage to 0
    # to test all replica couldn't be scheduled
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="0")
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=SIZE,
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)

    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    # check volume status
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)

    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check all replica should be scheduled to default disk
    for replica in volume["replicas"]:
        # Renamed from `id`, which shadowed the builtin.
        host_id = replica["hostId"]
        assert host_id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[host_id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        # List comprehension instead of filter(): on Python 3 filter()
        # returns a lazy iterator, which would break the len() check.
        node_hosts = [host for host in node_hosts if host != host_id]
    assert len(node_hosts) == 0

    # clean volume and disk
    cleanup_volume(client, vol_name)
    client.update(over_provisioning_setting,
                  value=old_provisioning_setting)
def test_pv_creation(client, core_api):  # NOQA
    """
    Test creating PV using Longhorn API

    1. Create volume
    2. Create PV for the volume
    3. Try to create another PV for the same volume. It should fail.
    4. Check Kubernetes Status for the volume since PV is created.
    """
    volume_name = "test-pv-creation"  # NOQA
    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    create_pv_for_volume(client, core_api, volume, pv_name)

    # A volume can back at most one PV; a second request must fail.
    with pytest.raises(Exception) as e:
        volume.pvCreate(pvName="pv2-" + volume_name)
    assert "already exist" in str(e.value)

    expected_ks = {
        'pvName': pv_name,
        'pvStatus': 'Available',
        'namespace': '',
        'pvcName': '',
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
    }
    wait_volume_kubernetes_status(client, volume_name, expected_ks)

    delete_and_wait_pv(core_api, pv_name)
def test_replica_scheduler_just_under_over_provisioning(client):  # NOQA
    """Verify a volume sized just under the 100% over-provisioning limit
    can still be scheduled, one replica per node on the default disk."""
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    lht_hostId = get_self_host_id()
    nodes = client.list_node()
    expect_node_disk = {}
    max_size_array = []
    # Record each node's default disk and clear all storage reservations
    # so the full disk capacity is schedulable.
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk
                max_size_array.append(disk["storageMaximum"])
            disk["storageReserved"] = 0
            update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(client, node["name"], fsid,
                                 "storageReserved", 0)

    # The schedulable size is bounded by the smallest node disk.
    max_size = min(max_size_array)
    # test just under over provisioning limit could be scheduled
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=str(max_size),
                                  numberOfReplicas=len(nodes))

    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check all replica should be scheduled to default disk
    for replica in volume["replicas"]:
        # Renamed from `id`, which shadowed the builtin.
        host_id = replica["hostId"]
        assert host_id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[host_id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        # List comprehension instead of filter(): on Python 3 filter()
        # returns a lazy iterator, which would break the len() check.
        node_hosts = [host for host in node_hosts if host != host_id]
    assert len(node_hosts) == 0

    # clean volume and disk
    cleanup_volume(client, vol_name)
    client.update(over_provisioning_setting,
                  value=old_provisioning_setting)
def test_hard_anti_affinity_offline_rebuild(client, volume_name):  # NOQA
    """
    Test that volumes with Hard Anti-Affinity can build new replicas during
    the attaching process once a valid node is available.

    Once a new replica has been built as part of the attaching process, the
    volume should be Healthy again.
    """
    volume = create_and_check_volume(client, volume_name)
    host_id = get_self_host_id()
    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    assert len(volume["replicas"]) == 3
    data = write_volume_random_data(volume)

    # Hard anti-affinity: replicas may not share a node.
    setting = client.by_id_setting(SETTING_REPLICA_SOFT_ANTI_AFFINITY)
    client.update(setting, value="false")
    node = client.by_id_node(host_id)
    client.update(node, allowScheduling=False)
    # Materialize as a list: a lazy Python 3 map object would be
    # exhausted after its first traversal.
    replica_names = [replica.name for replica in volume["replicas"]]
    host_replica = get_host_replica(volume, host_id)

    volume.replicaRemove(name=host_replica["name"])
    volume = wait_for_volume_degraded(client, volume_name)
    # With the local node unschedulable there is nowhere to rebuild.
    wait_scheduling_failure(client, volume_name)
    volume.detach()
    volume = wait_for_volume_detached(client, volume_name)

    # Re-enable scheduling; the rebuild should happen while attaching.
    client.update(node, allowScheduling=True)
    volume.attach(hostId=host_id)
    wait_new_replica_ready(client, volume_name, replica_names)
    volume = wait_for_volume_healthy(client, volume_name)
    assert len(volume["replicas"]) == 3
    check_volume_data(volume, data)

    cleanup_volume(client, volume)
def recurring_job_labels_test(client, labels, volume_name, size=SIZE, backing_image=""):  # NOQA
    """Run a recurring backup job with custom labels and verify the
    labels (plus the implicit recurring-job label) land on the backup."""
    host_id = get_self_host_id()
    client.create_volume(name=volume_name, size=size,
                         numberOfReplicas=2, backingImage=backing_image)
    volume = common.wait_for_volume_detached(client, volume_name)

    # Simple Backup Job that runs every 1 minute, retains 1.
    jobs = [
        {
            "name": RECURRING_JOB_NAME,
            "cron": "*/1 * * * *",
            "task": "backup",
            "retain": 1,
            "labels": labels
        }
    ]
    volume.recurringUpdate(jobs=jobs)
    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    write_volume_random_data(volume)

    # 1 minutes 15s
    time.sleep(75)
    # Update the job with an extra label mid-flight.
    labels["we-added-this-label"] = "definitely"
    jobs[0]["labels"] = labels
    volume = volume.recurringUpdate(jobs=jobs)
    volume = wait_for_volume_healthy(client, volume_name)
    write_volume_random_data(volume)

    # 2 minutes 15s
    time.sleep(135)

    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot.removed is False:
            count += 1
    # 1 from Backup, 1 from Volume Head.
    assert count == 2

    # Verify the Labels on the actual Backup.
    bv = client.by_id_backupVolume(volume_name)
    backups = bv.backupList().data
    assert len(backups) == 1

    b = bv.backupGet(name=backups[0].name)
    for key, val in iter(labels.items()):
        assert b.labels.get(key) == val
    assert b.labels.get(RECURRING_JOB_LABEL) == RECURRING_JOB_NAME
    # One extra Label from RecurringJob.
    assert len(b.labels) == len(labels) + 1
    if backing_image:
        assert b.volumeBackingImageName == \
            backing_image
        assert b.volumeBackingImageURL != ""

    cleanup_volume(client, volume)
def test_pv_creation(client, core_api):  # NOQA
    """
    Create a PV for a Longhorn volume and verify the volume's Kubernetes
    status fields reflect it.

    NOTE(review): a second function with this exact name is defined later in
    this file; under pytest only the later definition is collected, so this
    version appears to be shadowed — confirm and rename one of them.
    """
    volume_name = "test-pv-creation"
    client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    create_pv_for_volume(client, core_api, volume, pv_name)

    # try to create one more pv for the volume; must be rejected
    pv_name_2 = "pv2-" + volume_name
    with pytest.raises(Exception) as e:
        volume.pvCreate(pvName=pv_name_2)
    assert "already exist" in str(e.value)

    # expected Kubernetes status: PV present and Available, no PVC/pod refs
    ks = {
        'pvName': pv_name,
        'pvStatus': 'Available',
        'namespace': '',
        'pvcName': '',
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    delete_and_wait_pv(core_api, pv_name)
def test_soft_anti_affinity_detach(client, volume_name):  # NOQA
    """
    Test that volumes with Soft Anti-Affinity can detach and reattach to a
    node properly.
    """
    volume = create_and_check_volume(client, volume_name)
    host_id = get_self_host_id()
    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    assert len(volume["replicas"]) == 3
    data = write_volume_random_data(volume)

    # Soft anti-affinity: replicas may share a node if necessary.
    setting = client.by_id_setting(SETTING_REPLICA_SOFT_ANTI_AFFINITY)
    client.update(setting, value="true")
    node = client.by_id_node(host_id)
    client.update(node, allowScheduling=False)

    # Remove the local replica; a replacement should still be rebuilt
    # elsewhere because soft anti-affinity is enabled.
    # (Python 2: map() returns a list here.)
    replica_names = map(lambda replica: replica.name, volume["replicas"])
    host_replica = get_host_replica(volume, host_id)
    volume.replicaRemove(name=host_replica["name"])
    wait_new_replica_ready(client, volume_name, replica_names)
    volume = wait_for_volume_healthy(client, volume_name)

    # Detach/reattach must preserve the replica count and the data.
    volume.detach()
    volume = wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == 3

    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    assert len(volume["replicas"]) == 3
    check_volume_data(volume, data)
    cleanup_volume(client, volume)
def restore_csi_volume_snapshot(core_api, client, csivolsnap, pvc_name, pvc_request_storage_size):  # NOQA
    """
    Helper: create a PVC whose dataSource points at the given CSI
    VolumeSnapshot, wait for it to bind and for the restored Longhorn
    volume to finish restoration and detach, then return the bound PVC
    object as read from the Kubernetes API.
    """
    manifest = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': pvc_name
        },
        'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': pvc_request_storage_size
                }
            },
            'storageClassName': 'longhorn',
            # Restore from the CSI snapshot rather than provisioning empty.
            'dataSource': {
                'kind': 'VolumeSnapshot',
                'apiGroup': 'snapshot.storage.k8s.io',
                'name': csivolsnap["metadata"]["name"]
            }
        }
    }
    core_api.create_namespaced_persistent_volume_claim(body=manifest,
                                                       namespace='default')

    # Poll until the PVC is bound to a volume (spec.volume_name populated).
    claim = manifest
    bound_volume = None
    for _ in range(RETRY_COUNTS):
        claim = core_api.read_namespaced_persistent_volume_claim(
            name=pvc_name, namespace="default")
        if claim.spec.volume_name is not None:
            bound_volume = claim.spec.volume_name
            break
        time.sleep(RETRY_INTERVAL)
    assert bound_volume is not None

    wait_for_volume_restoration_completed(client, bound_volume)
    wait_for_volume_detached(client, bound_volume)
    return claim
def test_volume_scheduling_failure(clients, volume_name):  # NOQA
    '''
    Test fail to schedule by disable scheduling for all the nodes
    Also test cannot attach a scheduling failed volume
    '''
    client = get_random_client(clients)
    nodes = client.list_node()
    assert len(nodes) > 0

    # Disable scheduling on every node so no replica can be placed.
    for node in nodes:
        node = client.update(node, allowScheduling=False)
        node = common.wait_for_node_update(client, node["id"],
                                           "allowScheduling", False)

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=3)
    volume = common.wait_for_volume_condition_scheduled(
        client, volume_name, "status", CONDITION_STATUS_FALSE)
    volume = common.wait_for_volume_detached(client, volume_name)

    # Attaching an unscheduled volume must be rejected.
    self_node = get_self_host_id()
    with pytest.raises(Exception) as e:
        volume.attach(hostId=self_node)
    assert "not scheduled" in str(e.value)

    # Re-enable scheduling; the volume should then schedule and work.
    for node in nodes:
        node = client.update(node, allowScheduling=True)
        node = common.wait_for_node_update(client, node["id"],
                                           "allowScheduling", True)

    volume = common.wait_for_volume_condition_scheduled(
        client, volume_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, volume_name)
    volume = volume.attach(hostId=self_node)
    volume = common.wait_for_volume_healthy(client, volume_name)
    endpoint = get_volume_endpoint(volume)
    assert endpoint != ""
    # Smoke-test I/O through the block device endpoint.
    volume_rw_test(endpoint)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
def test_tag_scheduling_failure(client, node_default_tags):  # NOQA
    """
    Test that scheduling fails if no Nodes/Disks with the requested Tags are
    available.

    Case 1:
    Validate that if specifying nonexist tags in volume, API call will fail.

    Case 2:
    1. Specify existing but no node or disk can unsatisfied tags.
    2. Validate the volume will failed the scheduling
    """
    invalid_tag_cases = [
        # Only one Disk Tag exists.
        {
            "disk": ["doesnotexist", "ssd"],
            "node": []
        },
        # Only one Node Tag exists.
        {
            "disk": [],
            "node": ["doesnotexist", "main"]
        }
    ]
    # Case 1: unknown tags are rejected at creation time.
    for tags in invalid_tag_cases:
        volume_name = generate_volume_name()  # NOQA
        with pytest.raises(Exception) as e:
            client.create_volume(name=volume_name, size=SIZE,
                                 numberOfReplicas=3,
                                 diskSelector=tags["disk"],
                                 nodeSelector=tags["node"])
        assert "does not exist" in str(e.value)

    # Case 2: tags exist individually but no node/disk carries all of them.
    unsatisfied_tag_cases = [{
        "disk": [],
        "node": ["main", "fallback"]
    }, {
        "disk": ["ssd", "m2"],
        "node": []
    }]
    for tags in unsatisfied_tag_cases:
        volume_name = generate_volume_name()
        client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=3,
                             diskSelector=tags["disk"],
                             nodeSelector=tags["node"])
        volume = wait_for_volume_detached(client, volume_name)
        assert volume.diskSelector == tags["disk"]
        assert volume.nodeSelector == tags["node"]
        wait_scheduling_failure(client, volume_name)
        client.delete(volume)
        wait_for_volume_delete(client, volume.name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def test_hard_anti_affinity_detach(client, volume_name):  # NOQA
    """
    Test that volumes with Hard Anti-Affinity are still able to detach and
    reattach to a node properly, even in degraded state.

    1. Create a volume and attach to the current node
    2. Generate and write `data` to the volume.
    3. Set `soft anti-affinity` to false
    4. Disable current node's scheduling.
    5. Remove the replica on the current node
        1. Verify volume will be in degraded state.
        2. Verify volume reports condition `scheduled == false`
    6. Detach the volume.
    7. Verify that volume only have 2 replicas
        1. Unhealthy replica will be removed upon detach.
    8. Attach the volume again.
        1. Verify volume will be in degraded state.
        2. Verify volume reports condition `scheduled == false`
        3. Verify only two of three replicas of volume are healthy.
        4. Verify the remaining replica doesn't have `replica.HostID`,
           meaning it's unscheduled
    9. Check volume `data`
    """
    volume = create_and_check_volume(client, volume_name)
    host_id = get_self_host_id()
    volume.attach(hostId=host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    assert len(volume.replicas) == 3
    data = write_volume_random_data(volume)

    setting = client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
    client.update(setting, value="false")
    node = client.by_id_node(host_id)
    client.update(node, allowScheduling=False)

    host_replica = get_host_replica(volume, host_id)
    volume.replicaRemove(name=host_replica.name)
    volume = wait_for_volume_degraded(client, volume_name)
    wait_scheduling_failure(client, volume_name)

    volume.detach()
    volume = wait_for_volume_detached(client, volume_name)
    # The failed replica is pruned on detach.
    assert len(volume.replicas) == 2

    volume.attach(hostId=host_id)
    # Make sure we're still not getting another successful replica.
    volume = wait_for_volume_degraded(client, volume_name)
    wait_scheduling_failure(client, volume_name)
    # Two healthy running replicas plus one unscheduled placeholder.
    assert sum([1 for replica in volume.replicas
                if replica.running and replica.mode == "RW"]) == 2
    assert sum([1 for replica in volume.replicas
                if not replica.hostId]) == 1
    assert len(volume.replicas) == 3
    check_volume_data(volume, data)
    cleanup_volume(client, volume)
def test_recurring_job(clients, volume_name):  # NOQA
    """
    Test recurring jobs: run the standard job set for 5 minutes, verify its
    snapshots/backups, then add a second backup job and verify the combined
    snapshot count.
    """
    # Python 2 idiom: grab an arbitrary (host_id, client) pair.
    for host_id, client in clients.iteritems():  # NOQA
        break

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2)
    volume = common.wait_for_volume_detached(client, volume_name)

    jobs = create_jobs1()
    volume.recurringUpdate(jobs=jobs)

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)
    # 5 minutes
    time.sleep(300)
    check_jobs1_result(volume)

    job_backup2 = {"name": "backup2", "cron": "* * * * *",
                   "task": "backup", "retain": 2}
    # Keep the first job, replace the second with the new backup job.
    volume.recurringUpdate(jobs=[jobs[0], job_backup2])
    # 5 minutes
    time.sleep(300)

    snapshots = volume.snapshotList()
    count = 0
    for snapshot in snapshots:
        if snapshot["removed"] is False:
            count += 1
    # 2 from job_snap, 1 from job_backup, 2 from job_backup2, 1 volume-head
    assert count == 6

    volume = volume.detach()
    common.wait_for_volume_detached(client, volume_name)
    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def test_pv_creation(client, core_api):  # NOQA
    """
    Create a PV for a Longhorn volume via pvCreate and verify the volume's
    kubernetesStatus reflects it; also verify a second pvCreate is rejected.

    Bug fix: the status-polling loop originally read `kubernetesStatus` once
    before the loop and never refreshed it, so the loop condition could never
    change — it either broke immediately or slept the entire retry budget.
    The status is now re-fetched on every iteration.

    NOTE(review): this redefines a `test_pv_creation` declared earlier in the
    file; only this later definition is collected by pytest — confirm and
    rename one of them.
    """
    volume_name = "test-pv-creation"
    client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    volume.pvCreate(pvName=pv_name)
    for i in range(RETRY_COUNTS):
        if check_pv_existence(core_api, pv_name):
            break
        time.sleep(RETRY_INTERVAL)
    assert check_pv_existence(core_api, pv_name)

    # Wait until the new PV shows up in the volume's Kubernetes status,
    # refreshing the snapshot each retry.
    for i in range(RETRY_COUNTS):
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        if k_status['pvName'] and k_status['pvStatus'] == 'Available':
            break
        time.sleep(RETRY_INTERVAL)

    volume = client.by_id_volume(volume_name)
    k_status = volume["kubernetesStatus"]
    workloads = k_status['workloadsStatus']
    assert k_status['pvName'] == pv_name
    assert k_status['pvStatus'] == 'Available'
    assert not k_status['namespace']
    assert not k_status['pvcName']
    assert not workloads
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    # try to create one more pv for the volume; must be rejected
    pv_name_2 = "pv2-" + volume_name
    with pytest.raises(Exception) as e:
        volume.pvCreate(pvName=pv_name_2)
    assert "already exist" in str(e.value)

    # Status must be unchanged after the rejected second pvCreate.
    volume = client.by_id_volume(volume_name)
    k_status = volume["kubernetesStatus"]
    workloads = k_status['workloadsStatus']
    assert k_status['pvName'] == pv_name
    assert k_status['pvStatus'] == 'Available'
    assert not k_status['namespace']
    assert not k_status['pvcName']
    assert not workloads
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    delete_and_wait_pv(core_api, pv_name)
def flexvolume_io_test(client, core_api, flexvolume, pod):  # NOQA
    """
    Helper: write data through a FlexVolume-backed pod, delete the pod, wait
    for the Longhorn volume to detach, then remount it in a second pod and
    verify the data survived.
    """
    writer_name = 'flexvolume-io-test'
    spec = pod['spec']
    pod['metadata']['name'] = writer_name
    # Point the container's mount at the flexvolume and register the volume.
    spec['containers'][0]['volumeMounts'][0]['name'] = \
        flexvolume['name']
    spec['volumes'] = [
        flexvolume
    ]
    payload = generate_random_data(VOLUME_RWTEST_SIZE)

    create_and_wait_pod(core_api, pod)
    common.write_volume_data(core_api, writer_name, payload)
    delete_and_wait_pod(core_api, writer_name)
    wait_for_volume_detached(client, flexvolume["name"])

    # Reattach via a second pod and confirm the data is intact.
    reader_name = 'volume-driver-io-test-2'
    pod['metadata']['name'] = reader_name
    create_and_wait_pod(core_api, pod)

    assert read_volume_data(core_api, reader_name) == payload
def create_volume(client, vol_name, size, node_id, r_num):  # NOQA
    """
    Helper: create a volume with `r_num` replicas, verify its initial
    detached state and by-id lookup consistency, attach it to `node_id`,
    and return the healthy volume object.
    """
    created = client.create_volume(name=vol_name, size=size,
                                   numberOfReplicas=r_num)
    assert created["numberOfReplicas"] == r_num
    assert created["frontend"] == "blockdev"

    created = common.wait_for_volume_detached(client, vol_name)
    assert len(created["replicas"]) == r_num
    assert created["state"] == "detached"
    assert created["created"] != ""

    # The by-id lookup must agree with the listed volume on every key field.
    looked_up = client.by_id_volume(vol_name)
    for field in ("name", "size", "numberOfReplicas", "state", "created"):
        assert looked_up[field] == created[field]

    created.attach(hostId=node_id)
    return common.wait_for_volume_healthy(client, vol_name)
def test_replica_scheduler_just_under_over_provisioning(client):  # NOQA
    """
    Test that a volume sized exactly at the smallest disk's storageMaximum
    can be scheduled when over-provisioning is set to 100% and all disk
    reservations are cleared.
    """
    over_provisioning_setting = client.by_id_setting(
        SETTING_STORAGE_OVER_PROVISIONING_PERCENTAGE)
    old_provisioning_setting = over_provisioning_setting["value"]
    # set storage over provisioning percentage to 100
    over_provisioning_setting = client.update(over_provisioning_setting,
                                              value="100")

    lht_hostId = get_self_host_id()
    nodes = client.list_node()
    expect_node_disk = {}
    max_size_array = []
    # Record each node's default disk and clear all reservations.
    # (Python 2: dict.iteritems())
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk
                max_size_array.append(disk["storageMaximum"])
            disk["storageReserved"] = 0
        update_disks = get_update_disks(disks)
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(client, node["name"], fsid,
                                 "storageReserved", 0)

    # The limiting factor is the smallest default disk.
    max_size = min(max_size_array)
    # test just under over provisioning limit could be scheduled
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=str(max_size),
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check all replica should be scheduled to default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        # Each host must appear exactly once. (Python 2: filter() -> list)
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean volume and disk
    cleanup_volume(client, vol_name)
    client.update(over_provisioning_setting,
                  value=old_provisioning_setting)
def test_pvc_creation(client, core_api, pod):  # NOQA
    """
    Create a PV then a PVC for a Longhorn volume, run a pod against the PVC,
    and verify the volume's kubernetesStatus at each step.

    Bug fix: three polling loops below originally read the volume's
    `kubernetesStatus` (or its `workloadsStatus`) once before looping, so the
    loop condition could never change — each loop either exited immediately
    or slept the entire retry budget. The status is now re-fetched on every
    iteration.
    """
    volume_name = "test-pvc-creation"
    client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    pvc_name = "pvc-" + volume_name
    pod_name = "pod-" + volume_name

    # try to create pvc without pv for the volume; must be rejected
    with pytest.raises(Exception) as e:
        volume.pvcCreate(namespace="default", pvcName=pvc_name)
    # NOTE: "connot" deliberately matches the misspelled server-side message.
    assert "connot find existing PV for volume" in str(e.value)

    volume.pvCreate(pvName=pv_name)
    for i in range(RETRY_COUNTS):
        if check_pv_existence(core_api, pv_name):
            break
        time.sleep(RETRY_INTERVAL)
    assert check_pv_existence(core_api, pv_name)

    # Wait until the PV is reflected in the Kubernetes status, refreshing
    # the snapshot each retry.
    for i in range(RETRY_COUNTS):
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        if k_status['pvName'] and k_status['pvStatus'] == 'Available':
            break
        time.sleep(RETRY_INTERVAL)

    volume = client.by_id_volume(volume_name)
    k_status = volume["kubernetesStatus"]
    assert k_status['pvName'] == pv_name
    assert k_status['pvStatus'] == 'Available'
    assert not k_status['namespace']
    assert not k_status['pvcName']
    assert not k_status['workloadsStatus']
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    volume.pvcCreate(namespace="default", pvcName=pvc_name)
    for i in range(RETRY_COUNTS):
        if check_pvc_existence(core_api, pvc_name):
            break
        time.sleep(RETRY_INTERVAL)
    assert check_pvc_existence(core_api, pvc_name)

    # Wait until the PVC binding is reflected in the Kubernetes status.
    for i in range(RETRY_COUNTS):
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        if k_status['pvcName'] and k_status['namespace']:
            break
        time.sleep(RETRY_INTERVAL)

    volume = client.by_id_volume(volume_name)
    k_status = volume["kubernetesStatus"]
    assert k_status['pvName'] == pv_name
    assert k_status['pvStatus'] == 'Bound'
    assert k_status['namespace'] == "default"
    assert k_status['pvcName'] == pvc_name
    assert not k_status['workloadsStatus']
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    # Run a pod that mounts the PVC.
    pod['metadata']['name'] = pod_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': pvc_name,
        },
    }]
    create_and_wait_pod(core_api, pod)

    volume = client.by_id_volume(volume_name)
    k_status = volume["kubernetesStatus"]
    workloads = k_status['workloadsStatus']
    assert k_status['pvName'] == pv_name
    assert k_status['pvStatus'] == 'Bound'
    assert len(workloads) == 1

    # Wait for the pod's workload status to become Running, refreshing the
    # workloads list each retry (guard against a transiently empty list).
    for i in range(RETRY_COUNTS):
        volume = client.by_id_volume(volume_name)
        k_status = volume["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        if workloads and workloads[0]['podStatus'] == 'Running':
            break
        time.sleep(RETRY_INTERVAL)

    volume = client.by_id_volume(volume_name)
    k_status = volume["kubernetesStatus"]
    workloads = k_status['workloadsStatus']
    assert len(workloads) == 1
    assert workloads[0]['podName'] == pod_name
    assert workloads[0]['podStatus'] == 'Running'
    assert not workloads[0]['workloadName']
    assert not workloads[0]['workloadType']
    assert k_status['namespace'] == 'default'
    assert k_status['pvcName'] == pvc_name
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    delete_and_wait_pod(core_api, pod_name)
    delete_and_wait_pvc(core_api, pvc_name)
    wait_delete_pv(core_api, pv_name)
def test_replica_scheduler_too_large_volume_fit_any_disks(client):  # NOQA
    """
    Test that a volume too large for any disk fails scheduling while the
    disks are fully reserved, then schedules successfully once the
    reservations are cleared.
    """
    nodes = client.list_node()
    lht_hostId = get_self_host_id()
    expect_node_disk = {}
    # Record each node's default disk and reserve all of its capacity so
    # nothing can be scheduled. (Python 2: dict.iteritems())
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk
            disk["storageReserved"] = disk["storageMaximum"]
        update_disks = get_update_disks(disks)
        node.diskUpdate(disks=update_disks)

    # volume is too large to fill into any disks
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=str(4*Gi),
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)

    # reduce StorageReserved of each default disk
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        update_disks = get_update_disks(disks)
        for disk in update_disks:
            disk["storageReserved"] = 0
        node = node.diskUpdate(disks=update_disks)
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_status(client, node["name"], fsid,
                                 "storageReserved", 0)

    # check volume status
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check all replica should be scheduled to default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        # Each host must appear exactly once. (Python 2: filter() -> list)
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean volume and disk
    cleanup_volume(client, vol_name)
def test_replica_scheduler_update_minimal_available(client):  # NOQA
    """
    Test that raising the minimal-available-storage setting to 100% makes
    all disks unschedulable (volume scheduling fails), and restoring the
    original value makes scheduling succeed again.
    """
    minimal_available_setting = client.by_id_setting(
        SETTING_STORAGE_MINIMAL_AVAILABLE_PERCENTAGE)
    old_minimal_setting = minimal_available_setting["value"]

    nodes = client.list_node()
    expect_node_disk = {}
    # Record each node's default disk. (Python 2: dict.iteritems())
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            if disk["path"] == DEFAULT_DISK_PATH:
                expect_disk = disk
                expect_disk["fsid"] = fsid
                expect_node_disk[node["name"]] = expect_disk

    # set storage minimal available percentage to 100
    # to test all replica couldn't be scheduled
    minimal_available_setting = client.update(minimal_available_setting,
                                              value="100")
    # wait for disks state
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_conditions(client, node["name"], fsid,
                                     DISK_CONDITION_SCHEDULABLE,
                                     CONDITION_STATUS_FALSE)

    lht_hostId = get_self_host_id()
    vol_name = common.generate_volume_name()
    volume = client.create_volume(name=vol_name, size=SIZE,
                                  numberOfReplicas=len(nodes))
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_FALSE)

    # set storage minimal available percentage to default value(10)
    minimal_available_setting = client.update(minimal_available_setting,
                                              value=old_minimal_setting)
    # wait for disks state
    nodes = client.list_node()
    for node in nodes:
        disks = node["disks"]
        for fsid, disk in disks.iteritems():
            wait_for_disk_conditions(client, node["name"], fsid,
                                     DISK_CONDITION_SCHEDULABLE,
                                     CONDITION_STATUS_TRUE)

    # check volume status
    volume = common.wait_for_volume_condition_scheduled(
        client, vol_name, "status", CONDITION_STATUS_TRUE)
    volume = common.wait_for_volume_detached(client, vol_name)
    assert volume["state"] == "detached"
    assert volume["created"] != ""

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, vol_name)
    nodes = client.list_node()
    node_hosts = []
    for node in nodes:
        node_hosts.append(node["name"])
    # check all replica should be scheduled to default disk
    for replica in volume["replicas"]:
        id = replica["hostId"]
        assert id != ""
        assert replica["running"]
        expect_disk = expect_node_disk[id]
        assert replica["diskID"] == expect_disk["fsid"]
        assert expect_disk["path"] in replica["dataPath"]
        # Each host must appear exactly once. (Python 2: filter() -> list)
        node_hosts = filter(lambda x: x != id, node_hosts)
    assert len(node_hosts) == 0

    # clean volume and disk
    cleanup_volume(client, vol_name)
def test_statefulset_restore(client, core_api, storage_class,  # NOQA
                             statefulset):  # NOQA
    """
    Test that data can be restored into volumes usable by a StatefulSet.
    """
    statefulset_name = 'statefulset-restore-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_storage_class(storage_class)
    create_and_wait_statefulset(statefulset)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    create_and_test_backups(core_api, client, pod_info)
    delete_and_wait_statefulset(core_api, client, statefulset)

    csi = check_csi(core_api)
    # StatefulSet fixture already cleans these up, use the manifests instead
    # of the fixtures to avoid issues during teardown.
    pv = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': ''
        },
        'spec': {
            'capacity': {
                'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            },
            'volumeMode': 'Filesystem',
            'accessModes': ['ReadWriteOnce'],
            'persistentVolumeReclaimPolicy': 'Delete',
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }
    pvc = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': ''
        },
        'spec': {
            'accessModes': [
                'ReadWriteOnce'
            ],
            'resources': {
                'requests': {
                    'storage': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
                }
            },
            'storageClassName': DEFAULT_STORAGECLASS_NAME
        }
    }
    # Attach the driver-specific source: CSI volumeHandle or flexVolume
    # options, depending on the deployed driver.
    if csi:
        pv['spec']['csi'] = {
            'driver': 'io.rancher.longhorn',
            'fsType': 'ext4',
            'volumeAttributes': {
                'numberOfReplicas':
                    storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                    storage_class['parameters']['staleReplicaTimeout']
            },
            'volumeHandle': ''
        }
    else:
        pv['spec']['flexVolume'] = {
            'driver': 'rancher.io/longhorn',
            'fsType': 'ext4',
            'options': {
                'numberOfReplicas':
                    storage_class['parameters']['numberOfReplicas'],
                'staleReplicaTimeout':
                    storage_class['parameters']['staleReplicaTimeout'],
                'fromBackup': '',
                'size': size_to_string(DEFAULT_VOLUME_SIZE * Gi)
            }
        }

    # Make sure that volumes still work even if the Pod and StatefulSet names
    # are different.
    for pod in pod_info:
        pod['pod_name'] = pod['pod_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pod['pvc_name'] = pod['pvc_name'].replace(
            'statefulset-restore-test', 'statefulset-restore-test-2')
        pv['metadata']['name'] = pod['pvc_name']

        if csi:
            # CSI: restore the backup into a new Longhorn volume first.
            client.create_volume(
                name=pod['pvc_name'],
                size=size_to_string(DEFAULT_VOLUME_SIZE * Gi),
                numberOfReplicas=int(
                    storage_class['parameters']['numberOfReplicas']),
                fromBackup=pod['backup_snapshot']['url'])
            wait_for_volume_detached(client, pod['pvc_name'])
            pv['spec']['csi']['volumeHandle'] = pod['pvc_name']
        else:
            # FlexVolume: the driver restores from the backup URL directly.
            pv['spec']['flexVolume']['options']['fromBackup'] = \
                pod['backup_snapshot']['url']

        core_api.create_persistent_volume(pv)

        pvc['metadata']['name'] = pod['pvc_name']
        pvc['spec']['volumeName'] = pod['pvc_name']
        core_api.create_namespaced_persistent_volume_claim(
            body=pvc, namespace='default')

    statefulset_name = 'statefulset-restore-test-2'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    create_and_wait_statefulset(statefulset)

    for pod in pod_info:
        resp = read_volume_data(core_api, pod['pod_name'])
        assert resp == pod['data']
def ha_backup_deletion_recovery_test(client, volume_name, size, base_image=""):  # NOQA
    """
    Helper: for each configured backup target, back up a snapshot, restore
    it into a new volume, delete the backup snapshot from the restored
    volume, and verify the restored volume can still rebuild replicas.
    """
    volume = client.create_volume(name=volume_name, size=size,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    # test backupTarget for multiple settings
    backupstores = common.get_backupstore_url()
    for backupstore in backupstores:
        if common.is_backupTarget_s3(backupstore):
            # S3 URLs carry the credential secret name after a "$" separator.
            backupsettings = backupstore.split("$")
            setting = client.update(setting, value=backupsettings[0])
            assert setting["value"] == backupsettings[0]

            credential = client.by_id_setting(
                common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value=backupsettings[1])
            assert credential["value"] == backupsettings[1]
        else:
            setting = client.update(setting, value=backupstore)
            assert setting["value"] == backupstore
            credential = client.by_id_setting(
                common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value="")
            assert credential["value"] == ""

        data = write_volume_random_data(volume)
        snap2 = volume.snapshotCreate()
        volume.snapshotCreate()

        volume.snapshotBackup(name=snap2["name"])

        _, b = common.find_backup(client, volume_name, snap2["name"])

        # Restore the backup into a fresh volume and verify the data.
        res_name = common.generate_volume_name()
        res_volume = client.create_volume(name=res_name, size=size,
                                          numberOfReplicas=2,
                                          fromBackup=b["url"])
        res_volume = common.wait_for_volume_detached(client, res_name)
        res_volume = res_volume.attach(hostId=host_id)
        res_volume = common.wait_for_volume_healthy(client, res_name)
        check_volume_data(res_volume, data)

        snapshots = res_volume.snapshotList()
        # only the backup snapshot + volume-head
        assert len(snapshots) == 2
        backup_snapshot = ""
        for snap in snapshots:
            if snap["name"] != "volume-head":
                backup_snapshot = snap["name"]
        assert backup_snapshot != ""

        # Deleting the backup snapshot must not break the restored volume.
        res_volume.snapshotCreate()
        snapshots = res_volume.snapshotList()
        assert len(snapshots) == 3

        res_volume.snapshotDelete(name=backup_snapshot)
        res_volume.snapshotPurge()
        snapshots = res_volume.snapshotList()
        assert len(snapshots) == 2

        # The restored volume must still be able to rebuild a replica.
        ha_rebuild_replica_test(client, res_name)

        res_volume = res_volume.detach()
        res_volume = common.wait_for_volume_detached(client, res_name)

        client.delete(res_volume)
        common.wait_for_volume_delete(client, res_name)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    client.delete(volume)
    common.wait_for_volume_delete(client, volume_name)

    volumes = client.list_volume()
    assert len(volumes) == 0