def test_attach_without_frontend(clients, volume_name):  # NOQA
    """
    Verify attaching a volume with the frontend disabled.

    1. Attach normally, write data and take a snapshot.
    2. Re-attach with ``disableFrontend=True`` and check that the block
       device endpoint is not exposed while maintenance operations
       (snapshot revert) still work.
    3. Re-attach normally and verify the reverted data is intact.
    """
    # FIX: dict.iteritems() is Python-2 only; take the first client the
    # same way with a Py2/Py3-compatible expression.
    host_id, client = next(iter(clients.items()))

    volume = create_and_check_volume(client, volume_name)

    lht_hostId = get_self_host_id()
    volume.attach(hostId=lht_hostId, disableFrontend=False)
    common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert volume["disableFrontend"] is False
    assert volume["frontend"] == "blockdev"

    # First snapshot's data is what we expect to see after the revert below.
    snap1_data = write_volume_random_data(volume)
    snap1 = volume.snapshotCreate()

    # Extra write + snapshot on top of snap1, later discarded by the revert.
    write_volume_random_data(volume)
    volume.snapshotCreate()

    volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    # Maintenance-mode attach: frontend disabled, no endpoint exposed.
    volume.attach(hostId=lht_hostId, disableFrontend=True)
    common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    engine = get_volume_engine(volume)
    assert volume["disableFrontend"] is True
    assert volume["frontend"] == "blockdev"
    assert engine["endpoint"] == ""

    # Revert is only allowed while the frontend is disabled.
    volume.snapshotRevert(name=snap1["name"])

    volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    # Normal attach again; the volume must contain snap1's data.
    volume.attach(hostId=lht_hostId, disableFrontend=False)
    common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)
    assert volume["disableFrontend"] is False
    assert volume["frontend"] == "blockdev"

    check_volume_data(volume, snap1_data)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
def test_volume_multinode(clients, volume_name):  # NOQA
    """
    Attach/detach one volume on every node in turn.

    For each host, attach the volume there, confirm the engine reports
    that host, then detach again. Finally delete the volume and check
    that no volumes remain.
    """
    node_ids = clients.keys()

    creator = get_random_client(clients)
    volume = creator.create_volume(
        name=volume_name, size=SIZE, numberOfReplicas=2)
    volume = common.wait_for_volume_detached(
        get_random_client(clients), volume_name)

    for node_id in node_ids:
        # Attach on this node and verify the engine landed there.
        volume = volume.attach(hostId=node_id)
        volume = common.wait_for_volume_healthy(
            get_random_client(clients), volume_name)
        attached_engine = get_volume_engine(volume)
        assert attached_engine["hostId"] == node_id

        # Detach before moving on to the next node.
        volume = volume.detach()
        volume = common.wait_for_volume_detached(
            get_random_client(clients), volume_name)

    get_random_client(clients).delete(volume)
    wait_for_volume_delete(get_random_client(clients), volume_name)

    remaining = get_random_client(clients).list_volume()
    assert len(remaining) == 0
def engine_offline_upgrade_test(client, volume_name, base_image=""):  # NOQA
    """
    Shared body for offline engine-upgrade tests.

    Deploys a compatible upgrade engine image, creates a volume on the
    default image, writes data, upgrades the *detached* volume to the new
    image, verifies engine/replica image fields and the data, then rolls
    back to the original image and cleans everything up.
    """
    # Read the default image's API/data-format versions so the test helper
    # can build a compatible upgrade image.
    default_img = common.get_default_engine_image(client)
    default_img_name = default_img["name"]
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 0)
    cli_v = default_img["cliAPIVersion"]
    cli_minv = default_img["cliAPIMinVersion"]
    ctl_v = default_img["controllerAPIVersion"]
    ctl_minv = default_img["controllerAPIMinVersion"]
    data_v = default_img["dataFormatVersion"]
    data_minv = default_img["dataFormatMinVersion"]
    engine_upgrade_image = common.get_upgrade_test_image(
        cli_v, cli_minv, ctl_v, ctl_minv, data_v, data_minv)

    # Deploy the upgrade image; it must be ready and unused.
    new_img = client.create_engine_image(image=engine_upgrade_image)
    new_img_name = new_img["name"]
    new_img = wait_for_engine_image_state(client, new_img_name, "ready")
    assert new_img["refCount"] == 0
    assert new_img["noRefSince"] != ""

    default_img = common.get_default_engine_image(client)
    default_img_name = default_img["name"]

    # New volume starts on the default engine image.
    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=REPLICA_COUNT,
                                  baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 1)
    original_engine_image = default_img["image"]

    assert volume["name"] == volume_name
    assert volume["engineImage"] == original_engine_image
    assert volume["currentImage"] == original_engine_image
    assert volume["baseImage"] == base_image

    # Before our upgrade, write data to the volume first.
    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)
    data = write_volume_random_data(volume)
    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    # Offline upgrade: the volume is detached while switching images.
    volume.engineUpgrade(image=engine_upgrade_image)
    volume = wait_for_volume_current_image(client, volume_name,
                                           engine_upgrade_image)
    # Reference counts move from the default image to the new one.
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 0)
    new_img = wait_for_engine_image_ref_count(client, new_img_name, 1)

    # cannot delete a image in use
    with pytest.raises(Exception) as e:
        client.delete(new_img)
    assert "while being used" in str(e.value)

    # Attach and verify engine + every replica run on the upgraded image,
    # and the pre-upgrade data survived.
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    engine = get_volume_engine(volume)
    assert engine["engineImage"] == engine_upgrade_image
    assert engine["currentImage"] == engine_upgrade_image
    for replica in volume["replicas"]:
        assert replica["engineImage"] == engine_upgrade_image
        assert replica["currentImage"] == engine_upgrade_image

    check_volume_data(volume, data)

    # Roll back (again offline) to the original default image.
    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)

    volume.engineUpgrade(image=original_engine_image)
    volume = wait_for_volume_current_image(client, volume_name,
                                           original_engine_image)
    engine = get_volume_engine(volume)
    assert volume["engineImage"] == original_engine_image
    assert engine["engineImage"] == original_engine_image
    for replica in volume["replicas"]:
        assert replica["engineImage"] == original_engine_image

    # Reference counts move back to the default image.
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 1)
    new_img = wait_for_engine_image_ref_count(client, new_img_name, 0)

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    engine = get_volume_engine(volume)
    assert engine["engineImage"] == original_engine_image
    assert engine["currentImage"] == original_engine_image
    for replica in volume["replicas"]:
        assert replica["engineImage"] == original_engine_image
        assert replica["currentImage"] == original_engine_image

    # Data must still be intact after the round-trip upgrade/rollback.
    check_volume_data(volume, data)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    client.delete(new_img)
def engine_live_upgrade_rollback_test(client, volume_name, base_image=""):  # NOQA
    """
    Shared body for live engine-upgrade rollback tests.

    Deploys an intentionally *incompatible* engine image, attempts a live
    upgrade to it (which must never complete while attached), rolls back
    live, then retries the bad upgrade and lets a detach finish it, and
    finally recovers offline with the original image. Data written before
    the upgrades must survive throughout.
    """
    default_img = common.get_default_engine_image(client)
    default_img_name = default_img["name"]
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 0)
    cli_v = default_img["cliAPIVersion"]
    cli_minv = default_img["cliAPIMinVersion"]
    ctl_v = default_img["controllerAPIVersion"]
    ctl_minv = default_img["controllerAPIMinVersion"]
    data_v = default_img["dataFormatVersion"]
    data_minv = default_img["dataFormatMinVersion"]
    # An image built to be incompatible with the current versions.
    wrong_engine_upgrade_image = common.get_compatibility_test_image(
        cli_v, cli_minv, ctl_v, ctl_minv, data_v, data_minv)
    new_img = client.create_engine_image(image=wrong_engine_upgrade_image)
    new_img_name = new_img["name"]
    new_img = wait_for_engine_image_state(client, new_img_name, "ready")
    assert new_img["refCount"] == 0
    assert new_img["noRefSince"] != ""

    default_img = common.get_default_engine_image(client)
    default_img_name = default_img["name"]

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 1)

    assert volume["baseImage"] == base_image
    original_engine_image = volume["engineImage"]
    assert original_engine_image != wrong_engine_upgrade_image

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    data = write_volume_random_data(volume)

    # Request the (incompatible) live upgrade: the desired image changes
    # but currentImage must stay on the original.
    volume.engineUpgrade(image=wrong_engine_upgrade_image)
    volume = client.by_id_volume(volume["name"])
    assert volume["engineImage"] == wrong_engine_upgrade_image
    assert volume["currentImage"] == original_engine_image

    with pytest.raises(Exception):
        # this will timeout
        wait_for_volume_current_image(client, volume_name,
                                      wrong_engine_upgrade_image)

    # rollback
    volume.engineUpgrade(image=original_engine_image)
    volume = wait_for_volume_current_image(client, volume_name,
                                           original_engine_image)
    assert volume["engineImage"] == original_engine_image
    assert volume["currentImage"] == original_engine_image
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == original_engine_image
    assert engine["currentImage"] == original_engine_image

    volume = common.wait_for_volume_replica_count(client, volume_name,
                                                  REPLICA_COUNT)
    check_volume_data(volume, data)

    assert volume["state"] == common.VOLUME_STATE_ATTACHED
    assert volume["robustness"] == common.VOLUME_ROBUSTNESS_HEALTHY

    # try again, this time let's try detach
    volume.engineUpgrade(image=wrong_engine_upgrade_image)
    volume = client.by_id_volume(volume["name"])
    assert volume["engineImage"] == wrong_engine_upgrade_image
    assert volume["currentImage"] == original_engine_image

    with pytest.raises(Exception):
        # this will timeout
        wait_for_volume_current_image(client, volume_name,
                                      wrong_engine_upgrade_image)

    # Detaching lets the pending (offline) image switch complete.
    volume = volume.detach()
    volume = wait_for_volume_current_image(client, volume_name,
                                           wrong_engine_upgrade_image)

    # all the images would be updated
    assert volume["engineImage"] == wrong_engine_upgrade_image
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == wrong_engine_upgrade_image
    volume = common.wait_for_volume_replica_count(client, volume_name,
                                                  REPLICA_COUNT)
    for replica in volume["replicas"]:
        assert replica["engineImage"] == wrong_engine_upgrade_image

    # upgrade to the correct image when offline
    volume.engineUpgrade(image=original_engine_image)
    volume = client.by_id_volume(volume["name"])
    assert volume["engineImage"] == original_engine_image

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert volume["engineImage"] == original_engine_image
    assert volume["currentImage"] == original_engine_image
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == original_engine_image
    assert engine["currentImage"] == original_engine_image
    for replica in volume["replicas"]:
        assert replica["engineImage"] == original_engine_image
        assert replica["currentImage"] == original_engine_image

    # Data must have survived both failed upgrade attempts.
    check_volume_data(volume, data)

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    client.delete(new_img)
def engine_live_upgrade_test(client, volume_name, base_image=""):  # NOQA
    """
    Shared body for live (attached) engine-upgrade tests.

    Deploys a compatible upgrade image, live-upgrades an attached volume
    to it, verifies engine/replica image fields and data across a
    detach/re-attach cycle, live-downgrades back to the original image,
    and cleans up.
    """
    default_img = common.get_default_engine_image(client)
    default_img_name = default_img["name"]
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 0)
    cli_v = default_img["cliAPIVersion"]
    cli_minv = default_img["cliAPIMinVersion"]
    ctl_v = default_img["controllerAPIVersion"]
    ctl_minv = default_img["controllerAPIMinVersion"]
    data_v = default_img["dataFormatVersion"]
    data_minv = default_img["dataFormatMinVersion"]
    engine_upgrade_image = common.get_upgrade_test_image(
        cli_v, cli_minv, ctl_v, ctl_minv, data_v, data_minv)

    new_img = client.create_engine_image(image=engine_upgrade_image)
    new_img_name = new_img["name"]
    new_img = wait_for_engine_image_state(client, new_img_name, "ready")
    assert new_img["refCount"] == 0
    assert new_img["noRefSince"] != ""

    default_img = common.get_default_engine_image(client)
    default_img_name = default_img["name"]

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2, baseImage=base_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 1)

    assert volume["name"] == volume_name
    assert volume["baseImage"] == base_image

    original_engine_image = volume["engineImage"]
    assert original_engine_image != engine_upgrade_image

    host_id = get_self_host_id()
    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    # Baseline: everything starts on the original image.
    assert volume["engineImage"] == original_engine_image
    assert volume["currentImage"] == original_engine_image
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == original_engine_image
    assert engine["currentImage"] == original_engine_image
    for replica in volume["replicas"]:
        assert replica["engineImage"] == original_engine_image
        assert replica["currentImage"] == original_engine_image

    data = write_volume_random_data(volume)

    # Live upgrade while the volume stays attached.
    volume.engineUpgrade(image=engine_upgrade_image)
    volume = wait_for_volume_current_image(client, volume_name,
                                           engine_upgrade_image)
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == engine_upgrade_image

    default_img = wait_for_engine_image_ref_count(client, default_img_name, 0)
    new_img = wait_for_engine_image_ref_count(client, new_img_name, 1)

    count = 0
    # old replica may be in deletion process
    for replica in volume["replicas"]:
        if replica["currentImage"] == engine_upgrade_image:
            count += 1
    assert count == REPLICA_COUNT

    check_volume_data(volume, data)

    # Detach/re-attach cycle on the upgraded image.
    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == REPLICA_COUNT

    assert volume["engineImage"] == engine_upgrade_image
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == engine_upgrade_image
    for replica in volume["replicas"]:
        assert replica["engineImage"] == engine_upgrade_image

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    assert volume["engineImage"] == engine_upgrade_image
    assert volume["currentImage"] == engine_upgrade_image
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == engine_upgrade_image
    assert engine["currentImage"] == engine_upgrade_image
    for replica in volume["replicas"]:
        assert replica["engineImage"] == engine_upgrade_image
        assert replica["currentImage"] == engine_upgrade_image

    # Make sure detaching didn't somehow interfere with the data.
    check_volume_data(volume, data)

    # Live downgrade back to the original image.
    volume.engineUpgrade(image=original_engine_image)
    volume = wait_for_volume_current_image(client, volume_name,
                                           original_engine_image)
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == original_engine_image

    default_img = wait_for_engine_image_ref_count(client, default_img_name, 1)
    new_img = wait_for_engine_image_ref_count(client, new_img_name, 0)

    assert volume["engineImage"] == original_engine_image
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == original_engine_image

    count = 0
    # old replica may be in deletion process
    for replica in volume["replicas"]:
        if replica["engineImage"] == original_engine_image:
            count += 1
    assert count == REPLICA_COUNT

    check_volume_data(volume, data)

    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume["replicas"]) == REPLICA_COUNT

    assert volume["engineImage"] == original_engine_image
    engine = get_volume_engine(volume)
    assert engine["engineImage"] == original_engine_image
    for replica in volume["replicas"]:
        assert replica["engineImage"] == original_engine_image

    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    client.delete(new_img)
def test_csi_offline_expansion(client, core_api, storage_class, pvc, pod_manifest):  # NOQA
    """
    Test CSI feature: offline expansion

    1. Create a new `storage_class` with `allowVolumeExpansion` set
    2. Create PVC and Pod with dynamic provisioned volume from the
       StorageClass
    3. Generate `test_data` and write to the pod
    4. Delete the pod
    5. Update pvc.spec.resources to expand the volume
    6. Verify the volume expansion done using Longhorn API
    7. Create a new pod and validate the volume content
    """
    create_storage_class(storage_class)

    pod_name = 'csi-offline-expand-volume-test'
    pvc_name = pod_name + "-pvc"
    pvc['metadata']['name'] = pvc_name
    pvc['spec']['storageClassName'] = storage_class['metadata']['name']
    create_pvc(pvc)

    # Mount the dynamically provisioned PVC into the pod, write test data,
    # then delete the pod so the volume can detach for offline expansion.
    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name':
            pod_manifest['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {'claimName': pvc_name},
    }]
    create_and_wait_pod(core_api, pod_manifest)
    test_data = generate_random_data(VOLUME_RWTEST_SIZE)
    write_pod_volume_data(core_api, pod_name, test_data)
    delete_and_wait_pod(core_api, pod_name)

    # Resolve the Longhorn volume backing this PVC via the PV's CSI handle.
    pv = wait_and_get_pv_for_pvc(core_api, pvc_name)
    assert pv.status.phase == "Bound"
    volume_name = pv.spec.csi.volume_handle
    wait_for_volume_detached(client, volume_name)

    # Trigger the expansion by growing the PVC request while detached.
    pvc['spec']['resources'] = {
        'requests': {
            'storage': size_to_string(EXPANDED_VOLUME_SIZE*Gi)
        }
    }
    expand_and_wait_for_pvc(core_api, pvc)
    wait_for_volume_expansion(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume.state == "detached"
    assert volume.size == str(EXPANDED_VOLUME_SIZE*Gi)

    # Recreate the pod and confirm the expanded volume kept its content.
    pod_manifest['metadata']['name'] = pod_name
    pod_manifest['spec']['volumes'] = [{
        'name':
            pod_manifest['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {'claimName': pvc_name},
    }]
    create_and_wait_pod(core_api, pod_manifest)

    resp = read_volume_data(core_api, pod_name)
    assert resp == test_data
    volume = client.by_id_volume(volume_name)
    engine = get_volume_engine(volume)
    assert volume.size == str(EXPANDED_VOLUME_SIZE*Gi)
    assert volume.size == engine.size
def restore_inc_test(client, core_api, volume_name, pod):  # NOQA
    """
    Shared body for incremental-restore (standby/DR volume) tests.

    Creates a source volume and a backup, spawns three standby volumes
    restoring from that backup, verifies standby restrictions (no
    snapshot/backup/PV/PVC operations, no backup-target change), then
    activates each standby at different backup points and checks the
    restored data, including running one activated volume in a pod.
    """
    std_volume = create_and_check_volume(client, volume_name, 2, SIZE)
    lht_host_id = get_self_host_id()
    std_volume.attach(hostId=lht_host_id)
    std_volume = common.wait_for_volume_healthy(client, volume_name)

    # A normal (non-standby) volume cannot be "activated".
    with pytest.raises(Exception) as e:
        std_volume.activate(frontend="blockdev")
    assert "already in active mode" in str(e.value)

    data0 = {'len': 4 * 1024, 'pos': 0}
    data0['content'] = common.generate_random_data(data0['len'])
    bv, backup0, _, data0 = create_backup(client, volume_name, data0)

    # Three standby volumes all restoring from backup0.
    sb_volume0_name = "sb-0-" + volume_name
    sb_volume1_name = "sb-1-" + volume_name
    sb_volume2_name = "sb-2-" + volume_name
    client.create_volume(name=sb_volume0_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=backup0['url'],
                         frontend="", standby=True)
    client.create_volume(name=sb_volume1_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=backup0['url'],
                         frontend="", standby=True)
    client.create_volume(name=sb_volume2_name, size=SIZE,
                         numberOfReplicas=2, fromBackup=backup0['url'],
                         frontend="", standby=True)
    common.wait_for_volume_restoration_completed(client, sb_volume0_name)
    common.wait_for_volume_restoration_completed(client, sb_volume1_name)
    common.wait_for_volume_restoration_completed(client, sb_volume2_name)
    sb_volume0 = common.wait_for_volume_healthy(client, sb_volume0_name)
    sb_volume1 = common.wait_for_volume_healthy(client, sb_volume1_name)
    sb_volume2 = common.wait_for_volume_healthy(client, sb_volume2_name)

    # Poll until all three standby volumes report backup0 as both the
    # last backup and the last restored backup.
    for i in range(RETRY_COUNTS):
        sb_volume0 = client.by_id_volume(sb_volume0_name)
        sb_volume1 = client.by_id_volume(sb_volume1_name)
        sb_volume2 = client.by_id_volume(sb_volume2_name)
        sb_engine0 = get_volume_engine(sb_volume0)
        sb_engine1 = get_volume_engine(sb_volume1)
        sb_engine2 = get_volume_engine(sb_volume2)
        if sb_volume0["lastBackup"] != backup0["name"] or \
                sb_volume1["lastBackup"] != backup0["name"] or \
                sb_volume2["lastBackup"] != backup0["name"] or \
                sb_engine0["lastRestoredBackup"] != backup0["name"] or \
                sb_engine1["lastRestoredBackup"] != backup0["name"] or \
                sb_engine2["lastRestoredBackup"] != backup0["name"]:
            time.sleep(RETRY_INTERVAL)
        else:
            break

    assert sb_volume0["standby"] is True
    assert sb_volume0["lastBackup"] == backup0["name"]
    assert sb_volume0["frontend"] == ""
    assert sb_volume0["disableFrontend"] is True
    assert sb_volume0["initialRestorationRequired"] is False
    sb_engine0 = get_volume_engine(sb_volume0)
    assert sb_engine0["lastRestoredBackup"] == backup0["name"]
    assert sb_engine0["requestedBackupRestore"] == backup0["name"]
    assert sb_volume1["standby"] is True
    assert sb_volume1["lastBackup"] == backup0["name"]
    assert sb_volume1["frontend"] == ""
    assert sb_volume1["disableFrontend"] is True
    assert sb_volume1["initialRestorationRequired"] is False
    sb_engine1 = get_volume_engine(sb_volume1)
    assert sb_engine1["lastRestoredBackup"] == backup0["name"]
    assert sb_engine1["requestedBackupRestore"] == backup0["name"]
    assert sb_volume2["standby"] is True
    assert sb_volume2["lastBackup"] == backup0["name"]
    assert sb_volume2["frontend"] == ""
    assert sb_volume2["disableFrontend"] is True
    assert sb_volume2["initialRestorationRequired"] is False
    sb_engine2 = get_volume_engine(sb_volume2)
    assert sb_engine2["lastRestoredBackup"] == backup0["name"]
    assert sb_engine2["requestedBackupRestore"] == backup0["name"]

    # Find the one non-volume-head snapshot on the standby volume.
    sb0_snaps = sb_volume0.snapshotList()
    assert len(sb0_snaps) == 2
    for s in sb0_snaps:
        if s['name'] != "volume-head":
            sb0_snap = s
    assert sb0_snaps

    # All mutating operations are forbidden on a standby volume.
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotCreate()
    assert "cannot create snapshot for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotRevert(name=sb0_snap["name"])
    assert "cannot revert snapshot for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotDelete(name=sb0_snap["name"])
    assert "cannot delete snapshot for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotBackup(name=sb0_snap["name"])
    assert "cannot create backup for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.pvCreate(pvName=sb_volume0_name)
    assert "cannot create PV for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.pvcCreate(pvcName=sb_volume0_name)
    assert "cannot create PVC for standby volume" in str(e.value)

    # The backup target cannot change while standby volumes exist.
    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    with pytest.raises(Exception) as e:
        client.update(setting, value="random.backup.target")
    assert "cannot modify BackupTarget " \
           "since there are existing standby volumes" in str(e.value)

    with pytest.raises(Exception) as e:
        sb_volume0.activate(frontend="wrong_frontend")
    assert "invalid frontend" in str(e.value)

    # Activate standby 0 at backup0 and verify its data.
    activate_standby_volume(client, sb_volume0_name)
    sb_volume0 = client.by_id_volume(sb_volume0_name)
    sb_volume0.attach(hostId=lht_host_id)
    sb_volume0 = common.wait_for_volume_healthy(client, sb_volume0_name)
    check_volume_data(sb_volume0, data0, False)

    # backup1 overwrites the first 2KiB of data0 with zeroes.
    zero_string = b'\x00'.decode('utf-8')
    _, backup1, _, data1 = create_backup(
        client, volume_name,
        {'len': 2 * 1024, 'pos': 0, 'content': zero_string * 2 * 1024})
    # use this api to update field `last backup`
    client.list_backupVolume()
    check_volume_last_backup(client, sb_volume1_name, backup1['name'])
    activate_standby_volume(client, sb_volume1_name)
    sb_volume1 = client.by_id_volume(sb_volume1_name)
    sb_volume1.attach(hostId=lht_host_id)
    sb_volume1 = common.wait_for_volume_healthy(client, sb_volume1_name)
    # The tail of data0 (past the zeroed region) must still be there.
    data0_modified = {
        'len': data0['len'] - data1['len'],
        'pos': data1['len'],
        'content': data0['content'][data1['len']:],
    }
    check_volume_data(sb_volume1, data0_modified, False)
    check_volume_data(sb_volume1, data1)

    # backup2: a larger write, restored incrementally by standby 2.
    data2 = {'len': 1 * 1024 * 1024, 'pos': 0}
    data2['content'] = common.generate_random_data(data2['len'])
    _, backup2, _, data2 = create_backup(client, volume_name, data2)
    client.list_backupVolume()
    check_volume_last_backup(client, sb_volume2_name, backup2['name'])
    activate_standby_volume(client, sb_volume2_name)
    sb_volume2 = client.by_id_volume(sb_volume2_name)
    sb_volume2.attach(hostId=lht_host_id)
    sb_volume2 = common.wait_for_volume_healthy(client, sb_volume2_name)
    check_volume_data(sb_volume2, data2)

    # allocated this active volume to a pod
    sb_volume2.detach()
    sb_volume2 = common.wait_for_volume_detached(client, sb_volume2_name)

    create_pv_for_volume(client, core_api, sb_volume2, sb_volume2_name)
    create_pvc_for_volume(client, core_api, sb_volume2, sb_volume2_name)

    sb_volume2_pod_name = "pod-" + sb_volume2_name
    pod['metadata']['name'] = sb_volume2_pod_name
    pod['spec']['volumes'] = [{
        'name': pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': sb_volume2_name,
        },
    }]
    create_and_wait_pod(core_api, pod)

    sb_volume2 = client.by_id_volume(sb_volume2_name)
    k_status = sb_volume2["kubernetesStatus"]
    workloads = k_status['workloadsStatus']
    assert k_status['pvName'] == sb_volume2_name
    assert k_status['pvStatus'] == 'Bound'
    assert len(workloads) == 1
    # Poll until the pod's workload status turns Running.
    for i in range(RETRY_COUNTS):
        if workloads[0]['podStatus'] == 'Running':
            break
        time.sleep(RETRY_INTERVAL)
        sb_volume2 = client.by_id_volume(sb_volume2_name)
        k_status = sb_volume2["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert len(workloads) == 1

    assert workloads[0]['podName'] == sb_volume2_pod_name
    assert workloads[0]['podStatus'] == 'Running'
    assert not workloads[0]['workloadName']
    assert not workloads[0]['workloadType']
    assert k_status['namespace'] == 'default'
    assert k_status['pvcName'] == sb_volume2_name
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    delete_and_wait_pod(core_api, sb_volume2_pod_name)
    delete_and_wait_pvc(core_api, sb_volume2_name)
    delete_and_wait_pv(core_api, sb_volume2_name)

    # cleanup
    std_volume.detach()
    sb_volume0.detach()
    sb_volume1.detach()
    std_volume = common.wait_for_volume_detached(client, volume_name)
    sb_volume0 = common.wait_for_volume_detached(client, sb_volume0_name)
    sb_volume1 = common.wait_for_volume_detached(client, sb_volume1_name)
    sb_volume2 = common.wait_for_volume_detached(client, sb_volume2_name)

    bv.backupDelete(name=backup2["name"])
    bv.backupDelete(name=backup1["name"])
    bv.backupDelete(name=backup0["name"])

    client.delete(std_volume)
    client.delete(sb_volume0)
    client.delete(sb_volume1)
    client.delete(sb_volume2)

    wait_for_volume_delete(client, volume_name)
    wait_for_volume_delete(client, sb_volume0_name)
    wait_for_volume_delete(client, sb_volume1_name)
    wait_for_volume_delete(client, sb_volume2_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
def snapshot_test(clients, volume_name, base_image):  # NOQA
    """
    Exercise the snapshot lifecycle: create, delete, revert and purge.

    Builds a chain snap1 -> snap2 -> snap3, marks snap3 removed, reverts
    to snap2 (which requires a frontend-disabled attach), then deletes
    and purges, verifying the snapshot tree and volume data at each step.
    """
    # FIX: dict.iteritems() is Python-2 only; take the first client the
    # same way with a Py2/Py3-compatible expression.
    host_id, client = next(iter(clients.items()))

    volume = create_and_check_volume(client, volume_name,
                                     base_image=base_image)

    lht_hostId = get_self_host_id()
    volume = volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, volume_name)

    volume = client.by_id_volume(volume_name)

    positions = {}

    # Build the chain snap1 -> snap2 -> snap3 with distinct data sets.
    snap1 = volume.snapshotCreate()

    snap2_data = write_volume_random_data(volume, positions)
    snap2 = volume.snapshotCreate()

    snap3_data = write_volume_random_data(volume, positions)
    snap3 = volume.snapshotCreate()

    snapshots = volume.snapshotList()
    snapMap = {}
    for snap in snapshots:
        snapMap[snap["name"]] = snap

    assert snapMap[snap1["name"]]["name"] == snap1["name"]
    assert snapMap[snap1["name"]]["removed"] is False
    assert snapMap[snap2["name"]]["name"] == snap2["name"]
    assert snapMap[snap2["name"]]["parent"] == snap1["name"]
    assert snapMap[snap2["name"]]["removed"] is False
    assert snapMap[snap3["name"]]["name"] == snap3["name"]
    assert snapMap[snap3["name"]]["parent"] == snap2["name"]
    assert snapMap[snap3["name"]]["removed"] is False

    # Deleting snap3 only marks it removed (it still backs volume-head),
    # so the data must be unchanged.
    volume.snapshotDelete(name=snap3["name"])
    check_volume_data(volume, snap3_data)

    snapshots = volume.snapshotList(volume=volume_name)
    snapMap = {}
    for snap in snapshots:
        snapMap[snap["name"]] = snap

    assert snapMap[snap1["name"]]["name"] == snap1["name"]
    assert snapMap[snap1["name"]]["removed"] is False
    assert snapMap[snap2["name"]]["name"] == snap2["name"]
    assert snapMap[snap2["name"]]["parent"] == snap1["name"]
    assert snapMap[snap2["name"]]["removed"] is False
    assert snapMap[snap3["name"]]["name"] == snap3["name"]
    assert snapMap[snap3["name"]]["parent"] == snap2["name"]
    assert len(snapMap[snap3["name"]]["children"]) == 1
    assert "volume-head" in snapMap[snap3["name"]]["children"]
    assert snapMap[snap3["name"]]["removed"] is True

    snap = volume.snapshotGet(name=snap3["name"])
    assert snap["name"] == snap3["name"]
    assert snap["parent"] == snap3["parent"]
    assert len(snap3["children"]) == 1
    assert len(snap["children"]) == 1
    assert "volume-head" in snap3["children"]
    assert "volume-head" in snap["children"]
    assert snap["removed"] is True

    # Revert requires a maintenance-mode (frontend-disabled) attach.
    volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    volume.attach(hostId=lht_hostId, disableFrontend=True)
    common.wait_for_volume_healthy(client, volume_name)
    volume = client.by_id_volume(volume_name)
    engine = get_volume_engine(volume)
    assert volume["disableFrontend"] is True
    assert volume["frontend"] == "blockdev"
    assert engine["endpoint"] == ""

    volume.snapshotRevert(name=snap2["name"])

    volume.detach()
    volume = common.wait_for_volume_detached(client, volume_name)
    volume.attach(hostId=lht_hostId, disableFrontend=False)
    common.wait_for_volume_healthy(client, volume_name)
    volume = client.by_id_volume(volume_name)
    assert volume["disableFrontend"] is False
    assert volume["frontend"] == "blockdev"

    # After the revert, the volume must hold snap2's data and volume-head
    # must be re-parented under snap2 (next to snap3).
    check_volume_data(volume, snap2_data)

    snapshots = volume.snapshotList(volume=volume_name)
    snapMap = {}
    for snap in snapshots:
        snapMap[snap["name"]] = snap

    assert snapMap[snap1["name"]]["name"] == snap1["name"]
    assert snapMap[snap1["name"]]["removed"] is False
    assert snapMap[snap2["name"]]["name"] == snap2["name"]
    assert snapMap[snap2["name"]]["parent"] == snap1["name"]
    assert "volume-head" in snapMap[snap2["name"]]["children"]
    assert snap3["name"] in snapMap[snap2["name"]]["children"]
    assert snapMap[snap2["name"]]["removed"] is False
    assert snapMap[snap3["name"]]["name"] == snap3["name"]
    assert snapMap[snap3["name"]]["parent"] == snap2["name"]
    assert len(snapMap[snap3["name"]]["children"]) == 0
    assert snapMap[snap3["name"]]["removed"] is True

    # Delete snap1 and snap2, then purge: snap1 and snap3 disappear.
    volume.snapshotDelete(name=snap1["name"])
    volume.snapshotDelete(name=snap2["name"])

    volume.snapshotPurge()
    wait_for_snapshot_purge(volume, snap1["name"], snap3["name"])

    snapshots = volume.snapshotList(volume=volume_name)
    snapMap = {}
    for snap in snapshots:
        snapMap[snap["name"]] = snap

    assert snap1["name"] not in snapMap
    assert snap3["name"] not in snapMap

    # it's the parent of volume-head, so it cannot be purged at this time
    assert snapMap[snap2["name"]]["name"] == snap2["name"]
    assert snapMap[snap2["name"]]["parent"] == ""
    assert "volume-head" in snapMap[snap2["name"]]["children"]
    assert snapMap[snap2["name"]]["removed"] is True

    check_volume_data(volume, snap2_data)

    cleanup_volume(client, volume)
def test_engine_live_upgrade_with_intensive_data_writing(
        client, core_api, volume_name, pod_make):  # NOQA
    """
    Test engine live upgrade with intensive data writing

    1. Deploy a compatible new engine image
    2. Create a volume(with the old default engine image) with /PV/PVC/Pod
       and wait for pod to be deployed.
    3. Write data to a tmp file in the pod and get the md5sum
    4. Upgrade the volume to the new engine image without waiting.
    5. Keep copying data from the tmp file to the volume
       during the live upgrade.
    6. Wait until the upgrade completed, verify the volume engine image
       changed
    7. Wait for new replica mode update then check the engine status.
    8. Verify all engine and replicas' engine image changed
    9. Verify the reference count of the new engine image changed
    10. Check the existing data.
        Then write new data to the upgraded volume and get the md5sum.
    11. Delete the pod and wait for the volume detached.
        Then check engine and replicas's engine image again.
    12. Recreate the pod.
    13. Check if the attached volume is state `healthy`
        rather than `degraded`.
    14. Check the data.
    """
    # Build a compatible upgrade image from the default image's versions.
    default_img = common.get_default_engine_image(client)
    default_img_name = default_img.name
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 0)
    cli_v = default_img.cliAPIVersion
    cli_minv = default_img.cliAPIMinVersion
    ctl_v = default_img.controllerAPIVersion
    ctl_minv = default_img.controllerAPIMinVersion
    data_v = default_img.dataFormatVersion
    data_minv = default_img.dataFormatMinVersion
    engine_upgrade_image = common.get_upgrade_test_image(
        cli_v, cli_minv, ctl_v, ctl_minv, data_v, data_minv)

    new_img = client.create_engine_image(image=engine_upgrade_image)
    new_img_name = new_img.name
    ei_status_value = get_engine_image_status_value(client, new_img_name)
    new_img = wait_for_engine_image_state(client, new_img_name,
                                          ei_status_value)
    assert new_img.refCount == 0
    assert new_img.noRefSince != ""

    default_img = common.get_default_engine_image(client)
    default_img_name = default_img.name

    pod_name = volume_name + "-pod"
    pv_name = volume_name + "-pv"
    pvc_name = volume_name + "-pvc"

    pod = pod_make(name=pod_name)
    volume = create_and_check_volume(client, volume_name,
                                     num_of_replicas=3, size=str(1 * Gi))
    original_engine_image = volume.engineImage
    assert original_engine_image != engine_upgrade_image

    create_pv_for_volume(client, core_api, volume, pv_name)
    create_pvc_for_volume(client, core_api, volume, pvc_name)
    pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]
    create_and_wait_pod(core_api, pod)

    # Baseline: engine and all replicas on the original image.
    volume = client.by_id_volume(volume_name)
    assert volume.engineImage == original_engine_image
    assert volume.currentImage == original_engine_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == original_engine_image
    assert engine.currentImage == original_engine_image
    for replica in volume.replicas:
        assert replica.engineImage == original_engine_image
        assert replica.currentImage == original_engine_image

    data_path0 = "/tmp/test"
    data_path1 = "/data/test1"
    write_pod_volume_random_data(core_api, pod_name,
                                 data_path0, RANDOM_DATA_SIZE_LARGE)
    original_md5sum1 = get_pod_data_md5sum(core_api, pod_name, data_path0)

    volume.engineUpgrade(image=engine_upgrade_image)
    # Keep writing data to the volume during the live upgrade
    copy_pod_volume_data(core_api, pod_name, data_path0, data_path1)

    # Wait for live upgrade complete
    wait_for_volume_current_image(client, volume_name, engine_upgrade_image)
    volume = wait_for_volume_replicas_mode(client, volume_name, "RW")
    engine = get_volume_engine(volume)
    assert engine.engineImage == engine_upgrade_image
    check_volume_endpoint(volume)

    # Reference counts move from the default image to the new one.
    wait_for_engine_image_ref_count(client, default_img_name, 0)
    wait_for_engine_image_ref_count(client, new_img_name, 1)

    # Data copied during the upgrade must be intact.
    volume_file_md5sum1 = get_pod_data_md5sum(core_api, pod_name, data_path1)
    assert volume_file_md5sum1 == original_md5sum1

    data_path2 = "/data/test2"
    write_pod_volume_random_data(core_api, pod_name,
                                 data_path2, RANDOM_DATA_SIZE_SMALL)
    original_md5sum2 = get_pod_data_md5sum(core_api, pod_name, data_path2)

    # Detach via pod deletion; images must persist on the upgraded image.
    delete_and_wait_pod(core_api, pod_name)
    volume = wait_for_volume_detached(client, volume_name)
    assert len(volume.replicas) == 3
    assert volume.engineImage == engine_upgrade_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == engine_upgrade_image
    for replica in volume.replicas:
        assert replica.engineImage == engine_upgrade_image

    create_and_wait_pod(core_api, pod)
    # Healthy (not degraded): all replicas must come back.
    common.wait_for_volume_healthy(client, volume_name)

    volume_file_md5sum1 = get_pod_data_md5sum(core_api, pod_name, data_path1)
    assert volume_file_md5sum1 == original_md5sum1
    volume_file_md5sum2 = get_pod_data_md5sum(core_api, pod_name, data_path2)
    assert volume_file_md5sum2 == original_md5sum2
def engine_live_upgrade_rollback_test(client, core_api, volume_name, backing_image=""):  # NOQA
    """Verify a volume can roll back from a failed live engine upgrade.

    Steps:
    1. Deploy an intentionally incompatible engine image and confirm it
       starts with no references.
    2. Create a volume (optionally with a backing image), attach it, and
       write random data.
    3. Attempt a live upgrade to the incompatible image; the upgrade must
       not complete (currentImage stays on the original image), then roll
       back to the original image and verify data integrity.
    4. Attempt the incompatible upgrade again, but this time detach the
       volume; a detached volume accepts the image switch, so all image
       fields move to the incompatible image.
    5. Upgrade back to the correct image while offline, reattach, and
       verify engine/replica images, endpoint, and data.
    6. Clean up the volume and the test engine image.
    """
    default_img = common.get_default_engine_image(client)
    default_img_name = default_img.name
    # Ensure no volume is referencing the default image before starting.
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 0)
    cli_v = default_img.cliAPIVersion
    cli_minv = default_img.cliAPIMinVersion
    ctl_v = default_img.controllerAPIVersion
    ctl_minv = default_img.controllerAPIMinVersion
    data_v = default_img.dataFormatVersion
    data_minv = default_img.dataFormatMinVersion
    # Build an engine image that is deliberately incompatible with the
    # current API/data-format versions, so a live upgrade to it must fail.
    wrong_engine_upgrade_image = common.get_compatibility_test_image(
        cli_v, cli_minv, ctl_v, ctl_minv, data_v, data_minv)
    new_img = client.create_engine_image(image=wrong_engine_upgrade_image)
    new_img_name = new_img.name
    ei_status_value = get_engine_image_status_value(client, new_img_name)
    new_img = wait_for_engine_image_state(client, new_img_name,
                                          ei_status_value)
    # A freshly deployed image has no users yet.
    assert new_img.refCount == 0
    assert new_img.noRefSince != ""
    default_img = common.get_default_engine_image(client)
    default_img_name = default_img.name
    client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2,
                         backingImage=backing_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    # Creating the volume adds one reference to the default engine image.
    wait_for_engine_image_ref_count(client, default_img_name, 1)
    assert volume.backingImage == backing_image
    original_engine_image = volume.engineImage
    assert original_engine_image != wrong_engine_upgrade_image
    host_id = get_self_host_id()
    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)
    data = write_volume_random_data(volume)
    # First attempt: live upgrade to the incompatible image.  The desired
    # image changes immediately, but the running (current) image must not.
    volume.engineUpgrade(image=wrong_engine_upgrade_image)
    volume = client.by_id_volume(volume.name)
    assert volume.engineImage == wrong_engine_upgrade_image
    assert volume.currentImage == original_engine_image
    with pytest.raises(Exception):
        # this will timeout
        wait_for_volume_current_image(client, volume_name,
                                      wrong_engine_upgrade_image)
    # rollback
    volume.engineUpgrade(image=original_engine_image)
    wait_for_volume_current_image(client, volume_name,
                                  original_engine_image)
    volume = wait_for_volume_replicas_mode(client, volume_name, "RW")
    assert volume.engineImage == original_engine_image
    assert volume.currentImage == original_engine_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == original_engine_image
    assert engine.currentImage == original_engine_image
    volume = common.wait_for_volume_replica_count(client, volume_name,
                                                  REPLICA_COUNT)
    # Rollback must not have corrupted the previously written data.
    check_volume_data(volume, data)
    assert volume.state == common.VOLUME_STATE_ATTACHED
    assert volume.robustness == common.VOLUME_ROBUSTNESS_HEALTHY
    # try again, this time let's try detach
    volume.engineUpgrade(image=wrong_engine_upgrade_image)
    volume = client.by_id_volume(volume.name)
    assert volume.engineImage == wrong_engine_upgrade_image
    assert volume.currentImage == original_engine_image
    with pytest.raises(Exception):
        # this will timeout
        wait_for_volume_current_image(client, volume_name,
                                      wrong_engine_upgrade_image)
    volume.detach(hostId="")
    # Once detached, the image switch can complete since no live engine
    # process needs to stay compatible.
    volume = wait_for_volume_current_image(client, volume_name,
                                           wrong_engine_upgrade_image)
    # all the images would be updated
    assert volume.engineImage == wrong_engine_upgrade_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == wrong_engine_upgrade_image
    volume = common.wait_for_volume_replica_count(client, volume_name,
                                                  REPLICA_COUNT)
    for replica in volume.replicas:
        assert replica.engineImage == wrong_engine_upgrade_image
    # upgrade to the correct image when offline
    volume.engineUpgrade(image=original_engine_image)
    volume = wait_for_volume_current_image(client, volume_name,
                                           original_engine_image)
    volume = client.by_id_volume(volume.name)
    assert volume.engineImage == original_engine_image
    volume.attach(hostId=host_id)
    common.wait_for_volume_healthy(client, volume_name)
    volume = wait_for_volume_replicas_mode(client, volume_name, "RW")
    assert volume.engineImage == original_engine_image
    assert volume.currentImage == original_engine_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == original_engine_image
    assert engine.currentImage == original_engine_image
    check_volume_endpoint(volume)
    for replica in volume.replicas:
        assert replica.engineImage == original_engine_image
        assert replica.currentImage == original_engine_image
    # Data must have survived the failed upgrade, detach, and recovery.
    check_volume_data(volume, data)
    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
    client.delete(new_img)
    wait_for_engine_image_deletion(client, core_api, new_img.name)
def engine_live_upgrade_test(client, core_api, volume_name, backing_image=""):  # NOQA
    """Verify live engine upgrade and downgrade round-trip on a volume.

    Steps:
    1. Deploy a compatible upgrade engine image and confirm it starts
       unreferenced.
    2. Create a volume (optionally with a backing image), attach it,
       verify volume/engine/replica images, and write random data.
    3. Live-upgrade the attached volume to the new image; verify image
       ref counts flip (default 0, new 1), all replicas run the new
       image, and the data is intact.
    4. Detach and reattach; verify the new image persists across the
       cycle and the data is still intact.
    5. Live-downgrade back to the original image; verify ref counts flip
       back, replicas run the original image, and data survives a final
       detach.
    6. Clean up the volume and the test engine image.
    """
    default_img = common.get_default_engine_image(client)
    default_img_name = default_img.name
    # Start from a clean slate: nothing references the default image yet.
    default_img = wait_for_engine_image_ref_count(client, default_img_name, 0)
    cli_v = default_img.cliAPIVersion
    cli_minv = default_img.cliAPIMinVersion
    ctl_v = default_img.controllerAPIVersion
    ctl_minv = default_img.controllerAPIMinVersion
    data_v = default_img.dataFormatVersion
    data_minv = default_img.dataFormatMinVersion
    # Build an engine image that is version-compatible with the default
    # one, so a live upgrade to it should succeed.
    engine_upgrade_image = common.get_upgrade_test_image(
        cli_v, cli_minv, ctl_v, ctl_minv, data_v, data_minv)
    new_img = client.create_engine_image(image=engine_upgrade_image)
    new_img_name = new_img.name
    ei_status_value = get_engine_image_status_value(client, new_img_name)
    new_img = wait_for_engine_image_state(client, new_img_name,
                                          ei_status_value)
    # A freshly deployed image has no users yet.
    assert new_img.refCount == 0
    assert new_img.noRefSince != ""
    default_img = common.get_default_engine_image(client)
    default_img_name = default_img.name
    client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2,
                         backingImage=backing_image)
    volume = common.wait_for_volume_detached(client, volume_name)
    # Creating the volume adds one reference to the default engine image.
    wait_for_engine_image_ref_count(client, default_img_name, 1)
    assert volume.name == volume_name
    assert volume.backingImage == backing_image
    original_engine_image = volume.engineImage
    assert original_engine_image != engine_upgrade_image
    host_id = get_self_host_id()
    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)
    # Before the upgrade, every component runs the original image.
    assert volume.engineImage == original_engine_image
    assert volume.currentImage == original_engine_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == original_engine_image
    assert engine.currentImage == original_engine_image
    for replica in volume.replicas:
        assert replica.engineImage == original_engine_image
        assert replica.currentImage == original_engine_image
    data = write_volume_random_data(volume)
    # Live upgrade while the volume stays attached.
    volume.engineUpgrade(image=engine_upgrade_image)
    wait_for_volume_current_image(client, volume_name, engine_upgrade_image)
    # Need to wait for Longhorn to get and update the mode for new replicas
    volume = wait_for_volume_replicas_mode(client, volume_name, "RW")
    engine = get_volume_engine(volume)
    assert engine.engineImage == engine_upgrade_image
    check_volume_endpoint(volume)
    # The reference moves from the default image to the upgrade image.
    wait_for_engine_image_ref_count(client, default_img_name, 0)
    wait_for_engine_image_ref_count(client, new_img_name, 1)
    count = 0
    # old replica may be in deletion process
    for replica in volume.replicas:
        if replica.currentImage == engine_upgrade_image:
            count += 1
    assert count == REPLICA_COUNT
    check_volume_data(volume, data)
    volume.detach(hostId="")
    volume = common.wait_for_volume_detached(client, volume_name)
    # The upgraded image must persist through detach.
    assert len(volume.replicas) == REPLICA_COUNT
    assert volume.engineImage == engine_upgrade_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == engine_upgrade_image
    for replica in volume.replicas:
        assert replica.engineImage == engine_upgrade_image
    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)
    assert volume.engineImage == engine_upgrade_image
    assert volume.currentImage == engine_upgrade_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == engine_upgrade_image
    assert engine.currentImage == engine_upgrade_image
    check_volume_endpoint(volume)
    for replica in volume.replicas:
        assert replica.engineImage == engine_upgrade_image
        assert replica.currentImage == engine_upgrade_image
    # Make sure detaching didn't somehow interfere with the data.
    check_volume_data(volume, data)
    # Live downgrade back to the original image.
    volume.engineUpgrade(image=original_engine_image)
    wait_for_volume_current_image(client, volume_name,
                                  original_engine_image)
    volume = wait_for_volume_replicas_mode(client, volume_name, "RW")
    engine = get_volume_engine(volume)
    assert engine.engineImage == original_engine_image
    assert engine.currentImage == original_engine_image
    check_volume_endpoint(volume)
    # Reference counts flip back after the downgrade.
    wait_for_engine_image_ref_count(client, default_img_name, 1)
    new_img = wait_for_engine_image_ref_count(client, new_img_name, 0)
    assert volume.engineImage == original_engine_image
    count = 0
    # old replica may be in deletion process
    for replica in volume.replicas:
        if replica.engineImage == original_engine_image:
            count += 1
    assert count == REPLICA_COUNT
    check_volume_data(volume, data)
    volume.detach(hostId="")
    volume = common.wait_for_volume_detached(client, volume_name)
    assert len(volume.replicas) == REPLICA_COUNT
    assert volume.engineImage == original_engine_image
    engine = get_volume_engine(volume)
    assert engine.engineImage == original_engine_image
    for replica in volume.replicas:
        assert replica.engineImage == original_engine_image
    client.delete(volume)
    wait_for_volume_delete(client, volume_name)
    client.delete(new_img)
    wait_for_engine_image_deletion(client, core_api, new_img.name)