Example #1
def test_empty_backup_volume(clients):  # NOQA
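    """
    Verify that an empty backup volume (whose only backup has been deleted)
    can still take a new backup and can itself be deleted afterwards.
    """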
    for host_id, client in clients.items():
        break
    lht_hostId = get_self_host_id()

    volName = generate_volume_name()
    volume = create_and_check_volume(client, volName)

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, volName)

    bv, b1, snap1, _ = create_backup(client, volName)
    bv.backupDelete(name=b1["name"])
    common.wait_for_backup_delete(b1["name"], bv)

    backup_list = bv.backupList()
    assert len(backup_list) == 0

    # test that the empty backup volume can take a new backup
    _, b2, snap2, _ = create_backup(client, volName)

    # test the empty backup volume is still deletable
    bv.backupDelete(name=b2["name"])
    common.wait_for_backup_delete(b2["name"], bv)
    bv = client.by_id_backupVolume(volName)
    client.delete(bv)
    common.wait_for_backup_volume_delete(client, volName)
    cleanup_volume(client, volume)
Example #2
def backup_test(
        dev,
        address,  # NOQA
        volume_name,
        engine_name,
        backup_target):
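    """
    Write three rounds of random data, back up a snapshot of each, then
    restore the backups out of order (3, 1, 2), verifying the restored data
    and checksum and removing each backup after it has been verified.
    """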
    offset = 0
    length = 128

    snap1_data = random_string(length)
    verify_data(dev, offset, snap1_data)
    snap1_checksum = checksum_dev(dev)
    snap1 = cmd.snapshot_create(address)

    backup1_info = create_backup(address, snap1, backup_target)
    assert backup1_info["VolumeName"] == volume_name
    assert backup1_info["Size"] == BLOCK_SIZE_STR

    snap2_data = random_string(length)
    verify_data(dev, offset, snap2_data)
    snap2_checksum = checksum_dev(dev)
    snap2 = cmd.snapshot_create(address)

    backup2_info = create_backup(address, snap2, backup_target)
    assert backup2_info["VolumeName"] == volume_name
    assert backup2_info["Size"] == BLOCK_SIZE_STR

    snap3_data = random_string(length)
    verify_data(dev, offset, snap3_data)
    snap3_checksum = checksum_dev(dev)
    snap3 = cmd.snapshot_create(address)

    backup3_info = create_backup(address, snap3, backup_target)
    assert backup3_info["VolumeName"] == volume_name
    assert backup3_info["Size"] == BLOCK_SIZE_STR

    restore_with_frontend(address, engine_name, backup3_info["URL"])

    readed = read_dev(dev, offset, length)
    assert readed == snap3_data
    c = checksum_dev(dev)
    assert c == snap3_checksum

    rm_backups(address, engine_name, [backup3_info["URL"]])

    restore_with_frontend(address, engine_name, backup1_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap1_data
    c = checksum_dev(dev)
    assert c == snap1_checksum

    rm_backups(address, engine_name, [backup1_info["URL"]])

    restore_with_frontend(address, engine_name, backup2_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap2_data
    c = checksum_dev(dev)
    assert c == snap2_checksum

    rm_backups(address, engine_name, [backup2_info["URL"]])
Example #3
def backup_hole_with_backing_file_test(
        backup_target,  # NOQA
        grpc_backing_controller,  # NOQA
        grpc_backing_replica1,  # NOQA
        grpc_backing_replica2):  # NOQA
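    """
    Back up two snapshots of a volume that has a backing file, recording a
    block-boundary region and a hole region for each, then restore both
    backups and verify the boundary data, the hole data and the checksum.
    """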
    address = grpc_backing_controller.address

    dev = get_backing_dev(grpc_backing_replica1, grpc_backing_replica2,
                          grpc_backing_controller)

    offset1 = 512
    length1 = 256

    offset2 = 640
    length2 = 256

    boundary_offset = 0
    boundary_length = 4100  # just past 4096, into the next 4K block

    hole_offset = 2 * 1024 * 1024
    hole_length = 1024

    snap1_data = random_string(length1)
    verify_data(dev, offset1, snap1_data)
    snap1_checksum = checksum_dev(dev)
    snap1 = cmd.snapshot_create(address)

    boundary_data_backup1 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup1 = read_dev(dev, hole_offset, hole_length)
    backup1_info = create_backup(address, snap1, backup_target)

    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    snap2_checksum = checksum_dev(dev)
    snap2 = cmd.snapshot_create(address)

    boundary_data_backup2 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup2 = read_dev(dev, hole_offset, hole_length)
    backup2_info = create_backup(address, snap2, backup_target)

    restore_with_frontend(address, ENGINE_BACKING_NAME, backup1_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup1
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup1
    c = checksum_dev(dev)
    assert c == snap1_checksum

    restore_with_frontend(address, ENGINE_BACKING_NAME, backup2_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup2
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup2
    c = checksum_dev(dev)
    assert c == snap2_checksum
Example #4
def backup_with_backing_file_test(
        backup_target,  # NOQA
        grpc_backing_controller,  # NOQA
        grpc_backing_replica1,  # NOQA
        grpc_backing_replica2):  # NOQA
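    """
    Take a backup of an empty snapshot on a volume with a backing file, run
    the generic backup_test, then restore the initial backup and verify that
    the device content still matches the backing file.
    """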
    address = grpc_backing_controller.address

    dev = get_backing_dev(grpc_backing_replica1, grpc_backing_replica2,
                          grpc_backing_controller)

    offset = 0
    length = 256

    snap0 = cmd.snapshot_create(address)
    before = read_dev(dev, offset, length)
    assert before != ""
    snap0_checksum = checksum_dev(dev)

    exists = read_from_backing_file(offset, length)
    assert before == exists

    backup0_info = create_backup(address, snap0, backup_target)
    assert backup0_info["VolumeName"] == VOLUME_BACKING_NAME

    backup_test(dev, address, VOLUME_BACKING_NAME, ENGINE_BACKING_NAME,
                backup_target)

    restore_with_frontend(address, ENGINE_BACKING_NAME, backup0_info["URL"])
    after = read_dev(dev, offset, length)
    assert before == after
    c = checksum_dev(dev)
    assert c == snap0_checksum

    rm_backups(address, ENGINE_BACKING_NAME, [backup0_info["URL"]])
Example #5
def test_backup_volume_deletion(
        grpc_replica1,
        grpc_replica2,  # NOQA
        grpc_controller,
        backup_targets):  # NOQA
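    """
    For each backup target: create a backup, remove the whole backup volume,
    and verify that listing it afterwards reports a "cannot find" error;
    then reset the sync agent and clean up the controller and replicas.
    """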
    offset = 0
    length = 128
    address = grpc_controller.address

    for backup_target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        snap_data = random_string(length)
        verify_data(dev, offset, snap_data)
        snap = cmd.snapshot_create(address)

        backup_info = create_backup(address, snap, backup_target)
        assert backup_info["VolumeName"] == VOLUME_NAME
        assert backup_info["Size"] == BLOCK_SIZE_STR
        assert snap in backup_info["SnapshotName"]

        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
        info = cmd.backup_volume_list(address, VOLUME_NAME, backup_target)
        assert "cannot find" in info[VOLUME_NAME]["Messages"]["error"]

        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
Example #6
def test_deleting_backup_volume(clients):  # NOQA
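    """
    Create two backups on a volume, then delete the backup volume itself
    and wait until it is gone.
    """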
    for host_id, client in clients.items():
        break
    lht_hostId = get_self_host_id()

    volName = generate_volume_name()
    volume = create_and_check_volume(client, volName)

    volume.attach(hostId=lht_hostId)
    volume = common.wait_for_volume_healthy(client, volName)

    bv, _, snap1, _ = create_backup(client, volName)
    _, _, snap2, _ = create_backup(client, volName)

    bv = client.by_id_backupVolume(volName)
    client.delete(bv)
    common.wait_for_backup_volume_delete(client, volName)
    cleanup_volume(client, volume)
Example #7
def snapshot_tree_backup_test(
        backup_target,
        engine_name,  # NOQA
        grpc_controller,
        grpc_replica1,
        grpc_replica2):  # NOQA
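    """
    Build a snapshot tree, back up the nodes 0b, 0c, 1c, 2b, 2c and 3c, and
    verify that each backup node restores with the expected data.
    """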
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    offset = 0
    length = 128
    backup = {}

    snap, data = snapshot_tree_build(dev, address, engine_name, offset, length)

    backup["0b"] = create_backup(address, snap["0b"], backup_target)["URL"]
    backup["0c"] = create_backup(address, snap["0c"], backup_target)["URL"]
    backup["1c"] = create_backup(address, snap["1c"], backup_target)["URL"]
    backup["2b"] = create_backup(address, snap["2b"], backup_target)["URL"]
    backup["2c"] = create_backup(address, snap["2c"], backup_target)["URL"]
    backup["3c"] = create_backup(address, snap["3c"], backup_target)["URL"]

    snapshot_tree_verify_backup_node(dev, address, engine_name, offset, length,
                                     backup, data, "0b")
    snapshot_tree_verify_backup_node(dev, address, engine_name, offset, length,
                                     backup, data, "0c")
    snapshot_tree_verify_backup_node(dev, address, engine_name, offset, length,
                                     backup, data, "1c")
    snapshot_tree_verify_backup_node(dev, address, engine_name, offset, length,
                                     backup, data, "2b")
    snapshot_tree_verify_backup_node(dev, address, engine_name, offset, length,
                                     backup, data, "2c")
    snapshot_tree_verify_backup_node(dev, address, engine_name, offset, length,
                                     backup, data, "3c")
Example #8
def backup_labels_test(clients,
                       random_labels,
                       volume_name,
                       size=SIZE,
                       base_image=""):  # NOQA
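    """
    For every configured backup store (S3 or other), take a backup with the
    given labels and verify that the labels (plus the base-image label, when
    a base image is used) are stored on the backup.
    """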
    for _, client in clients.items():
        break
    host_id = get_self_host_id()

    volume = create_and_check_volume(client, volume_name, 2, size, base_image)

    volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, volume_name)

    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    # test backupTarget for multiple settings
    backupstores = common.get_backupstore_url()
    for backupstore in backupstores:
        if common.is_backupTarget_s3(backupstore):
            backupsettings = backupstore.split("$")
            setting = client.update(setting, value=backupsettings[0])
            assert setting["value"] == backupsettings[0]

            credential = client.by_id_setting(
                common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value=backupsettings[1])
            assert credential["value"] == backupsettings[1]
        else:
            setting = client.update(setting, value=backupstore)
            assert setting["value"] == backupstore
            credential = client.by_id_setting(
                common.SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
            credential = client.update(credential, value="")
            assert credential["value"] == ""

        bv, b, _, _ = create_backup(client, volume_name, labels=random_labels)
        # If we're running the test with a BaseImage, check that this Label is
        # set properly.
        backup = bv.backupGet(name=b["name"])
        if base_image:
            assert backup["labels"].get(common.BASE_IMAGE_LABEL) == base_image
            # One extra Label from the BaseImage being set.
            assert len(backup["labels"]) == len(random_labels) + 1
        else:
            assert len(backup["labels"]) == len(random_labels)

    cleanup_volume(client, volume)
Example #9
def backupstore_test(client, host_id, volname, size):
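    """
    Create a backup, restore it into a new volume and verify the restored
    data, then delete the backup and check that it disappears from the
    backup list and that the restored volume's lastBackup fields are cleared.
    """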
    bv, b, snap2, data = create_backup(client, volname)

    # test restore
    restoreName = generate_volume_name()
    volume = client.create_volume(name=restoreName,
                                  size=size,
                                  numberOfReplicas=2,
                                  fromBackup=b["url"])

    volume = common.wait_for_volume_restoration_completed(client, restoreName)
    volume = common.wait_for_volume_detached(client, restoreName)
    assert volume["name"] == restoreName
    assert volume["size"] == size
    assert volume["numberOfReplicas"] == 2
    assert volume["state"] == "detached"
    assert volume["initialRestorationRequired"] is False

    volume = volume.attach(hostId=host_id)
    volume = common.wait_for_volume_healthy(client, restoreName)
    check_volume_data(volume, data)
    volume = volume.detach()
    volume = common.wait_for_volume_detached(client, restoreName)

    bv.backupDelete(name=b["name"])

    backups = bv.backupList()
    found = False
    for b in backups:
        if b["snapshotName"] == snap2["name"]:
            found = True
            break
    assert not found

    volume = wait_for_volume_status(client, volume["name"], "lastBackup", "")
    assert volume["lastBackupAt"] == ""

    client.delete(volume)

    volume = wait_for_volume_delete(client, restoreName)
Example #10
def test_listing_backup_volume(clients, base_image=""):  # NOQA
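    """
    Create backups for three volumes on an NFS backup target, temporarily
    rename the first volume's volume.cfg so that listing backup volumes
    reports an error for it, restore the file, then delete all backups
    and volumes.
    """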
    for host_id, client in clients.items():
        break
    lht_hostId = get_self_host_id()

    # create 3 volumes.
    volume1_name = generate_volume_name()
    volume2_name = generate_volume_name()
    volume3_name = generate_volume_name()

    volume1 = create_and_check_volume(client, volume1_name)
    volume2 = create_and_check_volume(client, volume2_name)
    volume3 = create_and_check_volume(client, volume3_name)

    volume1.attach(hostId=lht_hostId)
    volume1 = common.wait_for_volume_healthy(client, volume1_name)
    volume2.attach(hostId=lht_hostId)
    volume2 = common.wait_for_volume_healthy(client, volume2_name)
    volume3.attach(hostId=lht_hostId)
    volume3 = common.wait_for_volume_healthy(client, volume3_name)

    # We only test NFS here, since it is difficult to directly
    # remove volume.cfg from S3 buckets.
    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    backupstores = common.get_backupstore_url()
    for backupstore in backupstores:
        if common.is_backupTarget_nfs(backupstore):
            updated = False
            for i in range(RETRY_COMMAND_COUNT):
                nfs_url = backupstore.replace("nfs://", "", 1)
                setting = client.update(setting, value=backupstore)
                assert setting["value"] == backupstore
                setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
                if "nfs" in setting["value"]:
                    updated = True
                    break
            assert updated

    _, _, snap1, _ = create_backup(client, volume1_name)
    _, _, snap2, _ = create_backup(client, volume2_name)
    _, _, snap3, _ = create_backup(client, volume3_name)

    # invalidate backup volume 1 by renaming volume.cfg to volume.cfg.tmp
    cmd = ["mkdir", "-p", "/mnt/nfs"]
    subprocess.check_output(cmd)
    cmd = ["mount", "-t", "nfs4", nfs_url, "/mnt/nfs"]
    subprocess.check_output(cmd)
    cmd = ["find", "/mnt/nfs", "-type", "d", "-name", volume1_name]
    volume1_backup_volume_path = subprocess.check_output(cmd).strip()

    cmd = ["find", volume1_backup_volume_path, "-name", "volume.cfg"]
    volume1_backup_volume_cfg_path = subprocess.check_output(cmd).strip()
    cmd = [
        "mv", volume1_backup_volume_cfg_path,
        volume1_backup_volume_cfg_path + ".tmp"
    ]
    subprocess.check_output(cmd)
    subprocess.check_output(["sync"])

    found1 = found2 = found3 = False
    for i in range(RETRY_COUNTS):
        bvs = client.list_backupVolume()
        for bv in bvs:
            if bv["name"] == volume1_name:
                if "error" in bv.messages:
                    assert "volume.cfg" in bv.messages["error"].lower()
                    found1 = True
            elif bv["name"] == volume2_name:
                assert not bv.messages
                found2 = True
            elif bv["name"] == volume3_name:
                assert not bv.messages
                found3 = True
        if found1 and found2 and found3:
            break
        time.sleep(RETRY_INTERVAL)
    assert found1 and found2 and found3

    cmd = [
        "mv", volume1_backup_volume_cfg_path + ".tmp",
        volume1_backup_volume_cfg_path
    ]
    subprocess.check_output(cmd)
    subprocess.check_output(["sync"])

    found = False
    for i in range(RETRY_COMMAND_COUNT):
        try:
            bv1, b1 = common.find_backup(client, volume1_name, snap1["name"])
            found = True
            break
        except Exception:
            time.sleep(1)
    assert found
    bv1.backupDelete(name=b1["name"])
    for i in range(RETRY_COMMAND_COUNT):
        found = False
        backups1 = bv1.backupList()
        for b in backups1:
            if b["snapshotName"] == snap1["name"]:
                found = True
                break
    assert not found

    bv2, b2 = common.find_backup(client, volume2_name, snap2["name"])
    bv2.backupDelete(name=b2["name"])
    for i in range(RETRY_COMMAND_COUNT):
        found = False
        backups2 = bv2.backupList()
        for b in backups2:
            if b["snapshotName"] == snap2["name"]:
                found = True
                break
    assert not found

    bv3, b3 = common.find_backup(client, volume3_name, snap3["name"])
    bv3.backupDelete(name=b3["name"])
    for i in range(RETRY_COMMAND_COUNT):
        found = False
        backups3 = bv3.backupList()
        for b in backups3:
            if b["snapshotName"] == snap3["name"]:
                found = True
                break
    assert not found

    volume1.detach()
    volume1 = common.wait_for_volume_detached(client, volume1_name)
    client.delete(volume1)
    wait_for_volume_delete(client, volume1_name)

    volume2.detach()
    volume2 = common.wait_for_volume_detached(client, volume2_name)
    client.delete(volume2)
    wait_for_volume_delete(client, volume2_name)

    volume3.detach()
    volume3 = common.wait_for_volume_detached(client, volume3_name)
    client.delete(volume3)
    wait_for_volume_delete(client, volume3_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #11
def restore_inc_test(client, core_api, volume_name, pod):  # NOQA
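    """
    Create a backup of a source volume and three standby (DR) volumes
    restored from it, verify the operations that are rejected on standby
    volumes, then activate each standby volume after a further incremental
    backup and verify its data; the last one is also mounted into a pod
    via PV/PVC.
    """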
    std_volume = create_and_check_volume(client, volume_name, 2, SIZE)
    lht_host_id = get_self_host_id()
    std_volume.attach(hostId=lht_host_id)
    std_volume = common.wait_for_volume_healthy(client, volume_name)

    with pytest.raises(Exception) as e:
        std_volume.activate(frontend="blockdev")
    assert "already in active mode" in str(e.value)

    data0 = {'len': 4 * 1024, 'pos': 0}
    data0['content'] = common.generate_random_data(data0['len'])
    bv, backup0, _, data0 = create_backup(client, volume_name, data0)

    sb_volume0_name = "sb-0-" + volume_name
    sb_volume1_name = "sb-1-" + volume_name
    sb_volume2_name = "sb-2-" + volume_name
    client.create_volume(name=sb_volume0_name,
                         size=SIZE,
                         numberOfReplicas=2,
                         fromBackup=backup0['url'],
                         frontend="",
                         standby=True)
    client.create_volume(name=sb_volume1_name,
                         size=SIZE,
                         numberOfReplicas=2,
                         fromBackup=backup0['url'],
                         frontend="",
                         standby=True)
    client.create_volume(name=sb_volume2_name,
                         size=SIZE,
                         numberOfReplicas=2,
                         fromBackup=backup0['url'],
                         frontend="",
                         standby=True)
    common.wait_for_volume_restoration_completed(client, sb_volume0_name)
    common.wait_for_volume_restoration_completed(client, sb_volume1_name)
    common.wait_for_volume_restoration_completed(client, sb_volume2_name)

    sb_volume0 = common.wait_for_volume_healthy(client, sb_volume0_name)
    sb_volume1 = common.wait_for_volume_healthy(client, sb_volume1_name)
    sb_volume2 = common.wait_for_volume_healthy(client, sb_volume2_name)

    for i in range(RETRY_COUNTS):
        sb_volume0 = client.by_id_volume(sb_volume0_name)
        sb_volume1 = client.by_id_volume(sb_volume1_name)
        sb_volume2 = client.by_id_volume(sb_volume2_name)
        sb_engine0 = get_volume_engine(sb_volume0)
        sb_engine1 = get_volume_engine(sb_volume1)
        sb_engine2 = get_volume_engine(sb_volume2)
        if sb_volume0["lastBackup"] != backup0["name"] or \
                sb_volume1["lastBackup"] != backup0["name"] or \
                sb_volume2["lastBackup"] != backup0["name"] or \
                sb_engine0["lastRestoredBackup"] != backup0["name"] or \
                sb_engine1["lastRestoredBackup"] != backup0["name"] or \
                sb_engine2["lastRestoredBackup"] != backup0["name"]:
            time.sleep(RETRY_INTERVAL)
        else:
            break
    assert sb_volume0["standby"] is True
    assert sb_volume0["lastBackup"] == backup0["name"]
    assert sb_volume0["frontend"] == ""
    assert sb_volume0["disableFrontend"] is True
    assert sb_volume0["initialRestorationRequired"] is False
    sb_engine0 = get_volume_engine(sb_volume0)
    assert sb_engine0["lastRestoredBackup"] == backup0["name"]
    assert sb_engine0["requestedBackupRestore"] == backup0["name"]
    assert sb_volume1["standby"] is True
    assert sb_volume1["lastBackup"] == backup0["name"]
    assert sb_volume1["frontend"] == ""
    assert sb_volume1["disableFrontend"] is True
    assert sb_volume1["initialRestorationRequired"] is False
    sb_engine1 = get_volume_engine(sb_volume1)
    assert sb_engine1["lastRestoredBackup"] == backup0["name"]
    assert sb_engine1["requestedBackupRestore"] == backup0["name"]
    assert sb_volume2["standby"] is True
    assert sb_volume2["lastBackup"] == backup0["name"]
    assert sb_volume2["frontend"] == ""
    assert sb_volume2["disableFrontend"] is True
    assert sb_volume2["initialRestorationRequired"] is False
    sb_engine2 = get_volume_engine(sb_volume2)
    assert sb_engine2["lastRestoredBackup"] == backup0["name"]
    assert sb_engine2["requestedBackupRestore"] == backup0["name"]

    sb0_snaps = sb_volume0.snapshotList()
    assert len(sb0_snaps) == 2
    sb0_snap = None
    for s in sb0_snaps:
        if s['name'] != "volume-head":
            sb0_snap = s
    assert sb0_snap is not None
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotCreate()
    assert "cannot create snapshot for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotRevert(name=sb0_snap["name"])
    assert "cannot revert snapshot for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotDelete(name=sb0_snap["name"])
    assert "cannot delete snapshot for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.snapshotBackup(name=sb0_snap["name"])
    assert "cannot create backup for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.pvCreate(pvName=sb_volume0_name)
    assert "cannot create PV for standby volume" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.pvcCreate(pvcName=sb_volume0_name)
    assert "cannot create PVC for standby volume" in str(e.value)
    setting = client.by_id_setting(common.SETTING_BACKUP_TARGET)
    with pytest.raises(Exception) as e:
        client.update(setting, value="random.backup.target")
    assert "cannot modify BackupTarget " \
           "since there are existing standby volumes" in str(e.value)
    with pytest.raises(Exception) as e:
        sb_volume0.activate(frontend="wrong_frontend")
    assert "invalid frontend" in str(e.value)

    activate_standby_volume(client, sb_volume0_name)
    sb_volume0 = client.by_id_volume(sb_volume0_name)
    sb_volume0.attach(hostId=lht_host_id)
    sb_volume0 = common.wait_for_volume_healthy(client, sb_volume0_name)
    check_volume_data(sb_volume0, data0, False)

    zero_string = b'\x00'.decode('utf-8')
    _, backup1, _, data1 = create_backup(client, volume_name, {
        'len': 2 * 1024,
        'pos': 0,
        'content': zero_string * 2 * 1024
    })
    # use this API call to refresh the volume's `lastBackup` field
    client.list_backupVolume()
    check_volume_last_backup(client, sb_volume1_name, backup1['name'])
    activate_standby_volume(client, sb_volume1_name)
    sb_volume1 = client.by_id_volume(sb_volume1_name)
    sb_volume1.attach(hostId=lht_host_id)
    sb_volume1 = common.wait_for_volume_healthy(client, sb_volume1_name)
    data0_modified = {
        'len': data0['len'] - data1['len'],
        'pos': data1['len'],
        'content': data0['content'][data1['len']:],
    }
    check_volume_data(sb_volume1, data0_modified, False)
    check_volume_data(sb_volume1, data1)

    data2 = {'len': 1 * 1024 * 1024, 'pos': 0}
    data2['content'] = common.generate_random_data(data2['len'])
    _, backup2, _, data2 = create_backup(client, volume_name, data2)
    client.list_backupVolume()
    check_volume_last_backup(client, sb_volume2_name, backup2['name'])
    activate_standby_volume(client, sb_volume2_name)
    sb_volume2 = client.by_id_volume(sb_volume2_name)
    sb_volume2.attach(hostId=lht_host_id)
    sb_volume2 = common.wait_for_volume_healthy(client, sb_volume2_name)
    check_volume_data(sb_volume2, data2)

    # use this activated volume in a pod
    sb_volume2.detach()
    sb_volume2 = common.wait_for_volume_detached(client, sb_volume2_name)

    create_pv_for_volume(client, core_api, sb_volume2, sb_volume2_name)
    create_pvc_for_volume(client, core_api, sb_volume2, sb_volume2_name)

    sb_volume2_pod_name = "pod-" + sb_volume2_name
    pod['metadata']['name'] = sb_volume2_pod_name
    pod['spec']['volumes'] = [{
        'name':
        pod['spec']['containers'][0]['volumeMounts'][0]['name'],
        'persistentVolumeClaim': {
            'claimName': sb_volume2_name,
        },
    }]
    create_and_wait_pod(core_api, pod)

    sb_volume2 = client.by_id_volume(sb_volume2_name)
    k_status = sb_volume2["kubernetesStatus"]
    workloads = k_status['workloadsStatus']
    assert k_status['pvName'] == sb_volume2_name
    assert k_status['pvStatus'] == 'Bound'
    assert len(workloads) == 1
    for i in range(RETRY_COUNTS):
        if workloads[0]['podStatus'] == 'Running':
            break
        time.sleep(RETRY_INTERVAL)
        sb_volume2 = client.by_id_volume(sb_volume2_name)
        k_status = sb_volume2["kubernetesStatus"]
        workloads = k_status['workloadsStatus']
        assert len(workloads) == 1
    assert workloads[0]['podName'] == sb_volume2_pod_name
    assert workloads[0]['podStatus'] == 'Running'
    assert not workloads[0]['workloadName']
    assert not workloads[0]['workloadType']
    assert k_status['namespace'] == 'default'
    assert k_status['pvcName'] == sb_volume2_name
    assert not k_status['lastPVCRefAt']
    assert not k_status['lastPodRefAt']

    delete_and_wait_pod(core_api, sb_volume2_pod_name)
    delete_and_wait_pvc(core_api, sb_volume2_name)
    delete_and_wait_pv(core_api, sb_volume2_name)

    # cleanup
    std_volume.detach()
    sb_volume0.detach()
    sb_volume1.detach()
    std_volume = common.wait_for_volume_detached(client, volume_name)
    sb_volume0 = common.wait_for_volume_detached(client, sb_volume0_name)
    sb_volume1 = common.wait_for_volume_detached(client, sb_volume1_name)
    sb_volume2 = common.wait_for_volume_detached(client, sb_volume2_name)

    bv.backupDelete(name=backup2["name"])
    bv.backupDelete(name=backup1["name"])
    bv.backupDelete(name=backup0["name"])

    client.delete(std_volume)
    client.delete(sb_volume0)
    client.delete(sb_volume1)
    client.delete(sb_volume2)

    wait_for_volume_delete(client, volume_name)
    wait_for_volume_delete(client, sb_volume0_name)
    wait_for_volume_delete(client, sb_volume1_name)
    wait_for_volume_delete(client, sb_volume2_name)

    volumes = client.list_volume()
    assert len(volumes) == 0
Example #12
def restore_to_file_with_backing_file_test(
        backup_target,  # NOQA
        grpc_backing_controller,  # NOQA
        grpc_backing_replica1,  # NOQA
        grpc_backing_replica2):  # NOQA
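    """
    Restore backups of a volume with a backing file to raw and qcow2 image
    files and verify that snapshot data and backing-file data are merged
    correctly in the output images.
    """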
    address = grpc_backing_controller.address

    backing_dev = get_backing_dev(grpc_backing_replica1, grpc_backing_replica2,
                                  grpc_backing_controller)

    length0 = 4 * 1024
    length1 = 256
    length2 = 128
    offset0 = 0
    offset1 = length1 + offset0
    offset2 = length2 + offset0

    output_raw_path = file(OUTPUT_FILE_RAW)
    output_qcow2_path = file(OUTPUT_FILE_QCOW2)

    # create 1 empty snapshot.
    # data in output image == data in backing
    check_backing()
    check_empty_volume(backing_dev)
    snap0 = cmd.snapshot_create(address)
    backup = create_backup(address, snap0, backup_target)["URL"]

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset0, length0)
    dev_checksum = checksum_dev(backing_dev)
    assert volume_data != ""
    assert volume_data == backing_data

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output0_raw = read_file(output_raw_path, offset0, length0)
    output0_checksum = checksum_data(read_file(output_raw_path, 0, SIZE))
    assert output0_raw == backing_data
    assert output0_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output0_qcow2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length0)
    output0_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output0_qcow2 == backing_data
    assert output0_qcow2 == volume_data
    assert output0_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    rm_backups(address, ENGINE_BACKING_NAME, [backup])

    # create 1 snapshot with 256B data.
    # output = snap1(offset0, length1) + backing(offset1, ...)
    snap1_data = random_string(length1)
    verify_data(backing_dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup = create_backup(address, snap1, backup_target)["URL"]

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset1, length0 - offset1)
    dev_checksum = checksum_dev(backing_dev)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output1_raw_snap1 = read_file(output_raw_path, offset0, length1)
    output1_raw_backing = read_file(output_raw_path, offset1,
                                    length0 - offset1)
    output1_checksum = checksum_data(read_file(output_raw_path, 0, SIZE))
    assert output1_raw_snap1 == snap1_data
    assert output1_raw_backing == backing_data
    assert output1_raw_snap1 + output1_raw_backing == volume_data
    assert output1_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output1_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length1)
    output1_qcow2_backing = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - offset1)
    output1_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output1_qcow2_snap1 == snap1_data
    assert output1_qcow2_backing == backing_data
    assert output1_qcow2_snap1 + output1_qcow2_backing == volume_data
    assert output1_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_BACKING_NAME, snap0)
    rm_snaps(address, [snap1])
    rm_backups(address, ENGINE_BACKING_NAME, [backup])
    check_backing()
    check_empty_volume(backing_dev)

    # create 2 snapshots with 256B data and 128B data
    # output = snap2(offset0, length2) +
    #          snap1(offset2, length1 - length2) + backing(offset1, ...)
    snap1_data = random_string(length1)
    verify_data(backing_dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    snap2_data = random_string(length2)
    verify_data(backing_dev, offset0, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup = create_backup(address, snap2, backup_target)["URL"]

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset1, length0 - offset1)
    dev_checksum = checksum_dev(backing_dev)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output2_raw_snap2 = read_file(output_raw_path, offset0, length2)
    output2_raw_snap1 = read_file(output_raw_path, offset2, length1 - length2)
    output2_raw_backing = read_file(output_raw_path, offset1,
                                    length0 - offset1)
    output2_checksum = checksum_data(read_file(output_raw_path, 0, SIZE))
    assert output2_raw_snap2 == snap2_data
    assert output2_raw_snap1 == snap1_data[offset2:length1]
    assert output2_raw_backing == backing_data
    assert \
        volume_data == \
        output2_raw_snap2 + output2_raw_snap1 + output2_raw_backing
    assert output2_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output2_qcow2_snap2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length2)
    output2_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset2, length1 - length2)
    output2_qcow2_backing = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - offset1)
    output2_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output2_qcow2_snap2 == snap2_data
    assert output2_qcow2_snap1 == snap1_data[offset2:length1]
    assert output2_qcow2_backing == backing_data
    assert \
        volume_data == \
        output2_qcow2_snap2 + output2_qcow2_snap1 + output2_qcow2_backing
    assert output2_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_BACKING_NAME, snap0)
    rm_snaps(address, [snap1, snap2])
    rm_backups(address, ENGINE_BACKING_NAME, [backup])
    check_backing()
    check_empty_volume(backing_dev)
Example #13
def restore_to_file_without_backing_file_test(
        backup_target,  # NOQA
        grpc_controller,  # NOQA
        grpc_replica1,  # NOQA
        grpc_replica2):  # NOQA
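    """
    Restore backups of a plain volume (no backing file) to raw and qcow2
    image files and verify the snapshot data in the output images.
    """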
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    length0 = 256
    length1 = 128
    offset0 = 0
    offset1 = length1 + offset0

    output_raw_path = file(OUTPUT_FILE_RAW)
    output_qcow2_path = file(OUTPUT_FILE_QCOW2)

    # create 1 empty snapshot for converting to init state.
    snap0 = cmd.snapshot_create(address)

    # create 1 snapshot with 256B data.
    # output = snap1(offset0, length0)
    snap1_data = random_string(length0)
    verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup = create_backup(address, snap1, backup_target)["URL"]

    cmd.restore_to_file(address, backup, "", output_raw_path, IMAGE_FORMAT_RAW)
    output1_raw = read_file(output_raw_path, offset0, length0)
    assert output1_raw == snap1_data
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, "", output_qcow2_path,
                        IMAGE_FORMAT_QCOW2)
    output1_qcow2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length0)
    assert output1_qcow2 == snap1_data
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap0)
    rm_snaps(address, [snap1])
    rm_backups(address, ENGINE_NAME, [backup])

    # create 2 snapshots with 256B data and 128B data
    # output = snap2(offset0, length1) +
    #          snap1(offset1, length0 - length1)
    snap1_data = random_string(length0)
    verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    snap2_data = random_string(length1)
    verify_data(dev, offset0, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup = create_backup(address, snap2, backup_target)["URL"]

    cmd.restore_to_file(address, backup, "", output_raw_path, IMAGE_FORMAT_RAW)
    output2_raw_snap2 = read_file(output_raw_path, offset0, length1)
    output2_raw_snap1 = read_file(output_raw_path, offset1, length0 - length1)
    assert output2_raw_snap2 == snap2_data
    assert output2_raw_snap1 == snap1_data[offset1:length0]

    cmd.restore_to_file(address, backup, "", output_qcow2_path,
                        IMAGE_FORMAT_QCOW2)
    output2_qcow2_snap2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length1)
    output2_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - length1)
    assert output2_qcow2_snap2 == snap2_data
    assert output2_qcow2_snap1 == snap1_data[offset1:length0]
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap0)
    rm_snaps(address, [snap1, snap2])
    rm_backups(address, ENGINE_NAME, [backup])
Example #14
def restore_inc_test(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2,  # NOQA
        grpc_dr_controller,  # NOQA
        grpc_dr_replica1,
        grpc_dr_replica2,  # NOQA
        backup_target):  # NOQA
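    """
    Create a chain of backups on the source volume, start a frontend-less
    DR volume, and restore the backups incrementally, covering a mocked
    restore failure, restore races and an invalid last-restored backup;
    verify the DR volume data after every step.
    """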
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    zero_string = b'\x00'.decode('utf-8')

    # backup0: 256 random data in 1st block
    length0 = 256
    snap0_data = random_string(length0)
    verify_data(dev, 0, snap0_data)
    verify_data(dev, BLOCK_SIZE, snap0_data)
    snap0 = cmd.snapshot_create(address)
    backup0 = create_backup(address, snap0, backup_target)["URL"]
    backup0_name = cmd.backup_inspect(address, backup0)['Name']

    # backup1: 32 random data + 32 zero data + 192 random data in 1st block
    length1 = 32
    offset1 = 32
    snap1_data = zero_string * length1
    verify_data(dev, offset1, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup1 = create_backup(address, snap1, backup_target)["URL"]
    backup1_name = cmd.backup_inspect(address, backup1)['Name']

    # backup2: 32 random data + 256 random data in 1st block,
    #          256 random data in 2nd block
    length2 = 256
    offset2 = 32
    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    verify_data(dev, BLOCK_SIZE, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup2 = create_backup(address, snap2, backup_target)["URL"]
    backup2_name = cmd.backup_inspect(address, backup2)['Name']

    # backup3: 64 zero data + 192 random data in 1st block
    length3 = 64
    offset3 = 0
    verify_data(dev, offset3, zero_string * length3)
    verify_data(dev, length2, zero_string * offset2)
    verify_data(dev, BLOCK_SIZE, zero_string * length2)
    snap3 = cmd.snapshot_create(address)
    backup3 = create_backup(address, snap3, backup_target)["URL"]
    backup3_name = cmd.backup_inspect(address, backup3)['Name']

    # backup4: 256 random data in 1st block
    length4 = 256
    offset4 = 0
    snap4_data = random_string(length4)
    verify_data(dev, offset4, snap4_data)
    snap4 = cmd.snapshot_create(address)
    backup4 = create_backup(address, snap4, backup_target)["URL"]
    backup4_name = cmd.backup_inspect(address, backup4)['Name']

    # start the DR volume (no frontend)
    dr_address = grpc_dr_controller.address
    start_no_frontend_volume(grpc_engine_manager, grpc_dr_controller,
                             grpc_dr_replica1, grpc_dr_replica2)

    cmd.backup_restore(dr_address, backup0)
    wait_for_restore_completion(dr_address, backup0)
    verify_no_frontend_data(grpc_engine_manager, 0, snap0_data,
                            grpc_dr_controller)

    # mock restore crash/error
    delta_file1 = "volume-delta-" + backup0_name + ".img"
    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*blk"]
        blocks = subprocess.check_output(command).split()
        assert len(blocks) != 0
        for blk in blocks:
            command = ["mv", blk, blk + ".tmp"]
            subprocess.check_output(command).strip()
        # should fail
        is_failed = False
        cmd.restore_inc(dr_address, backup1, backup0_name)
        for i in range(RETRY_COUNTS):
            rs = cmd.restore_status(dr_address)
            for status in rs.values():
                if status['backupURL'] != backup1:
                    break
                if 'error' in status.keys():
                    if status['error'] != "":
                        assert 'no such file or directory' in \
                               status['error']
                        is_failed = True
            if is_failed:
                break
            time.sleep(RETRY_INTERVAL)
        assert is_failed

        assert path.exists(FIXED_REPLICA_PATH1 + delta_file1)
        assert path.exists(FIXED_REPLICA_PATH2 + delta_file1)
        for blk in blocks:
            command = ["mv", blk + ".tmp", blk]
            subprocess.check_output(command)

    data1 = \
        snap0_data[0:offset1] + snap1_data + \
        snap0_data[offset1+length1:]
    # race condition: the last restoration has failed,
    # but `isRestoring` hasn't been cleaned up yet
    for i in range(RETRY_COUNTS):
        try:
            restore_incrementally(dr_address, backup1, backup0_name)
            break
        except subprocess.CalledProcessError as e:
            # retry while the previous restore is still marked in progress
            if "already in progress" in e.output:
                time.sleep(RETRY_INTERVAL)
            else:
                raise e

    verify_no_frontend_data(grpc_engine_manager, 0, data1, grpc_dr_controller)

    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file1)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file1)

    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup1_name)

    data2 = \
        data1[0:offset2] + snap2_data + \
        zero_string * (BLOCK_SIZE - length2 - offset2) + snap2_data
    restore_incrementally(dr_address, backup2, backup1_name)
    verify_no_frontend_data(grpc_engine_manager, 0, data2, grpc_dr_controller)

    delta_file2 = "volume-delta-" + backup1_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file2)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file2)

    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup2_name)

    # mock race condition
    with pytest.raises(subprocess.CalledProcessError) as e:
        restore_incrementally(dr_address, backup1, backup0_name)
    assert "doesn't match lastRestored" in e.value.output

    data3 = zero_string * length3 + data2[length3:length2]
    restore_incrementally(dr_address, backup3, backup2_name)
    verify_no_frontend_data(grpc_engine_manager, 0, data3, grpc_dr_controller)

    delta_file3 = "volume-delta-" + backup3_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file3)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file3)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup3_name)

    # mock corner case: invalid last-restored backup
    rm_backups(address, ENGINE_NAME, [backup3])
    # this actually performs a full restoration
    restore_incrementally(dr_address, backup4, backup3_name)
    verify_no_frontend_data(grpc_engine_manager, 0, snap4_data,
                            grpc_dr_controller)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup4_name)

    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*tempoary"]
        tmp_files = subprocess.check_output(command).split()
        assert len(tmp_files) == 0

    cleanup_no_frontend_volume(grpc_engine_manager, grpc_dr_controller,
                               grpc_dr_replica1, grpc_dr_replica2)

    rm_backups(address, ENGINE_NAME, [backup0, backup1, backup2, backup4])

    cmd.sync_agent_server_reset(address)
    cleanup_controller(grpc_controller)
    cleanup_replica(grpc_replica1)
    cleanup_replica(grpc_replica2)
Example #15
def test_backup_type(
        grpc_replica1,
        grpc_replica2,  # NOQA
        grpc_controller,
        backup_targets):  # NOQA
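    """
    Verify the IsIncremental flag: the first backup of a volume is a full
    backup, later backups are incremental, and after the most recent backup
    is removed the next backup is again a full backup.
    """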
    for backup_target in backup_targets:
        address = grpc_controller.address
        block_size = 2 * 1024 * 1024

        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

        zero_string = b'\x00'.decode('utf-8')

        # backup0: 256 random data in 1st block
        length0 = 256
        snap0_data = random_string(length0)
        verify_data(dev, 0, snap0_data)
        verify_data(dev, block_size, snap0_data)
        snap0 = cmd.snapshot_create(address)
        backup0 = create_backup(address, snap0, backup_target)
        backup0_url = backup0["URL"]
        assert backup0['IsIncremental'] is False

        # backup1: 32 random data + 32 zero data + 192 random data in 1st block
        length1 = 32
        offset1 = 32
        snap1_data = zero_string * length1
        verify_data(dev, offset1, snap1_data)
        snap1 = cmd.snapshot_create(address)
        backup1 = create_backup(address, snap1, backup_target)
        backup1_url = backup1["URL"]
        assert backup1['IsIncremental'] is True

        # backup2: 32 random data + 256 random data in 1st block,
        #          256 random data in 2nd block
        length2 = 256
        offset2 = 32
        snap2_data = random_string(length2)
        verify_data(dev, offset2, snap2_data)
        verify_data(dev, block_size, snap2_data)
        snap2 = cmd.snapshot_create(address)
        backup2 = create_backup(address, snap2, backup_target)
        backup2_url = backup2["URL"]
        assert backup2['IsIncremental'] is True

        rm_backups(address, ENGINE_NAME, [backup2_url])

        # backup3: 64 zero data + 192 random data in 1st block
        length3 = 64
        offset3 = 0
        verify_data(dev, offset3, zero_string * length3)
        verify_data(dev, length2, zero_string * offset2)
        verify_data(dev, block_size, zero_string * length2)
        snap3 = cmd.snapshot_create(address)
        backup3 = create_backup(address, snap3, backup_target)
        backup3_url = backup3["URL"]
        assert backup3['IsIncremental'] is False

        # backup4: 256 random data in 1st block
        length4 = 256
        offset4 = 0
        snap4_data = random_string(length4)
        verify_data(dev, offset4, snap4_data)
        snap4 = cmd.snapshot_create(address)
        backup4 = create_backup(address, snap4, backup_target)
        backup4_url = backup4["URL"]
        assert backup4['IsIncremental'] is True

        rm_backups(address, ENGINE_NAME,
                   [backup0_url, backup1_url, backup3_url, backup4_url])

        cmd.sync_agent_server_reset(address)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
        cleanup_controller(grpc_controller)