Example 1
def backup_test(
        dev,
        address,  # NOQA
        volume_name,
        engine_name,
        backup_target):
    offset = 0
    length = 128

    snap1_data = random_string(length)
    verify_data(dev, offset, snap1_data)
    snap1_checksum = checksum_dev(dev)
    snap1 = cmd.snapshot_create(address)

    backup1_info = create_backup(address, snap1, backup_target)
    assert backup1_info["VolumeName"] == volume_name
    assert backup1_info["Size"] == BLOCK_SIZE_STR

    snap2_data = random_string(length)
    verify_data(dev, offset, snap2_data)
    snap2_checksum = checksum_dev(dev)
    snap2 = cmd.snapshot_create(address)

    backup2_info = create_backup(address, snap2, backup_target)
    assert backup2_info["VolumeName"] == volume_name
    assert backup2_info["Size"] == BLOCK_SIZE_STR

    snap3_data = random_string(length)
    verify_data(dev, offset, snap3_data)
    snap3_checksum = checksum_dev(dev)
    snap3 = cmd.snapshot_create(address)

    backup3_info = create_backup(address, snap3, backup_target)
    assert backup3_info["VolumeName"] == volume_name
    assert backup3_info["Size"] == BLOCK_SIZE_STR

    restore_with_frontend(address, engine_name, backup3_info["URL"])

    readed = read_dev(dev, offset, length)
    assert readed == snap3_data
    c = checksum_dev(dev)
    assert c == snap3_checksum

    rm_backups(address, engine_name, [backup3_info["URL"]])

    restore_with_frontend(address, engine_name, backup1_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap1_data
    c = checksum_dev(dev)
    assert c == snap1_checksum

    rm_backups(address, engine_name, [backup1_info["URL"]])

    restore_with_frontend(address, engine_name, backup2_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap2_data
    c = checksum_dev(dev)
    assert c == snap2_checksum

    rm_backups(address, engine_name, [backup2_info["URL"]])
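These examples lean on helpers from the test suite's common module. As a minimal sketch of the central one (names and signatures inferred from usage, not taken from the real module), verify_data writes a payload and immediately reads it back:

def verify_data(dev, offset, data):
    # write the payload, then read it back and compare (sketch; the
    # real helper may do more, e.g. retry or flush)
    write_dev(dev, offset, data)
    assert read_dev(dev, offset, len(data)) == data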
Example 2
def test_ha_remove_extra_disks(
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2):  # NOQA
    address = grpc_controller.address

    prepare_backup_dir(BACKUP_DIR)
    open_replica(grpc_replica1)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    v = grpc_controller.volume_start(replicas=[r1_url])
    assert v.name == VOLUME_NAME
    assert v.replicaCount == 1

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 1
    assert replicas[0].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    wasted_data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, wasted_data)

    # now replica1 contains extra data in a snapshot
    cmd.snapshot_create(address)

    cleanup_controller(grpc_controller)

    open_replica(grpc_replica2)
    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r2_url])
    assert v.name == VOLUME_NAME
    assert v.replicaCount == 1

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 1
    assert replicas[0].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, data)

    r1 = grpc_replica1.replica_reload()
    print(r1)

    cmd.add_replica(address, r1_url)
    wait_for_rebuild_complete(address)

    verify_data(dev, data_offset, data)
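wait_for_rebuild_complete and the other wait_for_* helpers follow the usual poll-until-true pattern. A generic, purely illustrative version (retry counts and intervals are assumptions):

import time

def wait_until(predicate, retries=60, interval=0.5):
    # poll until the condition holds or the retry budget is exhausted
    for _ in range(retries):
        if predicate():
            return
        time.sleep(interval)
    raise AssertionError("condition not met in time")

# e.g. wait until every replica reports RW:
# wait_until(lambda: all(r.mode == "RW"
#                        for r in grpc_controller.replica_list()))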
Example 3
def backup_hole_with_backing_file_test(
        backup_target,  # NOQA
        grpc_backing_controller,  # NOQA
        grpc_backing_replica1,  # NOQA
        grpc_backing_replica2):  # NOQA
    address = grpc_backing_controller.address

    dev = get_backing_dev(grpc_backing_replica1, grpc_backing_replica2,
                          grpc_backing_controller)

    offset1 = 512
    length1 = 256

    offset2 = 640
    length2 = 256

    boundary_offset = 0
    boundary_length = 4100  # just past 4096, crossing into the next 4K block

    hole_offset = 2 * 1024 * 1024
    hole_length = 1024

    snap1_data = random_string(length1)
    verify_data(dev, offset1, snap1_data)
    snap1_checksum = checksum_dev(dev)
    snap1 = cmd.snapshot_create(address)

    boundary_data_backup1 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup1 = read_dev(dev, hole_offset, hole_length)
    backup1_info = create_backup(address, snap1, backup_target)

    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    snap2_checksum = checksum_dev(dev)
    snap2 = cmd.snapshot_create(address)

    boundary_data_backup2 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup2 = read_dev(dev, hole_offset, hole_length)
    backup2_info = create_backup(address, snap2, backup_target)

    restore_with_frontend(address, ENGINE_BACKING_NAME, backup1_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup1
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup1
    c = checksum_dev(dev)
    assert c == snap1_checksum

    restore_with_frontend(address, ENGINE_BACKING_NAME, backup2_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup2
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup2
    c = checksum_dev(dev)
    assert c == snap2_checksum
Example 4
def test_basic_rw(dev):  # NOQA
    for i in range(0, 10):
        base = random.randint(1, SIZE - PAGE_SIZE)
        offset = (base // PAGE_SIZE) * PAGE_SIZE
        length = base - offset
        data = random_string(length)
        verify_data(dev, offset, data)
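The offset/length arithmetic above keeps each random write inside a single page. A worked example (PAGE_SIZE assumed to be 512 here; the suite defines its own constant):

PAGE_SIZE = 512                           # assumed for illustration
base = 1300
offset = (base // PAGE_SIZE) * PAGE_SIZE  # 1024: rounded down to a page boundary
length = base - offset                    # 276: the remainder within that page
assert offset % PAGE_SIZE == 0 and 0 <= length < PAGE_SIZE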
Example 5
def test_backup_volume_deletion(
        grpc_replica1,
        grpc_replica2,  # NOQA
        grpc_controller,
        backup_targets):  # NOQA
    offset = 0
    length = 128
    address = grpc_controller.address

    for backup_target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        snap_data = random_string(length)
        verify_data(dev, offset, snap_data)
        snap = cmd.snapshot_create(address)

        backup_info = create_backup(address, snap, backup_target)
        assert backup_info["VolumeName"] == VOLUME_NAME
        assert backup_info["Size"] == BLOCK_SIZE_STR
        assert snap in backup_info["SnapshotName"]

        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
        info = cmd.backup_volume_list(address, VOLUME_NAME, backup_target)
        assert "cannot find" in info[VOLUME_NAME]["Messages"]["error"]

        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
Example 6
def test_rw_with_metric(
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2):  # NOQA
    rw_dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    replies = grpc_controller.metric_get()
    # skip the first metric since its fields are 0
    next(replies).metric

    for i in range(0, 5):
        base = random.randint(1, SIZE - PAGE_SIZE)
        offset = (base // PAGE_SIZE) * PAGE_SIZE
        length = base - offset
        data = random_string(length)
        verify_data(rw_dev, offset, data)

        while True:
            try:
                metric = next(replies).metric
                # exact metric values are hard to predict;
                # just check that they are non-zero
                assert metric.readBandwidth != 0
                assert metric.writeBandwidth != 0
                assert metric.iOPS != 0
                break
            except StopIteration:
                time.sleep(1)
Example 7
def test_frontend_switch(
        grpc_controller_no_frontend,  # NOQA
        grpc_replica1,
        grpc_replica2):  # NOQA

    open_replica(grpc_replica1)
    open_replica(grpc_replica2)

    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    r2_url = grpc_replica2.url
    v = grpc_controller_no_frontend.volume_start(replicas=[r1_url, r2_url])
    assert v.name == VOLUME_NO_FRONTEND_NAME
    assert v.replicaCount == 2
    assert v.frontend == ""

    start_engine_frontend(ENGINE_NO_FRONTEND_NAME)

    dev = get_blockdev(volume=VOLUME_NO_FRONTEND_NAME)

    data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, data)

    shutdown_engine_frontend(ENGINE_NO_FRONTEND_NAME)

    start_engine_frontend(ENGINE_NO_FRONTEND_NAME)

    dev = get_blockdev(volume=VOLUME_NO_FRONTEND_NAME)
    verify_read(dev, data_offset, data)

    shutdown_engine_frontend(ENGINE_NO_FRONTEND_NAME)
Example 8
def test_ha_single_replica_failure(
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2):  # NOQA
    open_replica(grpc_replica1)
    open_replica(grpc_replica2)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r1_url, r2_url])
    assert v.replicaCount == 2

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, data)

    cleanup_replica(grpc_replica2)

    verify_async(dev, 10, 128, 1)

    verify_replica_state(grpc_controller, 1, "ERR")

    verify_read(dev, data_offset, data)
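verify_read is the read-only counterpart of verify_data; a one-line sketch inferred from how the tests use it:

def verify_read(dev, offset, data):
    # read back and compare, without writing first (sketch)
    assert read_dev(dev, offset, len(data)) == data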
Example 9
def test_snapshot_tree_rebuild(
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2):  # NOQA
    address = grpc_controller.address

    offset = 0
    length = 128

    open_replica(grpc_replica1)
    open_replica(grpc_replica2)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r1_url, r2_url])
    assert v.name == VOLUME_NAME
    assert v.replicaCount == 2

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    snap, snap_data = snapshot_tree_build(dev, address, ENGINE_NAME, offset,
                                          length)

    data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, data)

    # Cleanup replica2
    cleanup_replica(grpc_replica2)

    verify_async(dev, 10, 128, 1)

    verify_replica_state(grpc_controller, 1, "ERR")

    verify_read(dev, data_offset, data)

    grpc_controller.replica_delete(replicas[1].address)

    # Rebuild replica2
    open_replica(grpc_replica2)
    cmd.add_replica(address, r2_url)
    wait_for_rebuild_complete(address)

    verify_async(dev, 10, 128, 1)

    verify_replica_state(grpc_controller, 1, "RW")

    snapshot_tree_verify(dev, address, ENGINE_NAME, offset, length, snap,
                         snap_data)
Example 10
def test_beyond_boundary(dev):  # NOQA
    # check write at the boundary
    data = random_string(128)
    verify_data(dev, SIZE - 1 - 128, data)

    # out of bounds
    with pytest.raises(EnvironmentError) as err:
        write_dev(dev, SIZE, "1")
    assert 'No space left' in str(err.value)
    assert len(read_dev(dev, SIZE, 1)) == 0

    # normal writes to verify controller/replica survival
    test_basic_rw(dev)
Example 11
def restore_to_file_with_backing_file_test(
        backup_target,  # NOQA
        grpc_backing_controller,  # NOQA
        grpc_backing_replica1,  # NOQA
        grpc_backing_replica2):  # NOQA
    address = grpc_backing_controller.address

    backing_dev = get_backing_dev(grpc_backing_replica1, grpc_backing_replica2,
                                  grpc_backing_controller)

    length0 = 4 * 1024
    length1 = 256
    length2 = 128
    offset0 = 0
    offset1 = length1 + offset0
    offset2 = length2 + offset0

    output_raw_path = file(OUTPUT_FILE_RAW)
    output_qcow2_path = file(OUTPUT_FILE_QCOW2)
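    # (file() above is presumably the suite's path-building helper,
    # not the old Python 2 builtin)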

    # create 1 empty snapshot.
    # data in output image == data in backing
    check_backing()
    check_empty_volume(backing_dev)
    snap0 = cmd.snapshot_create(address)
    backup = create_backup(address, snap0, backup_target)["URL"]

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset0, length0)
    dev_checksum = checksum_dev(backing_dev)
    assert volume_data != ""
    assert volume_data == backing_data

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output0_raw = read_file(output_raw_path, offset0, length0)
    output0_checksum = checksum_data(
        read_file(output_raw_path, 0, SIZE).encode('utf-8'))
    assert output0_raw == backing_data
    assert output0_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output0_qcow2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length0)
    output0_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output0_qcow2.decode('utf-8') == backing_data
    assert output0_qcow2.decode('utf-8') == volume_data
    assert output0_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    rm_backups(address, ENGINE_BACKING_NAME, [backup])

    # create 1 snapshot with 256B data.
    # output = snap1(offset0, length1) + backing(offset1, ...)
    snap1_data = random_string(length1)
    verify_data(backing_dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup = create_backup(address, snap1, backup_target)["URL"]

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset1, length0 - offset1)
    dev_checksum = checksum_dev(backing_dev)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output1_raw_snap1 = read_file(output_raw_path, offset0, length1)
    output1_raw_backing = read_file(output_raw_path, offset1,
                                    length0 - offset1)
    output1_checksum = checksum_data(
        read_file(output_raw_path, 0, SIZE).encode('utf-8'))
    assert output1_raw_snap1 == snap1_data
    assert output1_raw_backing == backing_data
    assert output1_raw_snap1 + output1_raw_backing == volume_data
    assert output1_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output1_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length1)
    output1_qcow2_backing = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - offset1)
    output1_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output1_qcow2_snap1.decode('utf-8') == snap1_data
    assert output1_qcow2_backing.decode('utf-8') == backing_data
    assert output1_qcow2_snap1.decode('utf-8') + \
        output1_qcow2_backing.decode('utf-8') == volume_data
    assert output1_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_BACKING_NAME, snap0)
    rm_snaps(address, [snap1])
    rm_backups(address, ENGINE_BACKING_NAME, [backup])
    check_backing()
    check_empty_volume(backing_dev)

    # create 2 snapshots with 256B data and 128B data
    # output = snap2(offset0, length2) +
    #          snap1(offset2, length1 - length2) + backing(offset1, ...)
    snap1_data = random_string(length1)
    verify_data(backing_dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    snap2_data = random_string(length2)
    verify_data(backing_dev, offset0, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup = create_backup(address, snap2, backup_target)["URL"]

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset1, length0 - offset1)
    dev_checksum = checksum_dev(backing_dev)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output2_raw_snap2 = read_file(output_raw_path, offset0, length2)
    output2_raw_snap1 = read_file(output_raw_path, offset2, length1 - length2)
    output2_raw_backing = read_file(output_raw_path, offset1,
                                    length0 - offset1)
    output2_checksum = checksum_data(
        read_file(output_raw_path, 0, SIZE).encode('utf-8'))
    assert output2_raw_snap2 == snap2_data
    assert output2_raw_snap1 == snap1_data[offset2:length1]
    assert output2_raw_backing == backing_data
    assert \
        volume_data == \
        output2_raw_snap2 + output2_raw_snap1 + output2_raw_backing
    assert output2_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output2_qcow2_snap2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length2)
    output2_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset2, length1 - length2)
    output2_qcow2_backing = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - offset1)
    output2_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output2_qcow2_snap2.decode('utf-8') == snap2_data
    assert output2_qcow2_snap1.decode('utf-8') == snap1_data[offset2:length1]
    assert output2_qcow2_backing.decode('utf-8') == backing_data
    assert \
        volume_data == \
        output2_qcow2_snap2.decode('utf-8') + \
        output2_qcow2_snap1.decode('utf-8') + \
        output2_qcow2_backing.decode('utf-8')
    assert output2_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_BACKING_NAME, snap0)
    rm_snaps(address, [snap1, snap2])
    rm_backups(address, ENGINE_BACKING_NAME, [backup])
    check_backing()
    check_empty_volume(backing_dev)
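The checksum helpers reduce a device or byte string to a digest so whole-volume contents can be compared cheaply. A sketch, assuming SHA-512 over the raw bytes (the real algorithm may differ):

import hashlib

def checksum_data(data):
    # digest of a byte string (assumption: SHA-512)
    return hashlib.sha512(data).hexdigest()

def checksum_dev(dev):
    # digest of the whole device; SIZE is the suite's volume-size constant
    return checksum_data(dev.readat(0, SIZE).encode('utf-8'))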
Example 12
def test_ha_double_replica_rebuild(
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2):  # NOQA
    open_replica(grpc_replica1)
    open_replica(grpc_replica2)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r1_url, r2_url])
    assert v.name == VOLUME_NAME
    assert v.replicaCount == 2

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    data1 = random_string(128)
    data1_offset = 1024
    verify_data(dev, data1_offset, data1)

    # Close replica2
    r2 = grpc_replica2.replica_get()
    assert r2.revisionCounter == 1
    grpc_replica2.replica_close()

    verify_async(dev, 10, 128, 1)

    verify_replica_state(grpc_controller, 1, "ERR")

    verify_read(dev, data1_offset, data1)

    data2 = random_string(128)
    data2_offset = 512
    verify_data(dev, data2_offset, data2)

    # Close replica1
    r1 = grpc_replica1.replica_get()
    assert r1.revisionCounter == 12  # 1 + 10 + 1
    grpc_replica1.replica_close()

    # Restart volume
    cleanup_controller(grpc_controller)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    # NOTE the order is reversed here
    r1_url = grpc_replica1.url
    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r2_url, r1_url])
    assert v.replicaCount == 2

    # replica2 is out because of lower revision counter
    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "ERR"
    assert replicas[1].mode == "RW"

    verify_read(dev, data1_offset, data1)
    verify_read(dev, data2_offset, data2)

    # Rebuild replica2
    r2 = grpc_replica2.replica_get()
    assert r2.revisionCounter == 1
    grpc_replica2.replica_close()

    grpc_controller.replica_delete(replicas[0].address)

    cmd.add_replica(grpc_controller.address, r2_url)
    wait_for_rebuild_complete(grpc_controller.address)

    verify_async(dev, 10, 128, 1)

    verify_replica_state(grpc_controller, 1, "RW")

    verify_read(dev, data1_offset, data1)
    verify_read(dev, data2_offset, data2)

    r1 = grpc_replica1.replica_get()
    r2 = grpc_replica2.replica_get()
    assert r1.revisionCounter == 22  # 1 + 10 + 1 + 10
    assert r2.revisionCounter == 22  # must be in sync with r1
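random_string supplies the payloads throughout; a minimal sketch (alphabet and RNG are assumptions):

import random
import string

def random_string(length):
    # printable payload of the requested length (sketch)
    return ''.join(random.choice(string.ascii_letters + string.digits)
                   for _ in range(length))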
Example 13
def test_upgrade(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_fixed_dir_replica1,
        grpc_fixed_dir_replica2,  # NOQA
        grpc_extra_replica1,
        grpc_extra_replica2):  # NOQA

    dev = get_dev(grpc_fixed_dir_replica1, grpc_fixed_dir_replica2,
                  grpc_controller)

    offset = 0
    length = 128

    data = random_string(length)
    verify_data(dev, offset, data)

    # both sets point to the same underlying volume
    r1_url = grpc_fixed_dir_replica1.url
    r2_url = grpc_fixed_dir_replica2.url
    upgrade_r1_url = grpc_extra_replica1.url
    upgrade_r2_url = grpc_extra_replica2.url

    v = grpc_controller.volume_start(replicas=[r1_url, r2_url])
    assert v.replicaCount == 2

    upgrade_e = grpc_engine_manager.engine_upgrade(
        ENGINE_NAME, LONGHORN_UPGRADE_BINARY, SIZE,
        [upgrade_r1_url, upgrade_r2_url])
    assert upgrade_e.spec.binary == LONGHORN_UPGRADE_BINARY

    verify_data(dev, offset, data)

    grpc_controller.client_upgrade(upgrade_e.spec.listen)
    wait_for_process_running(grpc_engine_manager, ENGINE_NAME,
                             INSTANCE_MANAGER_TYPE_ENGINE)

    # cannot start with same binary
    with pytest.raises(grpc.RpcError):
        grpc_engine_manager.engine_upgrade(ENGINE_NAME,
                                           LONGHORN_UPGRADE_BINARY, SIZE,
                                           [r1_url, r2_url])
    verify_data(dev, offset, data)

    # cannot start with wrong replica, would trigger rollback
    with pytest.raises(grpc.RpcError):
        grpc_engine_manager.engine_upgrade(ENGINE_NAME,
                                           LONGHORN_UPGRADE_BINARY, SIZE,
                                           ["random"])
    verify_data(dev, offset, data)

    grpc_fixed_dir_replica1 = cleanup_replica(grpc_fixed_dir_replica1)
    grpc_fixed_dir_replica2 = cleanup_replica(grpc_fixed_dir_replica2)
    open_replica(grpc_fixed_dir_replica1)
    open_replica(grpc_fixed_dir_replica2)

    e = grpc_engine_manager.engine_upgrade(ENGINE_NAME, LONGHORN_BINARY, SIZE,
                                           [r1_url, r2_url])
    assert e.spec.binary == LONGHORN_BINARY

    verify_data(dev, offset, data)

    grpc_controller.client_upgrade(e.spec.listen)
    wait_for_process_running(grpc_engine_manager, ENGINE_NAME,
                             INSTANCE_MANAGER_TYPE_ENGINE)
Example 14
def restore_inc_test(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2,  # NOQA
        grpc_dr_controller,  # NOQA
        grpc_dr_replica1,
        grpc_dr_replica2,  # NOQA
        backup_target):  # NOQA
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    zero_string = b'\x00'.decode('utf-8')

    # backup0: 256 random data in 1st block
    length0 = 256
    snap0_data = random_string(length0)
    verify_data(dev, 0, snap0_data)
    verify_data(dev, BLOCK_SIZE, snap0_data)
    snap0 = cmd.snapshot_create(address)
    backup0 = create_backup(address, snap0, backup_target)["URL"]
    backup0_name = cmd.backup_inspect(address, backup0)['Name']

    # backup1: 32 random data + 32 zero data + 192 random data in 1st block
    length1 = 32
    offset1 = 32
    snap1_data = zero_string * length1
    verify_data(dev, offset1, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup1 = create_backup(address, snap1, backup_target)["URL"]
    backup1_name = cmd.backup_inspect(address, backup1)['Name']

    # backup2: 32 random data + 256 random data in 1st block,
    #          256 random data in 2nd block
    length2 = 256
    offset2 = 32
    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    verify_data(dev, BLOCK_SIZE, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup2 = create_backup(address, snap2, backup_target)["URL"]
    backup2_name = cmd.backup_inspect(address, backup2)['Name']

    # backup3: 64 zero data + 192 random data in 1st block
    length3 = 64
    offset3 = 0
    verify_data(dev, offset3, zero_string * length3)
    verify_data(dev, length2, zero_string * offset2)
    verify_data(dev, BLOCK_SIZE, zero_string * length2)
    snap3 = cmd.snapshot_create(address)
    backup3 = create_backup(address, snap3, backup_target)["URL"]
    backup3_name = cmd.backup_inspect(address, backup3)['Name']

    # backup4: 256 random data in 1st block
    length4 = 256
    offset4 = 0
    snap4_data = random_string(length4)
    verify_data(dev, offset4, snap4_data)
    snap4 = cmd.snapshot_create(address)
    backup4 = create_backup(address, snap4, backup_target)["URL"]
    backup4_name = cmd.backup_inspect(address, backup4)['Name']

    # start the DR volume (no frontend)
    dr_address = grpc_dr_controller.address
    start_no_frontend_volume(grpc_engine_manager, grpc_dr_controller,
                             grpc_dr_replica1, grpc_dr_replica2)

    cmd.backup_restore(dr_address, backup0)
    wait_for_restore_completion(dr_address, backup0)
    verify_no_frontend_data(grpc_engine_manager, 0, snap0_data,
                            grpc_dr_controller)

    # mock restore crash/error
    delta_file1 = "volume-delta-" + backup0_name + ".img"
    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*blk"]
        blocks = subprocess.check_output(command).split()
        assert len(blocks) != 0
        for blk in blocks:
            command = ["mv", blk, blk + ".tmp".encode('utf-8')]
            subprocess.check_output(command).strip()
        # should fail
        is_failed = False
        cmd.restore_inc(dr_address, backup1, backup0_name)
        for i in range(RETRY_COUNTS):
            rs = cmd.restore_status(dr_address)
            for status in rs.values():
                if status['backupURL'] != backup1:
                    break
                if 'error' in status.keys():
                    if status['error'] != "":
                        assert 'no such file or directory' in \
                               status['error']
                        is_failed = True
            if is_failed:
                break
            time.sleep(RETRY_INTERVAL)
        assert is_failed

        assert path.exists(FIXED_REPLICA_PATH1 + delta_file1)
        assert path.exists(FIXED_REPLICA_PATH2 + delta_file1)
        for blk in blocks:
            command = ["mv", blk + ".tmp".encode('utf-8'), blk]
            subprocess.check_output(command)

    data1 = \
        snap0_data[0:offset1] + snap1_data + \
        snap0_data[offset1+length1:]
    # race condition: the last restoration failed, but `isRestoring`
    # hasn't been cleaned up yet, so retry until the flag clears
    for i in range(RETRY_COUNTS):
        try:
            restore_incrementally(dr_address, backup1, backup0_name)
            break
        except subprocess.CalledProcessError as e:
            if "already in progress" in e.output:
                time.sleep(RETRY_INTERVAL)
            else:
                raise e

    verify_no_frontend_data(grpc_engine_manager, 0, data1, grpc_dr_controller)

    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file1)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file1)

    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup1_name)

    data2 = \
        data1[0:offset2] + snap2_data + \
        zero_string * (BLOCK_SIZE - length2 - offset2) + snap2_data
    restore_incrementally(dr_address, backup2, backup1_name)
    verify_no_frontend_data(grpc_engine_manager, 0, data2, grpc_dr_controller)

    delta_file2 = "volume-delta-" + backup1_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file2)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file2)

    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup2_name)

    # mock race condition
    with pytest.raises(subprocess.CalledProcessError) as e:
        restore_incrementally(dr_address, backup1, backup0_name)
    assert "doesn't match lastRestored" in str(e.value)

    data3 = zero_string * length3 + data2[length3:length2]
    restore_incrementally(dr_address, backup3, backup2_name)
    verify_no_frontend_data(grpc_engine_manager, 0, data3, grpc_dr_controller)

    delta_file3 = "volume-delta-" + backup3_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file3)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file3)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup3_name)

    # mock corner case: invalid last-restored backup
    rm_backups(address, ENGINE_NAME, [backup3])
    # since backup3 is gone, this actually performs a full restoration
    restore_incrementally(dr_address, backup4, backup3_name)
    verify_no_frontend_data(grpc_engine_manager, 0, snap4_data,
                            grpc_dr_controller)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup4_name)

    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*tempoary"]
        tmp_files = subprocess.check_output(command).split()
        assert len(tmp_files) == 0

    cleanup_no_frontend_volume(grpc_engine_manager, grpc_dr_controller,
                               grpc_dr_replica1, grpc_dr_replica2)

    rm_backups(address, ENGINE_NAME, [backup0, backup1, backup2, backup4])

    cmd.sync_agent_server_reset(address)
    cleanup_controller(grpc_controller)
    cleanup_replica(grpc_replica1)
    cleanup_replica(grpc_replica2)
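compare_last_restored_with_backup presumably checks every replica's restore status against the expected backup name; a sketch built from the restore_status dictionaries and the lastRestored field mentioned above:

def compare_last_restored_with_backup(restore_status, backup_name):
    # every replica should record the given backup as the last one restored
    for status in restore_status.values():
        assert status['lastRestored'] == backup_name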
Example 15
def volume_expansion_with_backup_test(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_dr_controller,  # NOQA
        grpc_replica1,
        grpc_replica2,  # NOQA
        grpc_dr_replica1,  # NOQA
        grpc_dr_replica2,  # NOQA
        volume_name,
        engine_name,
        backup_target):  # NOQA
    address = grpc_controller.address
    dr_address = grpc_dr_controller.address
    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    start_no_frontend_volume(grpc_engine_manager, grpc_dr_controller,
                             grpc_dr_replica1, grpc_dr_replica2)

    try:
        cmd.backup_volume_rm(address, volume_name, backup_target)
    except Exception:
        pass

    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)

    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == volume_name
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(grpc_engine_manager, data0.offset, data0.content,
                            grpc_dr_controller)

    grpc_controller.volume_expand(EXPAND_SIZE)
    wait_for_volume_expansion(grpc_controller, EXPAND_SIZE)
    check_block_device_size(volume_name, EXPAND_SIZE)

    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(SIZE, EXPAND_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    backup1_info = create_backup(address, snap1.name, backup_target,
                                 EXPAND_SIZE_STR)
    assert backup1_info["VolumeName"] == volume_name
    assert backup1_info["Size"] == str(2 * BLOCK_SIZE)

    backup_volumes = cmd.backup_volume_list(address, volume_name,
                                            backup_target)
    assert volume_name in backup_volumes
    assert backup_volumes[volume_name]["Size"] == EXPAND_SIZE_STR

    # incremental restoration will implicitly expand the volume first
    restore_incrementally(dr_address, backup1_info["URL"],
                          backup0_info["Name"])
    check_dr_volume_block_device_size(grpc_engine_manager, grpc_dr_controller,
                                      EXPAND_SIZE)
    verify_no_frontend_data(grpc_engine_manager, data1.offset, data1.content,
                            grpc_dr_controller)

    cmd.backup_volume_rm(grpc_controller.address, volume_name, backup_target)
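random_length(PAGE_SIZE) bounds each payload to at most a page; a plausible one-liner (an assumption, like the helpers sketched earlier):

import random

def random_length(limit):
    # random payload length in [1, limit] (sketch)
    return random.randint(1, limit)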
Example 16
def volume_expansion_with_snapshots_test(
        dev,
        grpc_controller,  # NOQA
        volume_name,
        engine_name,
        original_data):
    # the default size is 4MB, will expand it to 8MB
    address = grpc_controller.address
    zero_char = b'\x00'.decode('utf-8')

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    grpc_controller.volume_expand(EXPAND_SIZE)
    wait_for_volume_expansion(grpc_controller, EXPAND_SIZE)
    check_block_device_size(volume_name, EXPAND_SIZE)

    snap1.verify_data()
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert dev.readat(SIZE, SIZE) == zero_char * SIZE

    # write the data to both the original part and the expanded part
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE - PAGE_SIZE, data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)
    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPAND_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)
    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPAND_SIZE-data3.offset-data3.length)

    data4_len = random_length(PAGE_SIZE)
    data4 = Data(data1.offset, data4_len, random_string(data4_len))
    snap4 = Snapshot(dev, data4, address)
    snap4.verify_data()

    # revert to snap1 then see if we can still r/w the existing data
    # and expanded part
    snapshot_revert_with_frontend(address, engine_name, snap1.name)
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert dev.readat(SIZE, SIZE) == zero_char * SIZE

    data5_len = random_length(PAGE_SIZE)
    data5 = Data(random.randrange(SIZE, EXPAND_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data5_len, random_string(data5_len))
    snap5 = Snapshot(dev, data5, address)
    snap5.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data5.offset-SIZE) + \
        data5.content + zero_char*(EXPAND_SIZE-data5.offset-data5.length)

    # delete and purge snap1; it will coalesce with the larger snap2
    cmd.snapshot_rm(address, snap1.name)
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data5.offset-SIZE) + \
        data5.content + zero_char*(EXPAND_SIZE-data5.offset-data5.length)
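The Data/Snapshot pair used by the expansion tests bundles a payload with the snapshot that captured it. A sketch inferred purely from usage (constructor order, .name, .verify_data()); the real classes live in the suite's common module:

class Data:
    def __init__(self, offset, length, content):
        self.offset = offset
        self.length = length
        self.content = content

class Snapshot:
    def __init__(self, dev, data, address):
        self.dev = dev
        self.data = data
        verify_data(dev, data.offset, data.content)  # write the payload
        self.name = cmd.snapshot_create(address)     # then snapshot it

    def verify_data(self):
        # the payload must still be readable at its original offset
        assert read_dev(self.dev, self.data.offset, self.data.length) \
            == self.data.content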
Example 17
def test_backup_type(
        grpc_replica1,
        grpc_replica2,  # NOQA
        grpc_controller,
        backup_targets):  # NOQA
    for backup_target in backup_targets:
        address = grpc_controller.address
        block_size = 2 * 1024 * 1024

        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

        zero_string = b'\x00'.decode('utf-8')

        # backup0: 256 random data in 1st block
        length0 = 256
        snap0_data = random_string(length0)
        verify_data(dev, 0, snap0_data)
        verify_data(dev, block_size, snap0_data)
        snap0 = cmd.snapshot_create(address)
        backup0 = create_backup(address, snap0, backup_target)
        backup0_url = backup0["URL"]
        assert backup0['IsIncremental'] is False

        # backup1: 32 random data + 32 zero data + 192 random data in 1st block
        length1 = 32
        offset1 = 32
        snap1_data = zero_string * length1
        verify_data(dev, offset1, snap1_data)
        snap1 = cmd.snapshot_create(address)
        backup1 = create_backup(address, snap1, backup_target)
        backup1_url = backup1["URL"]
        assert backup1['IsIncremental'] is True

        # backup2: 32 random data + 256 random data in 1st block,
        #          256 random data in 2nd block
        length2 = 256
        offset2 = 32
        snap2_data = random_string(length2)
        verify_data(dev, offset2, snap2_data)
        verify_data(dev, block_size, snap2_data)
        snap2 = cmd.snapshot_create(address)
        backup2 = create_backup(address, snap2, backup_target)
        backup2_url = backup2["URL"]
        assert backup2['IsIncremental'] is True

        rm_backups(address, ENGINE_NAME, [backup2_url])
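        # assumption: with the most recent backup deleted there is no parent
        # to delta against, so the next backup below falls back to a full one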

        # backup3: 64 zero data + 192 random data in 1st block
        length3 = 64
        offset3 = 0
        verify_data(dev, offset3, zero_string * length3)
        verify_data(dev, length2, zero_string * offset2)
        verify_data(dev, block_size, zero_string * length2)
        snap3 = cmd.snapshot_create(address)
        backup3 = create_backup(address, snap3, backup_target)
        backup3_url = backup3["URL"]
        assert backup3['IsIncremental'] is False

        # backup4: 256 random data in 1st block
        length4 = 256
        offset4 = 0
        snap4_data = random_string(length4)
        verify_data(dev, offset4, snap4_data)
        snap4 = cmd.snapshot_create(address)
        backup4 = create_backup(address, snap4, backup_target)
        backup4_url = backup4["URL"]
        assert backup4['IsIncremental'] is True

        rm_backups(address, ENGINE_NAME,
                   [backup0_url, backup1_url, backup3_url, backup4_url])

        cmd.sync_agent_server_reset(address)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
        cleanup_controller(grpc_controller)
Example 18
def test_expansion_with_rebuild(
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2):  # NOQA
    address = grpc_controller.address
    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    # the default size is 4MB, will expand it to 8MB
    zero_char = b'\x00'.decode('utf-8')
    original_data = zero_char * SIZE

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    grpc_controller.volume_expand(EXPAND_SIZE)
    wait_for_volume_expansion(grpc_controller, EXPAND_SIZE)
    check_block_device_size(VOLUME_NAME, EXPAND_SIZE)

    snap1.verify_data()
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert dev.readat(SIZE, SIZE) == zero_char * SIZE

    # write the data to both the original part and the expanded part
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE - PAGE_SIZE, data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)
    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPAND_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)
    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPAND_SIZE-data3.offset-data3.length)

    # Cleanup replica2
    cleanup_replica(grpc_replica2)
    verify_replica_state(grpc_controller, 1, "ERR")
    grpc_controller.replica_delete(replicas[1].address)

    # Rebuild replica2.
    open_replica(grpc_replica2)
    # The newly opened replica2 will be expanded automatically
    cmd.add_replica(address, grpc_replica2.url)
    wait_for_rebuild_complete(address)
    verify_replica_state(grpc_controller, 1, "RW")

    # Cleanup replica1 then check if the rebuilt replica2 works fine
    cleanup_replica(grpc_replica1)
    verify_replica_state(grpc_controller, 0, "ERR")
    grpc_controller.replica_delete(replicas[0].address)

    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:data2.offset] + \
        data2.content + \
        original_data[data2.offset+data2.length:]
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPAND_SIZE-data3.offset-data3.length)

    data4_len = random_length(PAGE_SIZE)
    data4 = Data(data1.offset, data4_len, random_string(data4_len))
    snap4 = Snapshot(dev, data4, address)
    snap4.verify_data()
Example 19
def restore_to_file_without_backing_file_test(
        backup_target,  # NOQA
        grpc_controller,  # NOQA
        grpc_replica1,  # NOQA
        grpc_replica2):  # NOQA
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    length0 = 256
    length1 = 128
    offset0 = 0
    offset1 = length1 + offset0

    output_raw_path = file(OUTPUT_FILE_RAW)
    output_qcow2_path = file(OUTPUT_FILE_QCOW2)

    # create 1 empty snapshot so we can revert to the initial state later.
    snap0 = cmd.snapshot_create(address)

    # create 1 snapshot with 256B data.
    # output = snap1(offset0, length0)
    snap1_data = random_string(length0)
    verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup = create_backup(address, snap1, backup_target)["URL"]

    cmd.restore_to_file(address, backup, "", output_raw_path, IMAGE_FORMAT_RAW)
    output1_raw = read_file(output_raw_path, offset0, length0)
    assert output1_raw == snap1_data
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, "", output_qcow2_path,
                        IMAGE_FORMAT_QCOW2)
    output1_qcow2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length0)
    assert output1_qcow2.decode('utf-8') == snap1_data
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap0)
    rm_snaps(address, [snap1])
    rm_backups(address, ENGINE_NAME, [backup])

    # create 2 snapshots with 256B data and 128B data
    # output = snap2(offset0, length1) +
    #          snap1(offset1, length0 - length1)
    snap1_data = random_string(length0)
    verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    snap2_data = random_string(length1)
    verify_data(dev, offset0, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup = create_backup(address, snap2, backup_target)["URL"]

    cmd.restore_to_file(address, backup, "", output_raw_path, IMAGE_FORMAT_RAW)
    output2_raw_snap2 = read_file(output_raw_path, offset0, length1)
    output2_raw_snap1 = read_file(output_raw_path, offset1, length0 - length1)
    assert output2_raw_snap2 == snap2_data
    assert output2_raw_snap1 == snap1_data[offset1:length0]

    cmd.restore_to_file(address, backup, "", output_qcow2_path,
                        IMAGE_FORMAT_QCOW2)
    output2_qcow2_snap2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length1)
    output2_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - length1)
    assert output2_qcow2_snap2.decode('utf-8') == snap2_data
    assert output2_qcow2_snap1.decode('utf-8') == snap1_data[offset1:length0]
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap0)
    rm_snaps(address, [snap1, snap2])
    rm_backups(address, ENGINE_NAME, [backup])
Example 20
def test_ha_single_replica_rebuild(
        grpc_controller,  # NOQA
        grpc_replica1,
        grpc_replica2):  # NOQA
    address = grpc_controller.address

    open_replica(grpc_replica1)
    open_replica(grpc_replica2)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r1_url, r2_url])
    assert v.replicaCount == 2

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, data)

    # Cleanup replica2
    cleanup_replica(grpc_replica2)

    verify_async(dev, 10, 128, 1)

    verify_replica_state(grpc_controller, 1, "ERR")

    verify_read(dev, data_offset, data)

    grpc_controller.replica_delete(replicas[1].address)

    # Rebuild replica2
    open_replica(grpc_replica2)
    cmd.add_replica(address, r2_url)
    wait_for_rebuild_complete(address)

    verify_async(dev, 10, 128, 1)

    verify_replica_state(grpc_controller, 1, "RW")

    verify_read(dev, data_offset, data)

    # WORKAROUND: the parent of the volume head cannot be removed directly
    newsnap = cmd.snapshot_create(address)

    info = cmd.snapshot_info(address)
    assert len(info) == 3
    sysnap = info[newsnap]["parent"]
    assert info[sysnap]["parent"] == ""
    assert newsnap in info[sysnap]["children"]
    assert info[sysnap]["usercreated"] is False
    assert info[sysnap]["removed"] is False

    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    info = cmd.snapshot_info(address)
    assert len(info) == 2
    assert info[newsnap] is not None
    assert info[VOLUME_HEAD] is not None
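For reference, the asserts above imply a cmd.snapshot_info() result shaped roughly like this (names and values are illustrative only):

# {
#     "<system-snap>": {"parent": "", "children": ["<newsnap>"],
#                       "usercreated": False, "removed": False},
#     "<newsnap>":     {"parent": "<system-snap>",
#                       "children": ["volume-head"], ...},
#     "volume-head":   {"parent": "<newsnap>", ...},
# }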
Example 21
def snapshot_tree_create_node(dev, address, offset, length, snap, data, name):
    data[name] = random_string(length)
    verify_data(dev, offset, data[name])
    snap[name] = cmd.snapshot_create(address)
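A hypothetical use of this node builder, chaining two snapshots (node names are illustrative):

snap, data = {}, {}
snapshot_tree_create_node(dev, address, 0, 128, snap, data, "root")
snapshot_tree_create_node(dev, address, 0, 128, snap, data, "root-child")
# data maps node names to what was written; snap maps them to snapshot IDs,
# so a later revert-and-verify pass can walk the tree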