# These excerpts come from zettarepl's integration test suite; several snippets
# originate from different test modules (note the duplicated test_push_replication
# name). The pytest parametrize decorators were stripped by the source listing.
# The imports below are reconstructed from what the snippets use; module paths
# are assumed from the zettarepl project layout. The tests also shell out to
# external tools (zfs, dd, throttle, killall, midclt) assumed to be installed.
import logging
import os
import subprocess
import textwrap
from unittest.mock import Mock

import libzfs
import yaml

from zettarepl.definition import Definition
from zettarepl.observer import ReplicationTaskError, ReplicationTaskSuccess
from zettarepl.replication.task.task import ReplicationTask
from zettarepl.snapshot.list import list_snapshots
from zettarepl.transport.local import LocalShell
from zettarepl.utils.itertools import select_by_class
from zettarepl.zettarepl import Zettarepl

# Shared test helpers (assumed to live in the suite's utilities module).
from zettarepl.utils.test import (
    create_dataset,
    run_replication_test,
    set_localhost_transport_options,
    wait_replication_tasks_to_complete,
)


def test_encrypted_target_but_unencrypted_target_exists():
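    """Replicating with an `encryption` config must fail when the target
    dataset already exists and is not encrypted."""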
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src", encrypted=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    create_dataset("data/dst")

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            properties: false
            encryption:
              key: password
              key-format: passphrase
              key-location: $TrueNAS
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    error = run_replication_test(definition, success=False)
    assert "but it already exists and is not encrypted" in error.error


def test_encrypted_target_local():
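    """Pushing an unencrypted source onto a pre-existing encrypted target
    (which is its own encryption root) must be rejected."""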
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src")
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    create_dataset("data/dst", True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    error = run_replication_test(definition, success=False)
    assert "is it's own encryption root" in error.error
def test_zvol_replication__onto_existing_encrypted_unrelated_dataset():
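    """Pushing a zvol onto an existing, unrelated zvol under an encrypted
    parent must fail rather than overwrite it."""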
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create -V 10M data/src", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00", shell=True)
    create_dataset("data/dst", encrypted=True)
    subprocess.check_call("zfs create -V 10M data/dst/vol", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/dev/zvol/data/dst/vol bs=1M count=10", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst/vol
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    error = run_replication_test(definition, success=False)

    assert error.error == "Unable to send dataset 'data/src' to existing unrelated encrypted dataset 'data/dst/vol'"


def test_replication_resume__recursive_mount(canmount):
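    """After resuming an interrupted recursive incremental receive, the child
    dataset's mounted state must follow its canmount setting (parametrized)."""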
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src")
    create_dataset("data/src/child")
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs send -R data/src@2018-10-01_01-00 | zfs recv -s -F data/dst", shell=True)
    if not canmount:
        subprocess.check_call("zfs set canmount=off data/dst", shell=True)
        subprocess.check_call("zfs set canmount=off data/dst/child", shell=True)

    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("(zfs send -i data/src@2018-10-01_01-00 data/src@2018-10-01_02-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
                          "sleep 1; killall zfs", shell=True)

    assert "receive_resume_token\t1-" in subprocess.check_output("zfs get -H receive_resume_token data/dst",
                                                                 shell=True, encoding="utf-8")

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))

    run_replication_test(definition)

    mounted = subprocess.check_output("zfs get -H -o value mounted data/dst/child", shell=True, encoding="utf-8")
    if canmount:
        assert mounted == "yes\n"
    else:
        assert mounted == "no\n"
Exemple #5
0
def test_name_regex(caplog, transport, all_names, resume):
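    """`name-regex` must select only matching snapshots for replication and
    drive the expected number of push steps (verified via the log records)."""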
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src")
    subprocess.check_call("zfs snapshot -r data/src@snap-2", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@manual-1", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@snap-1", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@manual-2", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@snap-3", shell=True)

    if resume:
        subprocess.check_call(
            "zfs send data/src@snap-2 | zfs recv -s -F data/dst", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: false
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    if all_names:
        definition["replication-tasks"]["src"]["name-regex"] = ".*"
    else:
        definition["replication-tasks"]["src"]["name-regex"] = "snap-.*"

    caplog.set_level(logging.INFO)
    run_replication_test(definition)

    assert len(list_snapshots(LocalShell(), "data/dst",
                              False)) == (5 if all_names else 3)

    logs = [
        record.message for record in caplog.get_records("call")
        if "For replication task 'src': doing push" in record.message
    ]
    if all_names:
        if resume:
            assert len(logs) == 1
        else:
            assert len(logs) == 2
    else:
        if resume:
            assert len(logs) == 2
        else:
            assert len(logs) == 3


def test_create_encrypted_target(encryption, key_location, transport):
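    """Replication must create the target with the requested encryption and
    either store the key in the TrueNAS database ($TrueNAS) or point
    keylocation at the key file (parametrized over formats and transports)."""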
    encryption["key-location"] = key_location

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)
    if os.path.exists("/tmp/test.key"):
        os.unlink("/tmp/test.key")

    create_dataset("data/src", encrypted=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            properties: false
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["encryption"] = encryption
    definition["replication-tasks"]["src"]["transport"] = transport
    run_replication_test(definition)

    if key_location == "$TrueNAS":
        if encryption["key-format"] != "passphrase":
            assert (
                subprocess.check_output(["midclt", "call", "-job", "pool.dataset.export_key", "data/dst"]).decode().strip() ==
                encryption["key"]
            )
    else:
        assert (
            subprocess.check_output("zfs get -H -o value keylocation data/dst", shell=True).decode().strip() ==
            f'file://{encryption["key-location"]}'
        )


def test_push_replication(compression):
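    """Basic push replication over SSH to localhost, parametrized over the
    supported stream compression settings."""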
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src")
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: ssh
              hostname: 127.0.0.1
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
            retries: 1
    """))
    set_localhost_transport_options(
        definition["replication-tasks"]["src"]["transport"])
    definition["replication-tasks"]["src"]["compression"] = compression

    run_replication_test(definition)


def test_encrypted_target(has_data):
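    """Replicating into an existing child of an encrypted parent succeeds when
    the child is empty but fails when it already contains data."""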
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst_parent", shell=True)

    create_dataset("data/src")
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    create_dataset("data/dst_parent", True)
    subprocess.check_call("zfs create data/dst_parent/dst", shell=True)

    if has_data:
        with open("/mnt/data/dst_parent/dst/file", "w"):
            pass

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst_parent/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    error = run_replication_test(definition, success=not has_data)

    if has_data:
        assert "does not have snapshots but has data" in error.error
Exemple #9
0
def test_replication_mount__skip_parent():
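    """Mounting after replication must skip unmountable parents: with
    `readonly: set` and a parent whose mountpoint is `none`, the leaf dataset
    must still end up mounted."""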
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    try:
        create_dataset("data/src")

        create_dataset("data/src/UNIX")
        subprocess.check_call("zfs set mountpoint=/UNIX data/src/UNIX",
                              shell=True)

        create_dataset("data/src/UNIX/var")
        subprocess.check_call("zfs set mountpoint=/var data/src/UNIX/var",
                              shell=True)

        create_dataset("data/src/UNIX/var/audit")

        subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                              shell=True)

        create_dataset("data/dst")
        create_dataset("data/dst/server")
        create_dataset("data/dst/server/UNIX")
        create_dataset("data/dst/server/UNIX/var")
        subprocess.check_call(
            "zfs set mountpoint=none data/dst/server/UNIX/var", shell=True)
        create_dataset("data/dst/server/UNIX/var/audit")
        subprocess.check_call(
            "zfs set mountpoint=/data/dst/server/var/audit data/dst/server/UNIX/var/audit",
            shell=True)
        subprocess.check_call("zfs set readonly=on data/dst/server",
                              shell=True)

        definition = yaml.safe_load(
            textwrap.dedent("""\
            timezone: "UTC"
    
            periodic-snapshot-tasks:
              src:
                dataset: data/src
                recursive: true
                lifetime: PT1H
                naming-schema: "%Y-%m-%d_%H-%M"
                schedule:
                  minute: "0"
    
            replication-tasks:
              src:
                direction: push
                transport:
                  type: local
                source-dataset: data/src/UNIX
                target-dataset: data/dst/server/UNIX
                recursive: true
                properties: false
                periodic-snapshot-tasks:
                  - src
                auto: true
                retention-policy: none
                readonly: set
        """))

        run_replication_test(definition)

        mounted = subprocess.check_output(
            "zfs get -H -o value mounted data/dst/server/UNIX/var/audit",
            shell=True,
            encoding="utf-8")
        assert mounted == "yes\n"
    finally:
        subprocess.call("zfs destroy -r data/src", shell=True)
Exemple #10
0
def test_replication_resume(caplog, transport, dedup, encrypted):
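    """An interrupted initial send leaves a receive_resume_token; the next run
    must resume from it (raw sends for encrypted sources, optionally dedup)."""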
    if dedup and not hasattr(libzfs.SendFlags, "DEDUP"):
        return

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src", encrypted)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    if encrypted:
        subprocess.check_call("(zfs send -p -w data/src@2018-10-01_01-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
                              "sleep 1; killall zfs", shell=True)
    else:
        subprocess.check_call("zfs create data/dst", shell=True)
        subprocess.check_call("(zfs send data/src@2018-10-01_01-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
                              "sleep 1; killall zfs", shell=True)

    assert "receive_resume_token\t1-" in subprocess.check_output("zfs get -H receive_resume_token data/dst",
                                                                 shell=True, encoding="utf-8")

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition["replication-tasks"]["src"]["dedup"] = dedup
    if encrypted:
        definition["replication-tasks"]["src"]["properties"] = True

    caplog.set_level(logging.INFO)
    run_replication_test(definition)

    assert any(
        "Resuming replication for destination dataset" in record.message
        for record in caplog.get_records("call")
    )

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst", False)) == 1

    if not encrypted:
        assert subprocess.check_output("zfs get -H -o value mounted data/dst", shell=True, encoding="utf-8") == "yes\n"


def test_replicate_to_existing_dataset_structure(recursive, exclude,
                                                 src_has_child,
                                                 dst_child_mounted,
                                                 dst_child_has_own_contents,
                                                 deeply_nested, transport):
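    """Replicating onto a pre-existing dataset tree must refuse to overwrite
    unrelated data; it may proceed only when every destination child
    corresponds to a source child being replicated."""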
    if not recursive and exclude:
        return
    if dst_child_mounted and dst_child_has_own_contents:
        return

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    src = "data/src"
    create_dataset(src)
    if src_has_child:
        if deeply_nested:
            src = "data/src/deep"
            create_dataset(src)
        create_dataset(f"{src}/child")
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    dst = "data/dst"
    create_dataset(dst)
    if deeply_nested:
        dst = "data/dst/deep"
        create_dataset(dst)
    create_dataset(f"{dst}/child")
    if not dst_child_mounted:
        subprocess.check_call(f"zfs umount {dst}/child", shell=True)
        if dst_child_has_own_contents:
            with open(f"/mnt/{dst}/child/file", "w") as f:
                pass

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: false
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["recursive"] = recursive
    if exclude:
        definition["replication-tasks"]["src"]["exclude"] = [f"{src}/child"]
    definition["replication-tasks"]["src"]["transport"] = transport

    if not recursive or exclude or not src_has_child or dst_child_has_own_contents:
        error = run_replication_test(definition, success=False)

        assert "Refusing to overwrite existing data" in error.error
    else:
        run_replication_test(definition)


def test_push_replication(dst_parent_is_readonly, dst_exists, transport,
                          replicate, properties, compression, encrypted,
                          dst_parent_encrypted):
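    """Full push matrix (transports, replicate/properties, compression,
    encryption): drives Zettarepl directly with a Mock observer and verifies
    errors, snapshot counts, and property propagation across two runs."""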
    if transport["type"] != "ssh" and compression:
        return
    if replicate and not properties:
        return

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst_parent", shell=True)

    create_dataset("data/src", encrypted)
    subprocess.check_call("zfs set test:property=test-value data/src",
                          shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    create_dataset("data/dst_parent", dst_parent_encrypted)
    if dst_exists:
        subprocess.check_call("zfs create data/dst_parent/dst", shell=True)
    if dst_parent_is_readonly:
        subprocess.check_call("zfs set readonly=on data/dst_parent",
                              shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst_parent/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition["replication-tasks"]["src"]["replicate"] = replicate
    definition["replication-tasks"]["src"]["properties"] = properties
    if compression:
        definition["replication-tasks"]["src"]["compression"] = compression
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    observer = Mock()
    zettarepl.set_observer(observer)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    if dst_exists and properties and encrypted and not dst_parent_encrypted:
        error = observer.call_args_list[-1][0][0]
        assert isinstance(error, ReplicationTaskError), error
        assert error.error == (
            "Unable to send encrypted dataset 'data/src' to existing unencrypted or unrelated "
            "dataset 'data/dst_parent/dst'")
        return

    error = observer.call_args_list[-1][0][0]
    assert isinstance(error, ReplicationTaskSuccess), error

    assert len(list_snapshots(local_shell, "data/dst_parent/dst", False)) == 2

    assert (("test-value" in subprocess.check_output(
        "zfs get test:property data/dst_parent/dst",
        shell=True,
        encoding="utf-8")) == properties)

    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    error = observer.call_args_list[-1][0][0]
    assert isinstance(error, ReplicationTaskSuccess), error

    assert len(list_snapshots(local_shell, "data/dst_parent/dst", False)) == 3


def test_replication_resume__replicate(caplog, kill_timeout):
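    """With `replicate: true`, a leftover resume token from an interrupted
    `zfs send -R` must be discarded and a fresh full replication performed."""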
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src")
    create_dataset("data/src/child")
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/child/blob bs=1M count=1",
        shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    subprocess.check_call(
        "(zfs send -R data/src@2018-10-01_01-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
        f"sleep {kill_timeout}; killall zfs",
        shell=True)
    assert "receive_resume_token\t1-" in subprocess.check_output(
        "zfs get -H receive_resume_token data/dst",
        shell=True,
        encoding="utf-8")

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            replicate: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))

    caplog.set_level(logging.INFO)
    run_replication_test(definition)

    assert any("Discarding receive_resume_token" in record.message
               for record in caplog.get_records("call"))

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/child", False)) == 1