# Exemplo n.º 1
def test_properties_override(transport):
    """Push with `properties: true` plus a `properties-override` of
    `compression: gzip-9`: the target must end up with the overridden value.
    """
    # Best-effort cleanup of state left over from previous runs.
    for cleanup_cmd in ("zfs destroy -r data/src",
                        "zfs receive -A data/dst",
                        "zfs destroy -r data/dst"):
        subprocess.call(cleanup_cmd, shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2019-11-08_15-00",
                          shell=True)

    task_definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: false
            properties: true
            properties-override:
              compression: gzip-9
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    task_definition["replication-tasks"]["src"]["transport"] = transport

    run_replication_test(task_definition)

    # `zfs get -H` prints "<name>\t<property>\t<value>\t<source>".
    get_output = subprocess.check_output(
        "zfs get -H compression data/dst", encoding="utf-8", shell=True)
    assert get_output.split("\n")[0].split("\t")[2] == "gzip-9"
# Exemplo n.º 2
def test_properties_exclude_override_does_not_break_volume(config):
    """Recursive replication of a tree containing a zvol must not fail when
    property exclude/override options (merged in via `config`) are used.
    """
    # Best-effort cleanup of state left over from previous runs.
    for cleanup_cmd in ("zfs destroy -r data/src",
                        "zfs receive -A data/dst",
                        "zfs destroy -r data/dst"):
        subprocess.call(cleanup_cmd, shell=True)

    # A filesystem with a zvol child, snapshotted recursively.
    for setup_cmd in ("zfs create data/src",
                      "zfs create -V 1m data/src/vol",
                      "zfs snapshot -r data/src@2019-11-08_15-00"):
        subprocess.check_call(setup_cmd, shell=True)

    task_definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            properties: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    task_definition["replication-tasks"]["src"].update(config)

    run_replication_test(task_definition)
# Exemplo n.º 3
def test_encrypted_target_replication_from_scratch():
    """`allow-from-scratch: true` lets a push proceed even though the
    encrypted target already holds an unrelated snapshot."""
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # Source and target are both encrypted, each with a snapshot that the
    # other side does not have.
    create_dataset("data/src", True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    create_dataset("data/dst", True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_01-00", shell=True)

    task_definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            allow-from-scratch: true
            retention-policy: none
            retries: 1
    """))
    run_replication_test(task_definition)
def test_creates_intermediate_datasets():
    """Replicating to data/deeply/nested/dst must auto-create the missing
    intermediate datasets and carry over both snapshots."""
    # Best-effort cleanup of state left over from previous runs.
    for cleanup_cmd in ("zfs destroy -r data/src",
                        "zfs receive -A data/deeply",
                        "zfs destroy -r data/deeply"):
        subprocess.call(cleanup_cmd, shell=True)

    for setup_cmd in ("zfs create -V 1M data/src",
                      "zfs snapshot -r data/src@2018-10-01_01-00",
                      "zfs snapshot -r data/src@2018-10-01_02-00"):
        subprocess.check_call(setup_cmd, shell=True)

    task_definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/deeply/nested/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    run_replication_test(task_definition)

    snapshots = list_snapshots(LocalShell(), "data/deeply/nested/dst", False)
    assert len(snapshots) == 2
# Exemplo n.º 5
def test_readonly(readonly, dataset_ops):
    """Exercise the task-level `readonly` setting against an existing target.

    Parametrized by the caller:
        readonly: the task's readonly mode; this body branches on "ignore"
            and "require" (other value(s) fall into the else branch —
            presumably "set"; TODO confirm against the fixture).
        dataset_ops: a pair of (ops, error) where `ops` is a list of
            (dataset, value) pairs to pre-apply via `zfs set readonly=...`
            on the target, and `error` is the expected failure (or None).
    """
    dataset_ops, error = dataset_ops

    # "ignore" never inspects or changes the property, so pre-applied
    # dataset states are irrelevant — skip those combinations.
    if dataset_ops and readonly == "ignore":
        return

    # Best-effort cleanup of state left over from previous runs.
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/sub1", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-02_01-00", shell=True)

    # Seed the target with the first snapshot so it already exists before
    # the replication task runs.
    subprocess.check_call("zfs send -R data/src@2018-10-01_01-00 | zfs recv data/dst", shell=True)

    # Pre-apply the readonly states this parameter combination requires.
    for dataset, op in dataset_ops:
        subprocess.check_call(f"zfs set readonly={op} {dataset}", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["readonly"] = readonly

    if readonly == "require" and error is not None:
        # "require" must refuse to run when the property is in the wrong
        # state, reporting exactly the expected error.
        e = run_replication_test(definition, success=False)

        assert e.error == error
    else:
        run_replication_test(definition)

        if readonly == "ignore":
            # Property untouched: the child keeps the default "off".
            assert subprocess.check_output(
                "zfs get -H -o value readonly data/dst/sub1", shell=True, encoding="utf-8"
            ) == "off\n"
        else:
            # Property set locally on the target root and inherited by
            # its children.
            assert subprocess.check_output(
                "zfs get -H -o value,source readonly data/dst", shell=True, encoding="utf-8"
            ) == "on\tlocal\n"
            assert subprocess.check_output(
                "zfs get -H -o value,source readonly data/dst/sub1", shell=True, encoding="utf-8"
            ) == "on\tinherited from data/dst\n"
def test_replication_resume(caplog, transport, dedup):
    """An interrupted receive that left a `receive_resume_token` on the
    target must be resumed (and logged as resumed), completing the single
    snapshot transfer.
    """
    # Best-effort cleanup of state left over from previous runs.
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # 1 MiB of data so the throttled transfer below cannot finish in time.
    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call(
        "dd if=/dev/zero of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    # Start a throttled resumable receive (-s) and kill it after one
    # second, leaving a partially-received state on data/dst.
    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call(
        "(zfs send data/src@2018-10-01_01-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
        "sleep 1; killall zfs",
        shell=True)

    # Sanity check: the aborted receive actually left a resume token.
    assert "receive_resume_token\t1-" in subprocess.check_output(
        "zfs get -H receive_resume_token data/dst",
        shell=True,
        encoding="utf-8")

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition["replication-tasks"]["src"]["dedup"] = dedup

    # Capture INFO-level logs so the resume message can be asserted below.
    caplog.set_level(logging.INFO)
    run_replication_test(definition)

    assert any("Resuming replication for destination dataset" in record.message
               for record in caplog.get_records("call"))

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst", False)) == 1
# Exemplo n.º 7
def test_name_regex(caplog, transport, all_names, resume):
    """`name-regex` selects which snapshots replicate.

    With ".*" all five snapshots must arrive; with "snap-.*" only the three
    "snap-*" ones.  The log assertions count "doing push" passes —
    presumably one pass per contiguous incremental range the engine has to
    send, with one pass fewer whenever a resumed receive already delivered
    snap-2 (TODO confirm against the replication engine's batching).
    """
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # Interleave "snap-*" and "manual-*" snapshots, deliberately out of
    # numeric order.
    create_dataset("data/src")
    subprocess.check_call("zfs snapshot -r data/src@snap-2", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@manual-1", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@snap-1", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@manual-2", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@snap-3", shell=True)

    if resume:
        # Pre-seed the target with snap-2 via a resumable receive.
        subprocess.check_call(
            "zfs send data/src@snap-2 | zfs recv -s -F data/dst", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: false
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    if all_names:
        definition["replication-tasks"]["src"]["name-regex"] = ".*"
    else:
        definition["replication-tasks"]["src"]["name-regex"] = "snap-.*"

    caplog.set_level(logging.INFO)
    run_replication_test(definition)

    # ".*" matches all 5 snapshots; "snap-.*" matches only the 3 snap-* ones.
    assert len(list_snapshots(LocalShell(), "data/dst",
                              False)) == (5 if all_names else 3)

    logs = [
        record.message for record in caplog.get_records("call")
        if "For replication task 'src': doing push" in record.message
    ]
    if all_names:
        if resume:
            assert len(logs) == 1
        else:
            assert len(logs) == 2
    else:
        if resume:
            assert len(logs) == 2
        else:
            assert len(logs) == 3
# Exemplo n.º 8
def test_replication_resume__recursive_mount(canmount):
    """After resuming an interrupted recursive replication, child datasets
    on the target must end up mounted (or not) per their canmount setting.
    """
    # Best-effort cleanup of state left over from previous runs.
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # Seed the target with a full recursive copy of the first snapshot.
    create_dataset("data/src")
    create_dataset("data/src/child")
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs send -R data/src@2018-10-01_01-00 | zfs recv -s -F data/dst", shell=True)
    if not canmount:
        subprocess.check_call("zfs set canmount=off data/dst", shell=True)
        subprocess.check_call("zfs set canmount=off data/dst/child", shell=True)

    # 1 MiB of new data so the throttled incremental below cannot finish.
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    # Interrupt the throttled incremental receive after one second,
    # leaving a resume token on the target.
    subprocess.check_call("(zfs send -i data/src@2018-10-01_01-00 data/src@2018-10-01_02-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
                          "sleep 1; killall zfs", shell=True)

    # Sanity check: the aborted receive actually left a resume token.
    assert "receive_resume_token\t1-" in subprocess.check_output("zfs get -H receive_resume_token data/dst",
                                                                 shell=True, encoding="utf-8")

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))

    run_replication_test(definition)

    # The resumed replication must not leave the child unmounted (or
    # mounted, when canmount=off was set above).
    mounted = subprocess.check_output("zfs get -H -o value mounted data/dst/child", shell=True, encoding="utf-8")
    if canmount:
        assert mounted == "yes\n"
    else:
        assert mounted == "no\n"
# Exemplo n.º 9
def test_property_receive(transport):
    """An unprivileged receive that cannot set a custom property must
    surface a warning (not fail), and the warning must repeat on a second,
    incremental run.
    """
    # Best-effort cleanup of state left over from previous runs.
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # Run the transport as the unprivileged "user" account — matching the
    # `zfs allow user ...` / `chown user:user` setup below — so receiving
    # the custom property is denied. (The scraped original contained a
    # redacted placeholder "******" here.)
    transport["username"] = "user"

    # Source: a snapshot taken before and after setting a custom property,
    # so the incremental stream carries the property change.
    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2021-03-10_12-00",
                          shell=True)
    subprocess.check_call("zfs set truenas:customproperty=1 data/src",
                          shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2021-03-10_12-01",
                          shell=True)

    # Target: pre-seeded with the first snapshot; the unprivileged user is
    # only granted receive/create/mount (not property permissions).
    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/dst", shell=True)
    subprocess.check_call("zfs allow user receive,create,mount data/dst/dst",
                          shell=True)
    subprocess.check_call(
        "zfs send data/src@2021-03-10_12-00 | zfs recv -s -F data/dst/dst",
        shell=True)
    subprocess.check_call("zfs umount data/dst/dst", shell=True)
    subprocess.check_call("chown user:user /mnt/data/dst/dst", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst/dst
            recursive: false
            properties: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 2
    """))
    definition["replication-tasks"]["src"]["transport"] = transport

    warning = "cannot receive truenas:customproperty property on data/dst/dst: permission denied"

    # First run: replication succeeds but must carry the warning.
    assert warning in run_replication_test(definition).warnings

    # Second run with one more snapshot: the warning must repeat.
    subprocess.check_call("zfs snapshot -r data/src@2021-03-10_12-02",
                          shell=True)

    assert warning in run_replication_test(definition).warnings
# Exemplo n.º 10
def test_readonly_dst_does_not_exist(readonly):
    """When the target chain does not exist yet, the task's `readonly`
    setting must be applied to (or ignored on) the datasets it creates."""
    # Best-effort cleanup of state left over from previous runs.
    for cleanup_cmd in ("zfs destroy -r data/src",
                        "zfs receive -A data/dst",
                        "zfs destroy -r data/dst"):
        subprocess.call(cleanup_cmd, shell=True)

    for setup_cmd in ("zfs create data/src",
                      "zfs create data/src/sub1",
                      "zfs snapshot -r data/src@2018-10-01_01-00",
                      "zfs snapshot -r data/src@2018-10-02_01-00"):
        subprocess.check_call(setup_cmd, shell=True)

    task_definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst/child
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    task_definition["replication-tasks"]["src"]["readonly"] = readonly

    run_replication_test(task_definition)

    def get_property(command):
        # Run a `zfs get` and return its raw stdout.
        return subprocess.check_output(command, shell=True, encoding="utf-8")

    if readonly == "ignore":
        # Property untouched: the created child keeps the default "off".
        assert get_property(
            "zfs get -H -o value readonly data/dst/child/sub1") == "off\n"
    else:
        # Property set locally on the created root, inherited by children.
        assert get_property(
            "zfs get -H -o value,source readonly data/dst/child"
        ) == "on\tlocal\n"
        assert get_property(
            "zfs get -H -o value,source readonly data/dst/child/sub1"
        ) == "on\tinherited from data/dst/child\n"
# Exemplo n.º 11
def test_nothing_to_replicate():
    """A source whose only snapshot does not match the naming schema must
    yield a descriptive error instead of replicating anything."""
    # Best-effort cleanup of state left over from previous runs.
    for cleanup_cmd in ("zfs destroy -r data/src",
                        "zfs receive -A data/dst",
                        "zfs destroy -r data/dst"):
        subprocess.call(cleanup_cmd, shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    # "manual-snap" does not match the "%Y-%m-%d_%H-%M" schema below.
    subprocess.check_call("zfs snapshot data/src@manual-snap", shell=True)

    task_definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
    """))

    result = run_replication_test(task_definition, success=False)
    assert result.error == (
        "Dataset 'data/src' does not have any matching snapshots to replicate"
    )
# Exemplo n.º 12
def test_zvol_replication__onto_existing_dataset():
    """Pushing a zvol onto an existing filesystem target must fail with a
    clear type-mismatch error."""
    # Best-effort cleanup of state left over from previous runs.
    for cleanup_cmd in ("zfs destroy -r data/src",
                        "zfs receive -A data/dst",
                        "zfs destroy -r data/dst"):
        subprocess.call(cleanup_cmd, shell=True)

    for setup_cmd in ("zfs create -V 1M data/src",
                      "zfs snapshot -r data/src@2018-10-01_01-00",
                      "zfs snapshot -r data/src@2018-10-01_02-00"):
        subprocess.check_call(setup_cmd, shell=True)

    # The target already exists — as a plain filesystem, not a volume.
    subprocess.check_call("zfs create data/dst", shell=True)

    task_definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    result = run_replication_test(task_definition, success=False)

    assert result.error == "Source 'data/src' is a volume, but target 'data/dst' already exists and is a filesystem"
def test_rewording_is_not_earlier_than_it(transport, direction):
    """Snapshots created out of chronological order must produce an error
    saying one snapshot "is newer than" another "but has an older date".

    NOTE(review): a second function with this exact name is defined later
    in this module; at import time that later definition shadows this one,
    so pytest only collects the later version. Consider renaming one.
    """
    # Best-effort cleanup of state left over from previous runs.
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # 03-00 is snapshotted *before* 02-00, creating the date inversion.
    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["direction"] = direction
    definition["replication-tasks"]["src"]["transport"] = transport
    # NOTE(review): the schema is assigned as a plain string here, while
    # other tasks in this file pass a list — presumably both are accepted
    # by the schema validator; TODO confirm.
    if direction == "push":
        definition["replication-tasks"]["src"][
            "also-include-naming-schema"] = "%Y-%m-%d_%H-%M"
    else:
        definition["replication-tasks"]["src"][
            "naming-schema"] = "%Y-%m-%d_%H-%M"

    error = run_replication_test(definition, False)
    assert ("is newer than" in error.error
            and "but has an older date" in error.error)
# Exemplo n.º 14
def test_zvol_replication__onto_existing_encrypted_unrelated_dataset():
    """Pushing a zvol onto an existing, unrelated, encrypted zvol must be
    refused with an explicit error."""
    # Best-effort cleanup of state left over from previous runs.
    for cleanup_cmd in ("zfs destroy -r data/src",
                        "zfs receive -A data/dst",
                        "zfs destroy -r data/dst"):
        subprocess.call(cleanup_cmd, shell=True)

    subprocess.check_call("zfs create -V 10M data/src", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00", shell=True)
    # Target parent is encrypted; the target zvol already holds random
    # data unrelated to the source.
    create_dataset("data/dst", encrypted=True)
    subprocess.check_call("zfs create -V 10M data/dst/vol", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/dev/zvol/data/dst/vol bs=1M count=10", shell=True)

    task_definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst/vol
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    result = run_replication_test(task_definition, success=False)

    assert result.error == "Unable to send dataset 'data/src' to existing unrelated encrypted dataset 'data/dst/vol'"
# Exemplo n.º 15
def test_encrypted_target_but_unencrypted_target_exists():
    """Requesting target encryption must fail when the target dataset
    already exists unencrypted."""
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src", encrypted=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    # Existing *unencrypted* target, conflicting with the task's
    # encryption request below.
    create_dataset("data/dst")

    task_definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            properties: false
            encryption:
              key: password
              key-format: passphrase
              key-location: $TrueNAS
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    result = run_replication_test(task_definition, False)
    assert "but it already exists and is not encrypted" in result.error
def test_target_without_snapshots_but_with_data(zvol, mounted, snapdir):
    """A target that has no snapshots but already contains data must make
    replication refuse to overwrite it.

    Parametrized over zvol-vs-filesystem targets, mounted-vs-unmounted,
    and snapdir visible-vs-hidden.
    """
    # zvols have no mountpoint or snapdir, so those combinations are moot.
    if zvol and (not mounted or snapdir):
        return

    # Best-effort cleanup of state left over from previous runs.
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    if zvol:
        subprocess.check_call("zfs create -V 1m data/src", shell=True)
    else:
        subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00",
                          shell=True)

    if zvol:
        # Write random data straight into the target volume's device node.
        subprocess.check_call("zfs create -V 20m data/dst", shell=True)
        subprocess.check_call(
            "dd if=/dev/urandom of=/dev/zvol/data/dst bs=15m count=1",
            shell=True)
    else:
        subprocess.check_call("zfs create data/dst", shell=True)
        if snapdir:
            subprocess.check_call("zfs set snapdir=visible data/dst",
                                  shell=True)
        # Small file when the dataset stays mounted; a large (15m) one when
        # it will be unmounted — presumably so the data still registers in
        # the "used" property after unmounting (TODO confirm).
        if mounted:
            bs = "1k"
        else:
            bs = "15m"
        subprocess.check_call(
            f"dd if=/dev/urandom of=/mnt/data/dst/test bs={bs} count=1",
            shell=True)
        if not mounted:
            subprocess.check_call("zfs unmount data/dst", shell=True)
    time.sleep(5)  # "used" property is not updated immediately

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    error = run_replication_test(definition, success=False)

    assert "Refusing to overwrite existing data" in error.error
# Exemplo n.º 17
def test_multiple_source_datasets():
    """A task with two source datasets replicates each into its own child
    of the target and brings both up to the latest snapshot."""
    # Best-effort cleanup of state left over from previous runs.
    for cleanup_cmd in ("zfs destroy -r data/src",
                        "zfs receive -A data/dst",
                        "zfs destroy -r data/dst"):
        subprocess.call(cleanup_cmd, shell=True)

    for setup_cmd in ("zfs create data/src",
                      "zfs create data/src/internal",
                      "zfs create data/src/internal/DISK1",
                      "zfs create data/src/internal/DISK1/Apps",
                      "zfs create data/src/internal/DISK1/ISO",
                      "zfs snapshot -r data/src@2018-10-01_01-00"):
        subprocess.check_call(setup_cmd, shell=True)

    # Seed the target with the first snapshot of the DISK1 subtree.
    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/core", shell=True)
    subprocess.check_call("zfs send -R data/src/internal/DISK1@2018-10-01_01-00 | "
                          "zfs recv data/dst/core/tsaukpaetra", shell=True)

    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00", shell=True)

    task_definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset:
              - data/src/internal/DISK1/Apps
              - data/src/internal/DISK1/ISO
            target-dataset: data/dst/core/tsaukpaetra
            recursive: false
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))

    run_replication_test(task_definition)

    shell = LocalShell()
    assert len(list_snapshots(shell, "data/dst/core/tsaukpaetra/Apps", False)) == 2
    assert len(list_snapshots(shell, "data/dst/core/tsaukpaetra/ISO", False)) == 2
# Exemplo n.º 18
def test_create_encrypted_target(encryption, key_location, transport):
    """Creating an encrypted replication target: the key must end up where
    `key-location` requested.

    With "$TrueNAS" the key is checked via the middleware
    (`midclt ... pool.dataset.export_key`) — skipped for passphrase-format
    keys, which are not asserted here; otherwise the dataset's
    `keylocation` property must point at the key file.
    """
    encryption["key-location"] = key_location

    # Best-effort cleanup, including a stale key file from a previous run.
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)
    if os.path.exists("/tmp/test.key"):
        os.unlink("/tmp/test.key")

    create_dataset("data/src", encrypted=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            properties: false
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["encryption"] = encryption
    definition["replication-tasks"]["src"]["transport"] = transport
    run_replication_test(definition)

    if key_location == "$TrueNAS":
        if encryption["key-format"] != "passphrase":
            # The middleware must have stored (and be able to export) the
            # same key the task was configured with.
            assert (
                subprocess.check_output(["midclt", "call", "-job", "pool.dataset.export_key", "data/dst"]).decode().strip() ==
                encryption["key"]
            )
    else:
        # The created dataset must reference the key file on disk.
        assert (
            subprocess.check_output("zfs get -H -o value keylocation data/dst", shell=True).decode().strip() ==
            f'file://{encryption["key-location"]}'
        )
# Exemplo n.º 19
def test_push_replication(compression):
    """Push over SSH to localhost with each supported stream `compression`
    setting."""
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src")
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    task_definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: ssh
              hostname: 127.0.0.1
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
            retries: 1
    """))
    # Fill in the credentials/keys needed to SSH into localhost.
    ssh_transport = task_definition["replication-tasks"]["src"]["transport"]
    set_localhost_transport_options(ssh_transport)
    task_definition["replication-tasks"]["src"]["compression"] = compression

    run_replication_test(task_definition)
# Exemplo n.º 20
def test_keeps_mount_structure():
    """After recursive replication, nested child mounts must line up so a
    file in the deepest source dataset is visible at the mirrored target
    path."""
    # Best-effort cleanup of state left over from previous runs.
    for cleanup_cmd in ("zfs destroy -r data/src",
                        "zfs receive -A data/dst",
                        "zfs destroy -r data/dst"):
        subprocess.call(cleanup_cmd, shell=True)

    for setup_cmd in ("zfs create data/src",
                      "zfs create data/src/child",
                      "zfs create data/src/child/grandchild"):
        subprocess.check_call(setup_cmd, shell=True)
    # Leave a marker file in the deepest dataset.
    with open("/mnt/data/src/child/grandchild/file", "w") as f:
        pass
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    task_definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    run_replication_test(task_definition)

    # A second, incremental run must keep the mount structure intact too.
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00",
                          shell=True)
    run_replication_test(task_definition)

    assert os.path.exists("/mnt/data/dst/child/grandchild/file")
# Exemplo n.º 21
def test_rewording_is_not_earlier_than_it(transport, direction):
    """Snapshots created out of chronological order must produce an error
    saying one snapshot "is newer than" another "but has an older date".

    NOTE(review): this redefinition shadows an earlier function of the
    same name in this module — only this version is collected by pytest.
    """
    if transport["type"] == "ssh+netcat":
        uname = os.uname()
        if uname.sysname == "FreeBSD" and uname.release.startswith("12"):
            # FIXME: https://jira.ixsystems.com/browse/NAS-106452
            return

    # Best-effort cleanup of state left over from previous runs.
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # 03-00 is snapshotted *before* 02-00, creating the date inversion.
    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["direction"] = direction
    definition["replication-tasks"]["src"]["transport"] = transport
    # NOTE(review): the schema is assigned as a plain string here, while
    # other tasks in this file pass a list — presumably both are accepted
    # by the schema validator; TODO confirm.
    if direction == "push":
        definition["replication-tasks"]["src"][
            "also-include-naming-schema"] = "%Y-%m-%d_%H-%M"
    else:
        definition["replication-tasks"]["src"][
            "naming-schema"] = "%Y-%m-%d_%H-%M"

    error = run_replication_test(definition, False)
    assert ("is newer than" in error.error
            and "but has an older date" in error.error)
def test_encrypted_target(has_data):
    """Replicating into a child of an encrypted parent succeeds when the
    target dataset is empty and errors out when it already contains data."""
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst_parent", shell=True)

    create_dataset("data/src")
    for name in ("2018-10-01_01-00", "2018-10-01_02-00"):
        subprocess.check_call(f"zfs snapshot data/src@{name}", shell=True)

    # Encrypted parent with a plain child dataset as the replication target.
    create_dataset("data/dst_parent", True)
    subprocess.check_call("zfs create data/dst_parent/dst", shell=True)

    if has_data:
        # Touch a file so the target dataset is non-empty.
        with open("/mnt/data/dst_parent/dst/file", "w"):
            pass

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst_parent/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    # With pre-existing data the task must fail; otherwise it must succeed.
    error = run_replication_test(definition, success=not has_data)

    if has_data:
        assert "does not have snapshots but has data" in error.error
Exemplo n.º 23
0
def test_readonly_require_zvol():
    """`readonly: require` must refuse to replicate into an existing target
    zvol that does not have readonly=on set, with an actionable error."""
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # Source zvol with one snapshot, plus a pre-existing writable target zvol.
    subprocess.check_call("zfs create -V 1M data/src", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)
    subprocess.check_call("zfs create -V 1M data/dst", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            readonly: require
            retention-policy: none
            retries: 1
    """))
    error = run_replication_test(definition, success=False)

    expected = (
        "Target dataset 'data/dst' exists and does not have readonly=on property, but replication task is set up to "
        "require this property. Refusing to replicate. Please run \"zfs set readonly=on data/dst\" on the target "
        "system to fix this.")
    assert error.error == expected
def test_preserves_clone_origin():
    """With `replicate: true`, a clone must arrive on the target as a clone
    (its origin rewritten to the target-side snapshot) rather than being
    expanded into a full copy of the data."""

    def zfs_get(args):
        # Return the value column of a single-property `zfs get -H` query.
        out = subprocess.check_output(f"zfs get -H {args}", encoding="utf-8",
                                      shell=True)
        return out.split("\n")[0].split("\t")[2]

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # Source hierarchy holding a 1 MiB blob that the clone will share.
    for dataset in ("data/src", "data/src/iocage", "data/src/iocage/child",
                    "data/src/iocage/child/dataset"):
        subprocess.check_call(f"zfs create {dataset}", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/iocage/child/dataset/blob bs=1M count=1",
        shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2019-11-08_14-00",
                          shell=True)

    # Clone the 14-00 snapshot into a sibling branch, then snapshot again.
    for dataset in ("data/src/iocage/another",
                    "data/src/iocage/another/child"):
        subprocess.check_call(f"zfs create {dataset}", shell=True)
    subprocess.check_call(
        "zfs clone data/src/iocage/child/dataset@2019-11-08_14-00 "
        "data/src/iocage/another/child/clone",
        shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2019-11-08_15-00",
                          shell=True)

    # Sanity-check the source: the clone has the expected origin and, being
    # a clone, uses far less space than the 1 MiB blob it shares.
    assert (zfs_get("origin data/src/iocage/another/child/clone") ==
            "data/src/iocage/child/dataset@2019-11-08_14-00")
    assert int(zfs_get("-p used data/src/iocage/another/child/clone")) < 2e6

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            properties: true
            replicate: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))

    run_replication_test(definition)

    # On the target the clone relationship must be preserved, with the
    # origin rewritten to the target-side dataset.
    assert (zfs_get("origin data/dst/iocage/another/child/clone") ==
            "data/dst/iocage/child/dataset@2019-11-08_14-00")
    assert int(zfs_get("-p used data/dst/iocage/another/child/clone")) < 2e6
Exemplo n.º 25
0
def test_replication_mount__skip_parent():
    """After replicating into a readonly target tree, the grandchild dataset
    must end up mounted even though its parent (data/dst/server/UNIX/var) has
    mountpoint=none and so cannot itself be mounted."""
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    try:
        create_dataset("data/src")

        # Source tree with custom mountpoints outside the pool hierarchy.
        create_dataset("data/src/UNIX")
        subprocess.check_call("zfs set mountpoint=/UNIX data/src/UNIX",
                              shell=True)

        create_dataset("data/src/UNIX/var")
        subprocess.check_call("zfs set mountpoint=/var data/src/UNIX/var",
                              shell=True)

        create_dataset("data/src/UNIX/var/audit")

        subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                              shell=True)

        # Pre-create the target tree: the intermediate `var` dataset is
        # unmountable (mountpoint=none) and the whole tree is readonly.
        create_dataset("data/dst")
        create_dataset("data/dst/server")
        create_dataset("data/dst/server/UNIX")
        create_dataset("data/dst/server/UNIX/var")
        subprocess.check_call(
            "zfs set mountpoint=none data/dst/server/UNIX/var", shell=True)
        create_dataset("data/dst/server/UNIX/var/audit")
        subprocess.check_call(
            "zfs set mountpoint=/data/dst/server/var/audit data/dst/server/UNIX/var/audit",
            shell=True)
        subprocess.check_call("zfs set readonly=on data/dst/server",
                              shell=True)

        # properties: false keeps the target's own mountpoint settings;
        # readonly: set lets the task manage the readonly property itself.
        definition = yaml.safe_load(
            textwrap.dedent("""\
            timezone: "UTC"
    
            periodic-snapshot-tasks:
              src:
                dataset: data/src
                recursive: true
                lifetime: PT1H
                naming-schema: "%Y-%m-%d_%H-%M"
                schedule:
                  minute: "0"
    
            replication-tasks:
              src:
                direction: push
                transport:
                  type: local
                source-dataset: data/src/UNIX
                target-dataset: data/dst/server/UNIX
                recursive: true
                properties: false
                periodic-snapshot-tasks:
                  - src
                auto: true
                retention-policy: none
                readonly: set
        """))

        run_replication_test(definition)

        # The grandchild must be mounted despite its unmountable parent.
        mounted = subprocess.check_output(
            "zfs get -H -o value mounted data/dst/server/UNIX/var/audit",
            shell=True,
            encoding="utf-8")
        assert mounted == "yes\n"
    finally:
        # Leave no source dataset behind for subsequent tests.
        subprocess.call("zfs destroy -r data/src", shell=True)
Exemplo n.º 26
0
def test_replication_resume__replicate(caplog, kill_timeout):
    """When a partially received stream is found and the task uses
    `replicate: true`, the receive_resume_token must be discarded (logged)
    and replication must still complete, leaving exactly one snapshot on
    the target dataset and on its child."""
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    # Source with enough data (two 1 MiB blobs) that a throttled send can
    # be interrupted mid-stream below.
    create_dataset("data/src")
    create_dataset("data/src/child")
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/child/blob bs=1M count=1",
        shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    # Start a throttled resumable receive (-s) and kill it after
    # kill_timeout seconds to leave a partially received state behind.
    subprocess.check_call(
        "(zfs send -R data/src@2018-10-01_01-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
        f"sleep {kill_timeout}; killall zfs",
        shell=True)
    # Verify the interruption left a resume token on the target.
    assert "receive_resume_token\t1-" in subprocess.check_output(
        "zfs get -H receive_resume_token data/dst",
        shell=True,
        encoding="utf-8")

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            replicate: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))

    # Capture INFO-level logs so the discard message below is recorded.
    caplog.set_level(logging.INFO)
    run_replication_test(definition)

    assert any("Discarding receive_resume_token" in record.message
               for record in caplog.get_records("call"))

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/child", False)) == 1
def test_replicate_to_existing_dataset_structure(recursive, exclude,
                                                 src_has_child,
                                                 dst_child_mounted,
                                                 dst_child_has_own_contents,
                                                 deeply_nested, transport):
    """Pushing into a pre-existing target dataset tree may only succeed when
    replication will completely overwrite every existing target child;
    otherwise the task must refuse to overwrite existing data.

    Parameter combinations that make no sense (an exclusion without
    recursion, or a mounted child that also has its own contents) are
    skipped up front.
    """
    if not recursive and exclude:
        return
    if dst_child_mounted and dst_child_has_own_contents:
        return

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    src = "data/src"
    create_dataset(src)
    if src_has_child:
        if deeply_nested:
            src = "data/src/deep"
            create_dataset(src)
        create_dataset(f"{src}/child")
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    # Pre-create a mirroring structure on the target side.
    dst = "data/dst"
    create_dataset(dst)
    if deeply_nested:
        dst = "data/dst/deep"
        create_dataset(dst)
    create_dataset(f"{dst}/child")
    if not dst_child_mounted:
        subprocess.check_call(f"zfs umount {dst}/child", shell=True)
        if dst_child_has_own_contents:
            # The child is unmounted, so this file lands in the parent
            # dataset's directory tree, making the target "have data".
            with open(f"/mnt/{dst}/child/file", "w"):
                pass

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: false
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["recursive"] = recursive
    if exclude:
        definition["replication-tasks"]["src"]["exclude"] = [f"{src}/child"]
    definition["replication-tasks"]["src"]["transport"] = transport

    # Overwriting the existing structure is only safe when a recursive,
    # exclusion-free replication of a matching source structure will
    # replace everything and the target holds no foreign data.
    if not recursive or exclude or not src_has_child or dst_child_has_own_contents:
        error = run_replication_test(definition, success=False)

        assert "Refusing to overwrite existing data" in error.error
    else:
        run_replication_test(definition)
Exemplo n.º 28
0
def test_dst(naming_schemas):
    """Periodic snapshots taken around a DST fall-back transition
    (Europe/Moscow, 2010-10-31) must not collide, and replication must
    handle both plain and UTC-offset-suffixed snapshot names.

    `naming_schemas` supplies the two schemas under test — judging by the
    expected names below, one produces plain names and one appends the
    UTC offset (presumably via %z) — TODO confirm against the fixture.
    """
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent(f"""\
        timezone: "Europe/Moscow"

        periodic-snapshot-tasks:
          task1:
            dataset: data/src
            recursive: true
            naming-schema: "{naming_schemas[0]}"
            schedule:
              minute: "*"
              hour: "*"
              day-of-month: "*"
              month: "*"
              day-of-week: "*"
          task2:
            dataset: data/src
            recursive: true
            naming-schema: "{naming_schemas[1]}"
            schedule:
              minute: "*"
              hour: "*"
              day-of-month: "*"
              month: "*"
              day-of-week: "*"
    """))

    # 22:00 UTC on Oct 30 is 02:00 local time (offset +0400, summer time).
    run_periodic_snapshot_test(
        definition,
        datetime(2010, 10, 30, 22, 0, 0,
                 tzinfo=pytz.UTC).astimezone(pytz.timezone("Europe/Moscow")))

    local_shell = LocalShell()
    assert list_snapshots(local_shell, "data/src", False) == [
        Snapshot("data/src", "auto-2010-10-31-02-00"),
        Snapshot("data/src", "auto-2010-10-31-02-00:0400"),
    ]

    # One hour later (23:00 UTC) the clocks have fallen back and it is
    # 02:00 local time again, now at offset +0300: the new snapshot's
    # offset suffix keeps it from clashing with the ones above.
    run_periodic_snapshot_test(
        definition,
        datetime(2010, 10, 30, 23, 0, 0,
                 tzinfo=pytz.UTC).astimezone(pytz.timezone("Europe/Moscow")),
        False,
    )

    assert list_snapshots(local_shell, "data/src", False) == [
        Snapshot("data/src", "auto-2010-10-31-02-00"),
        Snapshot("data/src", "auto-2010-10-31-02-00:0300"),
        Snapshot("data/src", "auto-2010-10-31-02-00:0400"),
    ]

    # Replication lists both naming schemas, so all three snapshots —
    # plain and offset-suffixed — must be carried over to the target.
    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
            - "auto-%Y-%m-%d-%H-%M"
            - "auto-%Y-%m-%d-%H-%M%z"
            auto: false
            retention-policy: none
            retries: 1
    """))
    run_replication_test(definition)

    assert list_snapshots(local_shell, "data/dst", False) == [
        Snapshot("data/dst", "auto-2010-10-31-02-00"),
        Snapshot("data/dst", "auto-2010-10-31-02-00:0300"),
        Snapshot("data/dst", "auto-2010-10-31-02-00:0400"),
    ]