Example #1
def test_push_replication(transport, properties):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs set test:property=test-value data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition["replication-tasks"]["src"]["properties"] = properties
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 2

    assert (
        ("test-value" in subprocess.check_output("zfs get test:property data/dst", shell=True, encoding="utf-8")) ==
        properties
    )

    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 3
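The transport and properties arguments above come from pytest parametrization. A minimal sketch of the decorators such a test is typically run with (the concrete values here are an assumption, not taken from the original module):

import pytest

@pytest.mark.parametrize("properties", [True, False])
@pytest.mark.parametrize("transport", [
    {"type": "local"},                         # same-host replication
    {"type": "ssh", "hostname": "localhost"},  # loopback SSH transport
])
def test_push_replication(transport, properties):
    ...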
Example #2
def test_push_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)

    definition = Definition.from_data(yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """)))

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 2

    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 3
Example #3
def test_source_retention_multiple_sources():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/a", shell=True)
    subprocess.check_call("zfs create data/src/b", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)
    subprocess.check_call("zfs snapshot -r data/dst@2018-10-01_00-00", shell=True)
    subprocess.check_call("zfs snapshot -r data/dst@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot -r data/dst@2018-10-01_02-00", shell=True)

    definition = Definition.from_data(yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: [data/src/a, data/src/b]
            target-dataset: data/dst
            recursive: false
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: source
            hold-pending-snapshots: true
    """)))

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._run_remote_retention(datetime(2018, 10, 1, 3, 0))

    assert list_snapshots(local_shell, "data/dst/a", False) == [Snapshot("data/dst/a", "2018-10-01_02-00")]
    assert list_snapshots(local_shell, "data/dst/b", False) == [Snapshot("data/dst/b", "2018-10-01_02-00")]
Example #4
def test_does_not_remove_the_last_snapshot_left():
    subprocess.call("zfs destroy -r data/src", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2020-05-07_00-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2020-05-23_00-00", shell=True)

    data = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: false
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "*"
              hour: "*"
              day-of-month: "*"
              month: "*"
              day-of-week: "*"
            lifetime: P30D
    """))
    definition = Definition.from_data(data)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._run_local_retention(datetime(2020, 6, 25, 0, 0))

    assert list_snapshots(local_shell, "data/src",
                          False) == [Snapshot("data/src", "2020-05-23_00-00")]
Example #5
def test_creates_intermediate_datasets():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/deeply", shell=True)
    subprocess.call("zfs destroy -r data/deeply", shell=True)

    subprocess.check_call("zfs create -V 1M data/src", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00",
                          shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/deeply/nested/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    run_replication_test(definition)

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/deeply/nested/dst",
                              False)) == 2
Example #6
def test_zfs_hold(hold):
    try:
        subprocess.call("zfs destroy -r data/src", shell=True)
        subprocess.call("zfs destroy -r data/dst", shell=True)

        subprocess.check_call("zfs create data/dst", shell=True)
        for snapshot in snapshots:
            subprocess.check_call(
                f"zfs snapshot {snapshot.dataset}@{snapshot.name}", shell=True)
        for i in hold:
            snapshot = snapshots[i]
            subprocess.check_call(
                f"zfs hold keep {snapshot.dataset}@{snapshot.name}",
                shell=True)

        local_shell = LocalShell()
        destroy_snapshots(local_shell, snapshots)

        assert list_snapshots(local_shell, "data/dst",
                              False) == [snapshots[i] for i in hold]
    finally:
        for snapshot in snapshots:
            subprocess.call(
                f"zfs release keep {snapshot.dataset}@{snapshot.name}",
                shell=True)
Example #7
def test_push_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/child", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))

    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert sum(1 for m in zettarepl.observer.call_args_list
               if isinstance(m[0][0], ReplicationTaskSuccess)) == 1

    subprocess.check_call("zfs destroy -r data/src/child", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert sum(1 for m in zettarepl.observer.call_args_list
               if isinstance(m[0][0], ReplicationTaskSuccess)) == 2

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst/child", False)) == 1
Example #8
def test_zvol_replication(as_root):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    if as_root:
        subprocess.check_call("zfs create -V 1M data/src", shell=True)
    else:
        subprocess.check_call("zfs create data/src", shell=True)
        subprocess.check_call("zfs create -V 1M data/src/zvol", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00",
                          shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 2
    if not as_root:
        assert len(list_snapshots(local_shell, "data/dst/zvol", False)) == 2
Example #9
def test_replication_resume(caplog):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call(
        "dd if=/dev/zero of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call(
        "(zfs send data/src@2018-10-01_01-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
        "sleep 1; killall zfs",
        shell=True)

    assert "receive_resume_token\t1-" in subprocess.check_output(
        "zfs get -H receive_resume_token data/dst",
        shell=True,
        encoding="utf-8")

    definition = Definition.from_data(
        yaml.safe_load(
            textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          - id: src
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          - id: src
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """)))

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._run_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))

    assert any("Resuming replication for dst_dataset" in record.message
               for record in caplog.get_records("call"))

    assert len(list_snapshots(local_shell, "data/dst", False)) == 1
Example #10
def test_multiple_source_datasets():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/internal", shell=True)
    subprocess.check_call("zfs create data/src/internal/DISK1", shell=True)
    subprocess.check_call("zfs create data/src/internal/DISK1/Apps", shell=True)
    subprocess.check_call("zfs create data/src/internal/DISK1/ISO", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/core", shell=True)
    subprocess.check_call("zfs send -R data/src/internal/DISK1@2018-10-01_01-00 | "
                          "zfs recv data/dst/core/tsaukpaetra", shell=True)

    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset:
              - data/src/internal/DISK1/Apps
              - data/src/internal/DISK1/ISO
            target-dataset: data/dst/core/tsaukpaetra
            recursive: false
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))

    run_replication_test(definition)

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst/core/tsaukpaetra/Apps", False)) == 2
    assert len(list_snapshots(local_shell, "data/dst/core/tsaukpaetra/ISO", False)) == 2
Example #11
def test_replication_resume(caplog, transport, dedup):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call(
        "dd if=/dev/zero of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call(
        "(zfs send data/src@2018-10-01_01-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
        "sleep 1; killall zfs",
        shell=True)

    assert "receive_resume_token\t1-" in subprocess.check_output(
        "zfs get -H receive_resume_token data/dst",
        shell=True,
        encoding="utf-8")

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition["replication-tasks"]["src"]["dedup"] = dedup

    caplog.set_level(logging.INFO)
    run_replication_test(definition)

    assert any("Resuming replication for destination dataset" in record.message
               for record in caplog.get_records("call"))

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst", False)) == 1
Example #12
def test_name_regex(caplog, transport, all_names, resume):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    create_dataset("data/src")
    subprocess.check_call("zfs snapshot -r data/src@snap-2", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@manual-1", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@snap-1", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@manual-2", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@snap-3", shell=True)

    if resume:
        subprocess.check_call(
            "zfs send data/src@snap-2 | zfs recv -s -F data/dst", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: false
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    if all_names:
        definition["replication-tasks"]["src"]["name-regex"] = ".*"
    else:
        definition["replication-tasks"]["src"]["name-regex"] = "snap-.*"

    caplog.set_level(logging.INFO)
    run_replication_test(definition)

    assert len(list_snapshots(LocalShell(), "data/dst",
                              False)) == (5 if all_names else 3)

    logs = [
        record.message for record in caplog.get_records("call")
        if "For replication task 'src': doing push" in record.message
    ]
    if all_names:
        if resume:
            assert len(logs) == 1
        else:
            assert len(logs) == 2
    else:
        if resume:
            assert len(logs) == 2
        else:
            assert len(logs) == 3
Example #13
def test_parent_is_empty_child_is_not():
    subprocess.call("zfs destroy -r data/src", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/child", shell=True)
    subprocess.check_call("zfs create data/src/child/grandchild", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/file_1 bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2020-04-21-20-27",
                          shell=True)

    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/child/grandchild/file_1 bs=1M count=1",
        shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          internal:
            dataset: data/src
            recursive: true
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "*"
              hour: "*"
              day-of-month: "*"
              month: "*"
              day-of-week: "*"
            allow-empty: false
    """))

    run_periodic_snapshot_test(definition, datetime(2020, 4, 21, 20, 28))

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/src", False)) == 2
    assert len(list_snapshots(local_shell, "data/src/child", False)) == 2
    assert len(list_snapshots(local_shell, "data/src/child/grandchild",
                              False)) == 2
Example #14
def test_does_not_remove_the_last_snapshot_left(snapshots__removal_dates__result):
    snapshots, removal_dates, result = snapshots__removal_dates__result

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/src2", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/child", shell=True)
    subprocess.check_call("zfs create data/src2", shell=True)
    for snapshot in snapshots:
        subprocess.check_call(f"zfs snapshot {snapshot}", shell=True)

    data = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: false
            naming-schema: "%Y-%m-%d-%H-%M"
            schedule:
              minute: "*"
              hour: "*"
              day-of-month: "*"
              month: "*"
              day-of-week: "*"
            lifetime: P30D
    """))
    definition = Definition.from_data(data)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell, use_removal_dates=True)
    zettarepl.set_tasks(definition.tasks)
    with patch("zettarepl.zettarepl.get_removal_dates", Mock(return_value=removal_dates)):
        zettarepl._run_local_retention(datetime(2021, 4, 19, 17, 0))

    assert list_snapshots(local_shell, "data/src", False) + list_snapshots(local_shell, "data/src2", False) == [
        Snapshot(*snapshots[i].split("@")) for i in result
    ]
Example #15
def test_push_remote_retention(retention_policy, remains):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_00-00", shell=True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_02-00", shell=True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_03-00", shell=True)

    data = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
    """))
    data["replication-tasks"]["src"].update(**retention_policy)
    definition = Definition.from_data(data)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._run_remote_retention(datetime(2018, 10, 1, 3, 0))

    assert list_snapshots(local_shell, "data/dst", False) == remains
Example #16
def test_hold_pending_snapshots(hold_pending_snapshots, remains):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_00-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_00-00", shell=True)

    definition = Definition.from_data(
        yaml.safe_load(
            textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          - id: src
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          - id: src
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: source
            hold-pending-snapshots: """ + yaml.dump(hold_pending_snapshots) +
                            """
    """)))

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._run_local_retention(datetime(2018, 10, 1, 3, 0))

    assert list_snapshots(local_shell, "data/src", False) == remains
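The string concatenation above splices yaml.dump(hold_pending_snapshots) (for example "true" plus PyYAML's document-end marker) straight into the YAML text, which works but is fragile. A sketch of an equivalent, cleaner construction in the style of the other examples in this section (definition_text is a hypothetical name for the YAML string above):

data = yaml.safe_load(definition_text)
# Set the parametrized flag on the parsed data instead of splicing YAML text.
data["replication-tasks"][0]["hold-pending-snapshots"] = hold_pending_snapshots
definition = Definition.from_data(data)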
Example #17
def test_snapshot_exclude():
    subprocess.call("zfs destroy -r data/src", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    for dataset in ["DISK1", "DISK1/Apps", "DISK1/ISO", "waggnas"]:
        subprocess.check_call(f"zfs create data/src/{dataset}", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          internal:
            dataset: data/src
            recursive: true
            exclude:
            - data/src/waggnas
            lifetime: "P7W"
            naming-schema: "auto-%Y%m%d.%H%M%S-2w"
            schedule:
              minute: "0"
              hour: "6"
              day-of-month: "*"
              month: "*"
              day-of-week: "*"
              begin: "06:00"
              end: "18:00"
    """))

    run_periodic_snapshot_test(definition, datetime(2020, 1, 17, 6, 0))

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/src", False)) == 1
    assert len(list_snapshots(local_shell, "data/src/DISK1/Apps", False)) == 1
    assert len(list_snapshots(local_shell, "data/src/DISK1/ISO", False)) == 1
    assert len(list_snapshots(local_shell, "data/src/waggnas", False)) == 0
Example #18
def test_zfs_clone():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    for snapshot in snapshots:
        subprocess.check_call(
            f"zfs snapshot {snapshot.dataset}@{snapshot.name}", shell=True)
    subprocess.check_call(
        f"zfs clone {snapshots[1].dataset}@{snapshots[1].name} data/src",
        shell=True)

    local_shell = LocalShell()
    destroy_snapshots(local_shell, snapshots)

    assert list_snapshots(local_shell, "data/dst", False) == [snapshots[1]]
Example #19
def test_subsequent_snapshots():
    subprocess.call("zfs destroy -r data/src", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/file_1 bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@snap-1", shell=True)

    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/file_2 bs=1M count=1", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          one-week:
            dataset: data/src
            recursive: false
            naming-schema: "%Y-%m-%d_%H-%M-1w"
            lifetime: P7D
            allow-empty: false
            schedule:
              minute: "*"
              hour: "*"
              day-of-month: "*"
              month: "*"
              day-of-week: "*"
          two-weeks:
            dataset: data/src
            recursive: false
            naming-schema: "%Y-%m-%d_%H-%M-2w"
            lifetime: P14D
            allow-empty: false
            schedule:
              minute: "*"
              hour: "*"
              day-of-month: "*"
              month: "*"
              day-of-week: "*"
    """))

    run_periodic_snapshot_test(definition, datetime(2020, 3, 11, 19, 36))

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/src", False)) == 3
Example #20
    def periodic_snapshot_task_snapshots(self, task):
        snapshots = list_snapshots(LocalShell(), task["dataset"], task["recursive"])
        zettarepl_task = PeriodicSnapshotTask.from_data(None, self.middleware.call_sync(
            "zettarepl.periodic_snapshot_task_definition", task,
        ))
        snapshot_owner = PeriodicSnapshotTaskSnapshotOwner(datetime.utcnow(), zettarepl_task)

        task_snapshots = set()
        for snapshot in snapshots:
            if snapshot_owner.owns_dataset(snapshot.dataset):
                try:
                    parsed_snapshot_name = parse_snapshot_name(snapshot.name, task["naming_schema"])
                except ValueError:
                    pass
                else:
                    if snapshot_owner.owns_snapshot(snapshot.dataset, parsed_snapshot_name):
                        task_snapshots.add(str(snapshot))

        return task_snapshots
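parse_snapshot_name raises ValueError for snapshot names that were not produced by the task's naming schema, which is why the loop above treats ValueError as "not owned by this task". A rough sketch of that behavior (an assumption about the helper; essentially strptime against the schema):

from datetime import datetime

def parse_snapshot_name(name: str, naming_schema: str) -> datetime:
    # Raises ValueError when `name` does not match `naming_schema`.
    return datetime.strptime(name, naming_schema)

parse_snapshot_name("2018-10-01_01-00", "%Y-%m-%d_%H-%M")  # datetime(2018, 10, 1, 1, 0)
try:
    parse_snapshot_name("manual-1", "%Y-%m-%d_%H-%M")
except ValueError:
    pass  # non-matching snapshots are skipped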
Example #21
def get_snapshots_to_send_with_name_pattern(src_snapshots, dst_snapshots,
                                            replication_task, src_shell,
                                            src_dataset):
    filtered_src_snapshots = list(
        filter(replication_task.name_pattern.match, src_snapshots))
    filtered_dst_snapshots = list(
        filter(replication_task.name_pattern.match, dst_snapshots))
    to_replicate = set(filtered_src_snapshots) - set(filtered_dst_snapshots)
    if not to_replicate:
        return None, [], False

    # Only query createtxg if we have something to replicate as this operation is expensive
    src_snapshots = [
        snapshot.name for snapshot in list_snapshots(src_shell, src_dataset,
                                                     False, "createtxg")
    ]

    incremental_base = None
    snapshots_to_send = src_snapshots
    # Find the newest common snapshot and send the rest
    for i, snapshot in enumerate(src_snapshots):
        if snapshot in dst_snapshots:
            incremental_base = snapshot
            snapshots_to_send = src_snapshots[i + 1:]

    filtered_snapshots_to_send = list(
        filter(replication_task.name_pattern.match, snapshots_to_send))

    include_intermediate = False
    if snapshots_to_send == filtered_snapshots_to_send:
        if len(filtered_snapshots_to_send) > 1:
            if incremental_base is None:
                filtered_snapshots_to_send = [
                    filtered_snapshots_to_send[0],
                    filtered_snapshots_to_send[-1]
                ]
            else:
                filtered_snapshots_to_send = [filtered_snapshots_to_send[-1]]

            include_intermediate = True

    return incremental_base, filtered_snapshots_to_send, include_intermediate
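The loop above keeps overwriting incremental_base and snapshots_to_send, so after it finishes they reflect the newest snapshot present on both sides and everything created after it. A self-contained sketch of that selection step on plain lists:

def newest_common_base(src_snapshots, dst_snapshots):
    # Mirrors the scan above: the last common snapshot wins.
    incremental_base = None
    snapshots_to_send = src_snapshots
    for i, snapshot in enumerate(src_snapshots):
        if snapshot in dst_snapshots:
            incremental_base = snapshot
            snapshots_to_send = src_snapshots[i + 1:]
    return incremental_base, snapshots_to_send

assert newest_common_base(
    ["snap-1", "snap-2", "snap-3"], ["snap-1", "snap-2"],
) == ("snap-2", ["snap-3"])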
Example #22
def test_hold_pending_snapshots(retention_policy, remains):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_00-00", shell=True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_02-00", shell=True)
    subprocess.check_call("zfs snapshot data/dst@2018-10-01_03-00", shell=True)

    data = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          - id: src
            direction: pull
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            naming-schema: "%Y-%m-%d_%H-%M"
            recursive: true
            auto: true
    """))
    data["replication-tasks"][0].update(**retention_policy)
    definition = Definition.from_data(data)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._run_local_retention(datetime(2018, 10, 1, 3, 0))

    assert list_snapshots(local_shell, "data/dst", False) == remains
Example #23
def test_allow_empty(allow_empty, is_empty):
    subprocess.call("zfs destroy -r data/src", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/file_1 bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@snap-1", shell=True)

    if not is_empty:
        subprocess.check_call(
            "dd if=/dev/urandom of=/mnt/data/src/file_2 bs=1M count=1",
            shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          internal:
            dataset: data/src
            recursive: false
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "*"
              hour: "*"
              day-of-month: "*"
              month: "*"
              day-of-week: "*"
    """))
    definition["periodic-snapshot-tasks"]["internal"][
        "allow-empty"] = allow_empty

    run_periodic_snapshot_test(definition, datetime(2020, 3, 11, 19, 36))

    local_shell = LocalShell()
    assert len(
        list_snapshots(local_shell, "data/src",
                       False)) == (1 if is_empty and not allow_empty else 2)
Example #24
    def __exit__(self, exc_type, exc_val, exc_tb):
        if (self.replication_process.properties and isinstance(
                exc_val, ExecException
        ) and exc_val.stdout.endswith(
                f"cannot mount '{self.replication_process.target_dataset}': mountpoint or dataset is busy\n"
        )):
            if self.replication_process.direction == ReplicationDirection.PUSH:
                dst_shell = self.replication_process.remote_shell
            else:
                dst_shell = self.replication_process.local_shell

            try:
                snapshots = list_snapshots(
                    dst_shell, self.replication_process.target_dataset, False)
            except Exception as e:
                logger.warning(
                    "Caught 'mountpoint or dataset is busy' and was not able to list snapshots on destination side: "
                    "%r. Assuming replication failure.", e)
                return

            snapshot = Snapshot(self.replication_process.target_dataset,
                                self.replication_process.snapshot)
            if snapshot not in snapshots:
                logger.warning(
                    "Caught 'mountpoint or dataset is busy' and %r does not exist on destination side. "
                    "Assuming replication failure.",
                    snapshot,
                )
                return

            # It's ok, snapshot was transferred successfully, just were not able to mount dataset on specified
            # mountpoint
            logger.info(
                "Caught 'mountpoint or dataset is busy' but %r is present on remote side. "
                "Assuming replication success.",
                snapshot,
            )
            return True
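Returning True from __exit__ is what converts the "mountpoint or dataset is busy" failure into a success: a truthy return value suppresses the in-flight exception, while the bare return (None) lets it propagate. A minimal illustration of that context-manager contract (hypothetical class, not part of zettarepl):

class SuppressBusyMount:
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # True -> exception is swallowed; None/False -> it propagates.
        return exc_type is not None and "dataset is busy" in str(exc_val)

with SuppressBusyMount():
    raise RuntimeError("cannot mount 'data/dst': mountpoint or dataset is busy")
# Execution continues here because __exit__ returned True.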
Example #25
def test_pull_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)

    definition = Definition.from_data(
        yaml.safe_load(
            textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          - id: src
            direction: pull
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: true
            retention-policy: none
    """)))

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._run_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))

    assert len(list_snapshots(local_shell, "data/dst", False)) == 2
Example #26
def test_parallel_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call("zfs create data/src/a", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/a/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/a@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/src/b", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/b/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/b@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src-a:
            dataset: data/src/a
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"
          src-b:
            dataset: data/src/b
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src-a:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/a
            target-dataset: data/dst/a
            recursive: true
            periodic-snapshot-tasks:
              - src-a
            auto: true
            retention-policy: none
            speed-limit: 100000
          src-b:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/b
            target-dataset: data/dst/b
            recursive: true
            periodic-snapshot-tasks:
              - src-b
            auto: true
            retention-policy: none
            speed-limit: 100000
    """))
    set_localhost_transport_options(definition["replication-tasks"]["src-a"]["transport"])
    set_localhost_transport_options(definition["replication-tasks"]["src-b"]["transport"])
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))

    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()
    assert 10 <= end - start <= 15

    zettarepl._spawn_retention.assert_called_once()

    assert sum(1 for m in zettarepl.observer.call_args_list if isinstance(m[0][0], ReplicationTaskSuccess)) == 2

    assert len(list_snapshots(local_shell, "data/dst/a", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/b", False)) == 1

    subprocess.call("zfs destroy -r data/dst", shell=True)
    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)

    zettarepl._replication_tasks_can_run_in_parallel = Mock(return_value=False)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))

    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()
    assert 20 <= end - start <= 25

    assert sum(1 for m in zettarepl.observer.call_args_list if isinstance(m[0][0], ReplicationTaskSuccess)) == 4

    assert len(list_snapshots(local_shell, "data/dst/a", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/b", False)) == 1
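The timing bounds follow from the speed limit: each child dataset carries a 1 MiB blob and each task is throttled to 100,000 bytes per second, so a single transfer takes roughly 1,048,576 / 100,000 ≈ 10.5 seconds. Run in parallel, both tasks fit in the 10-15 second window; forced to run serially, they take about twice as long, hence 20-25 seconds. A one-line check of that arithmetic:

blob_bytes = 1 * 1024 * 1024   # each blob written with dd above
speed_limit = 100_000          # bytes per second, from the definition
print(blob_bytes / speed_limit)  # ~10.5 seconds per replication task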
Example #27
    def __exit__(self, exc_type, exc_val, exc_tb):
        m = {}
        valid_errors = ("failed to create mountpoint.*",
                        "mountpoint or dataset is busy")
        valid_pylibzfs_errors = ("failed to create mountpoint.*", )
        if (isinstance(exc_val, ExecException) and (
                # Regular zfs CLI
            (
                re_search_to(
                    m,
                    f"cannot mount '(?P<dataset>.+)': (?P<error>({'|'.join(valid_errors)}))\n",
                    exc_val.stdout,
                ) and
                (m["dataset"] == self.replication_process.target_dataset or
                 (m["error"].startswith("failed to create mountpoint")
                  and m["dataset"].endswith(
                      f"/{self.replication_process.target_dataset}")))
                # py-libzfs
            ) or (re_search_to(
                m,
                f"(?P<error>({'|'.join(valid_pylibzfs_errors)}))\n",
                exc_val.stdout,
            ))) and (self.replication_process.properties if m["error"]
                     == "mountpoint or dataset is busy" else True)):
            if self.replication_process.direction == ReplicationDirection.PUSH:
                dst_shell = self.replication_process.remote_shell
            else:
                dst_shell = self.replication_process.local_shell

            try:
                snapshots = list_snapshots(
                    dst_shell, self.replication_process.target_dataset, False)
            except Exception as e:
                logger.warning(
                    "Caught %r and was not able to list snapshots on destination side: %r. Assuming replication "
                    "failure.", m["error"], e)
                return

            snapshot = Snapshot(self.replication_process.target_dataset,
                                self.replication_process.snapshot)
            if snapshot not in snapshots:
                logger.warning(
                    "Caught %r and %r does not exist on destination side. Assuming replication failure.",
                    m["error"],
                    snapshot,
                )
                return

            # It's ok, snapshot was transferred successfully, just were not able to mount dataset on specified
            # mountpoint
            logger.info(
                "Caught %r but %r is present on remote side. Assuming replication success.",
                m["error"],
                snapshot,
            )
            return True

        if (self.replication_process.incremental_base
                and isinstance(exc_val, ExecException)):
            match = None
            snapshot = None
            incremental_base = None

            # OpenZFS
            m = re.search(
                r"could not send (?P<snapshot>.+):\s*"
                r"incremental source \((?P<incremental_base>.+)\) is not earlier than it",
                exc_val.stdout)
            if m:
                match = m.group(0)
                snapshot = m.group("snapshot")
                incremental_base = m.group("incremental_base")

            # ZoL
            m = re.search(
                r"warning: cannot send (?P<snapshot>.+): not an earlier snapshot from the same fs",
                exc_val.stdout)
            if m:
                match = m.group(0)
                snapshot = m.group("snapshot").strip("'")
                incremental_base = self.replication_process.incremental_base

            if match is not None:
                text = textwrap.dedent(f"""\
                    Replication cannot continue because existing snapshot
                    {incremental_base} is newer than
                    {snapshot}, but has an older date
                    in the snapshot name. To resolve the error, rename
                    {snapshot} with a date that is older than
                    {incremental_base} or delete snapshot
                    {snapshot} from both the source and destination.
                """)
                exc_val.stdout = exc_val.stdout.replace(
                    match, match + f"\n{text.rstrip()}")
                return

        if (isinstance(exc_val, ExecException) and
            (re.search(r"cannot send .+:\s*signal received", exc_val.stdout) or
             "cannot receive new filesystem stream: checksum mismatch or incomplete stream"
             in exc_val.stdout)):
            raise RecoverableReplicationError(str(exc_val)) from None

        if (isinstance(exc_val, ExecException) and (
                # OpenZFS
                re.search(r"cannot send .+: snapshot .+ does not exist",
                          exc_val.stdout) or
                # ZoL
                re.search(r"cannot open '.+@.+': dataset does not exist",
                          exc_val.stdout))):
            raise RecoverableReplicationError(str(exc_val)) from None

        if (isinstance(
                exc_val, ExecException
        ) and "zfs receive -F cannot be used to destroy an encrypted filesystem"
                in exc_val.stdout.strip()):
            if self.replication_process.raw:
                raise ReplicationError(
                    f"Unable to send encrypted dataset {self.replication_process.source_dataset!r} to existing "
                    f"unencrypted or unrelated dataset {self.replication_process.target_dataset!r}"
                ) from None
            else:
                raise ReplicationError(
                    f"Unable to send dataset {self.replication_process.source_dataset!r} to existing unrelated "
                    f"encrypted dataset {self.replication_process.target_dataset!r}"
                ) from None

        if (isinstance(exc_val, ExecException)
                and re.search(r"cannot mount '.+': Insufficient privileges",
                              exc_val.stdout)):
            raise ReplicationError(
                f"{exc_val.stdout.rstrip('.')}. Please make sure replication user has write permissions to its "
                f"parent dataset") from None
Example #28
    def __exit__(self, exc_type, exc_val, exc_tb):
        m = {}
        valid_errors = ("failed to create mountpoint",
                        "mountpoint or dataset is busy")
        valid_pylibzfs_errors = ("failed to create mountpoint", )
        if (isinstance(exc_val, ExecException) and (
                # Regular zfs CLI
            (
                re_search_to(
                    m,
                    f"cannot mount '(?P<dataset>.+)': (?P<error>({'|'.join(valid_errors)}))\n",
                    exc_val.stdout,
                ) and
                (m["dataset"] == self.replication_process.target_dataset or
                 (m["error"] == "failed to create mountpoint" and m["dataset"].
                  endswith(f"/{self.replication_process.target_dataset}")))
                # py-libzfs
            ) or (re_search_to(
                m,
                f"(?P<error>({'|'.join(valid_pylibzfs_errors)}))\n",
                exc_val.stdout,
            ))) and (self.replication_process.properties if m["error"]
                     == "mountpoint or dataset is busy" else True)):
            if self.replication_process.direction == ReplicationDirection.PUSH:
                dst_shell = self.replication_process.remote_shell
            else:
                dst_shell = self.replication_process.local_shell

            try:
                snapshots = list_snapshots(
                    dst_shell, self.replication_process.target_dataset, False)
            except Exception as e:
                logger.warning(
                    "Caught %r and was not able to list snapshots on destination side: %r. Assuming replication "
                    "failure.", m["error"], e)
                return

            snapshot = Snapshot(self.replication_process.target_dataset,
                                self.replication_process.snapshot)
            if snapshot not in snapshots:
                logger.warning(
                    "Caught %r and %r does not exist on destination side. Assuming replication failure.",
                    m["error"],
                    snapshot,
                )
                return

            # It's ok, snapshot was transferred successfully, just were not able to mount dataset on specified
            # mountpoint
            logger.info(
                "Caught %r but %r is present on remote side. Assuming replication success.",
                m["error"],
                snapshot,
            )
            return True

        if (self.replication_process.incremental_base
                and isinstance(exc_val, ExecException)):
            m = re.search(
                r"could not send (?P<snapshot>.+):\s*"
                r"incremental source \((?P<incremental_base>.+)\) is not earlier than it",
                exc_val.stdout)
            if m:
                text = textwrap.dedent(f"""\
                    Replication cannot continue because existing snapshot
                    {m.group('incremental_base')} is newer than
                    {m.group('snapshot')}, but has an older date
                    in the snapshot name. To resolve the error, rename
                    {m.group('snapshot')} with a date that is older than
                    {m.group('incremental_base')} or delete snapshot
                    {m.group('snapshot')} from both the source and destination.
                """)
                exc_val.stdout = exc_val.stdout.replace(
                    m.group(0),
                    m.group(0) + f"\n{text.rstrip()}")
                return

        if (isinstance(exc_val, ExecException) and
            (re.search(r"cannot send .+:\s*signal received", exc_val.stdout) or
             "cannot receive new filesystem stream: checksum mismatch or incomplete stream"
             in exc_val.stdout)):
            raise RecoverableReplicationError(str(exc_val)) from None
Example #29
def test_push_replication(dst_parent_is_readonly, dst_exists, transport,
                          properties, compression):
    if transport["type"] != "ssh" and compression:
        return

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst_parent", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs set test:property=test-value data/src",
                          shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("zfs create data/dst_parent", shell=True)
    if dst_exists:
        subprocess.check_call("zfs create data/dst_parent/dst", shell=True)
    if dst_parent_is_readonly:
        subprocess.check_call("zfs set readonly=on data/dst_parent",
                              shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst_parent/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition["replication-tasks"]["src"]["properties"] = properties
    if compression:
        definition["replication-tasks"]["src"]["compression"] = compression
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    observer = Mock()
    zettarepl.set_observer(observer)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    error = observer.call_args_list[-1][0][0]
    assert isinstance(error, ReplicationTaskSuccess), error

    assert len(list_snapshots(local_shell, "data/dst_parent/dst", False)) == 2

    assert (("test-value" in subprocess.check_output(
        "zfs get test:property data/dst_parent/dst",
        shell=True,
        encoding="utf-8")) == properties)

    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    error = observer.call_args_list[-1][0][0]
    assert isinstance(error, ReplicationTaskSuccess), error

    assert len(list_snapshots(local_shell, "data/dst_parent/dst", False)) == 3
Example #30
def test_replication_retry(caplog, direction):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            transport:
              type: ssh
              hostname: 127.0.0.1
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            auto: false
            retention-policy: none
            speed-limit: 200000
            retries: 2
    """))
    definition["replication-tasks"]["src"]["direction"] = direction
    if direction == "push":
        definition["replication-tasks"]["src"]["periodic-snapshot-tasks"] = [
            "src"
        ]
    else:
        definition["replication-tasks"]["src"]["naming-schema"] = [
            "%Y-%m-%d_%H-%M"
        ]
    set_localhost_transport_options(
        definition["replication-tasks"]["src"]["transport"])
    definition = Definition.from_data(definition)

    caplog.set_level(logging.INFO)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))

    time.sleep(2)
    if direction == "push":
        subprocess.check_output("kill $(pgrep -f '^zfs recv')", shell=True)
    else:
        subprocess.check_output("kill $(pgrep -f '^(zfs send|zfs: sending)')",
                                shell=True)

    wait_replication_tasks_to_complete(zettarepl)

    assert any(" recoverable replication error" in record.message
               for record in caplog.get_records("call"))
    assert any("Resuming replication for destination dataset" in record.message
               for record in caplog.get_records("call"))

    success = zettarepl.observer.call_args_list[-1][0][0]
    assert isinstance(success, ReplicationTaskSuccess), success

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst", False)) == 1