Code example #1
def test_push_replication(transport, properties):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs set test:property=test-value data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition["replication-tasks"]["src"]["properties"] = properties
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 2

    assert (
        ("test-value" in subprocess.check_output("zfs get test:property data/dst", shell=True, encoding="utf-8")) ==
        properties
    )

    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 3
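
This test takes transport and properties as arguments; in the test suite they are presumably supplied by pytest parametrization. A plausible setup (the decorator values here are hypothetical, not the project's actual fixtures):

import pytest

@pytest.mark.parametrize("properties", [True, False])
@pytest.mark.parametrize("transport", [
    {"type": "local"},
    {"type": "ssh", "hostname": "localhost"},  # hypothetical example values
])
def test_push_replication(transport, properties):
    ...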
Code example #2
def test_push_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/child", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))

    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert sum(1 for m in zettarepl.observer.call_args_list
               if isinstance(m[0][0], ReplicationTaskSuccess)) == 1

    subprocess.check_call("zfs destroy -r data/src/child", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert sum(1 for m in zettarepl.observer.call_args_list
               if isinstance(m[0][0], ReplicationTaskSuccess)) == 2

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst/child", False)) == 1
Code example #3
def test_push_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)

    definition = Definition.from_data(yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """)))

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 2

    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 3
Code example #4
File: zettarepl.py Project: b-a-t/zettarepl
    def _run_remote_retention(self, now: datetime):
        push_replication_tasks = list(
            filter(self._is_push_replication_task,
                   select_by_class(ReplicationTask, self.tasks)))
        local_snapshots_grouped = group_snapshots_by_datasets(
            multilist_snapshots(
                self.local_shell,
                replication_tasks_source_datasets_queries(
                    push_replication_tasks)))
        for transport, replication_tasks in self._transport_for_replication_tasks(
                push_replication_tasks):
            shell = self._get_shell(transport)
            remote_snapshots = multilist_snapshots(
                shell,
                [(replication_task.target_dataset, replication_task.recursive)
                 for replication_task in replication_tasks])
            remote_snapshots_grouped = group_snapshots_by_datasets(
                remote_snapshots)
            owners = [
                ExecutedReplicationTaskSnapshotOwner(now, replication_task,
                                                     local_snapshots_grouped,
                                                     remote_snapshots_grouped)
                for replication_task in replication_tasks
            ]

            snapshots_to_destroy = calculate_snapshots_to_remove(
                owners, remote_snapshots)
            logger.info("Retention on transport %r destroying snapshots: %r",
                        transport, snapshots_to_destroy)
            destroy_snapshots(shell, snapshots_to_destroy)
Code example #5
def test_replication_resume(caplog):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call(
        "dd if=/dev/zero of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call(
        "(zfs send data/src@2018-10-01_01-00 | throttle -b 102400 | zfs recv -s -F data/dst) & "
        "sleep 1; killall zfs",
        shell=True)

    assert "receive_resume_token\t1-" in subprocess.check_output(
        "zfs get -H receive_resume_token data/dst",
        shell=True,
        encoding="utf-8")

    definition = Definition.from_data(
        yaml.safe_load(
            textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          - id: src
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          - id: src
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """)))

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._run_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))

    assert any("Resuming replication for dst_dataset" in record.message
               for record in caplog.get_records("call"))

    assert len(list_snapshots(local_shell, "data/dst", False)) == 1
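
This test depends on ZFS resumable receive: killing `zfs recv -s` mid-stream leaves a receive_resume_token property on the destination, which the replication engine then uses to continue the transfer instead of starting over. The token can also be inspected and discarded by hand with standard ZFS commands (the same ones the cleanup code in these examples uses):

subprocess.check_output("zfs get -H receive_resume_token data/dst", shell=True, encoding="utf-8")  # "1-..." while a resume is pending
subprocess.call("zfs receive -A data/dst", shell=True)  # abort the interrupted receive, clearing the token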
Code example #6
File: test.py Project: NHGmaniac/zettarepl
def run_periodic_snapshot_test(definition, now, success=True):
    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._run_periodic_snapshot_tasks(now, select_by_class(PeriodicSnapshotTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    if success:
        for call in zettarepl.observer.call_args_list:
            call = call[0][0]
            assert not isinstance(call, PeriodicSnapshotTaskError), success
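
A minimal usage sketch for this helper, assuming the same imports (yaml, textwrap) as the surrounding examples; the definition and the now value are hypothetical examples in the style of the other listings:

from datetime import datetime

run_periodic_snapshot_test(
    yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"
    """)),
    datetime(2018, 10, 1, 1, 0),
)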
Code example #7
def test_replication_data_progress():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: ssh
              hostname: 127.0.0.1
            source-dataset:
            - data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
            - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    set_localhost_transport_options(
        definition["replication-tasks"]["src"]["transport"])
    definition["replication-tasks"]["src"]["speed-limit"] = 10240 * 9

    with patch("zettarepl.replication.run.DatasetSizeObserver.INTERVAL", 5):
        definition = Definition.from_data(definition)
        zettarepl = create_zettarepl(definition)
        zettarepl._spawn_replication_tasks(
            select_by_class(ReplicationTask, definition.tasks))
        wait_replication_tasks_to_complete(zettarepl)

    calls = [
        call for call in zettarepl.observer.call_args_list
        if call[0][0].__class__ == ReplicationTaskDataProgress
    ]

    assert len(calls) == 2

    assert 1024 * 1024 * 0.8 <= calls[0][0][0].src_size <= 1024 * 1024 * 1.2
    assert 0 <= calls[0][0][0].dst_size <= 10240 * 1.2

    assert 1024 * 1024 * 0.8 <= calls[1][0][0].src_size <= 1024 * 1024 * 1.2
    assert 10240 * 6 * 0.8 <= calls[1][0][0].dst_size <= 10240 * 6 * 1.2
Code example #8
File: test.py Project: NHGmaniac/zettarepl
def run_replication_test(definition, success=True):
    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    if success:
        message = zettarepl.observer.call_args_list[-1][0][0]
        assert isinstance(message, ReplicationTaskSuccess), message
    else:
        error = zettarepl.observer.call_args_list[-1][0][0]
        assert isinstance(error, ReplicationTaskError), error
        return error
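
A usage sketch, again with a hypothetical definition: when success=True (the default) the helper only asserts that the last observer message is a ReplicationTaskSuccess; with success=False it returns the ReplicationTaskError so the caller can inspect it:

error = run_replication_test(
    yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
    """)),
    success=False,
)
assert "data/src" in error.error  # hypothetical check on the error text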
Code example #9
    def _run_remote_retention(self, now: datetime):
        push_replication_tasks = list(
            filter(self._is_push_replication_task,
                   select_by_class(ReplicationTask, self.tasks)))
        local_snapshots_grouped = group_snapshots_by_datasets(
            multilist_snapshots(
                self.local_shell,
                replication_tasks_source_datasets_queries(
                    push_replication_tasks)))
        for transport, replication_tasks in self._transport_for_replication_tasks(
                push_replication_tasks):
            shell = self._get_retention_shell(transport)
            remote_snapshots_queries = [
                (replication_task.target_dataset, replication_task.recursive)
                for replication_task in replication_tasks
            ]
            try:
                # Prevent hanging remote from breaking all the replications
                with ShellTimeoutContext(3600):
                    remote_snapshots = multilist_snapshots(
                        shell, remote_snapshots_queries)
            except Exception as e:
                logger.warning(
                    "Remote retention failed on %r: error listing snapshots: %r",
                    transport, e)
                continue
            remote_snapshots_grouped = group_snapshots_by_datasets(
                remote_snapshots)
            owners = [
                ExecutedReplicationTaskSnapshotOwner(now, replication_task,
                                                     local_snapshots_grouped,
                                                     remote_snapshots_grouped)
                for replication_task in replication_tasks
            ]

            snapshots_to_destroy = calculate_snapshots_to_remove(
                owners, remote_snapshots)
            logger.info("Retention on %r destroying snapshots: %r", transport,
                        snapshots_to_destroy)
            try:
                # Prevent hanging remote from breaking all the replications
                with ShellTimeoutContext(3600):
                    destroy_snapshots(shell, snapshots_to_destroy)
            except Exception as e:
                logger.warning(
                    "Remote retention failed on %r: error destroying snapshots: %r",
                    transport, e)
                continue
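
ShellTimeoutContext itself does not appear in these listings. As a rough sketch of the pattern it implements, assuming nothing about zettarepl's actual class beyond its use as a context manager above, a guard of this shape arms a timer on entry and cancels it on exit (TimeoutGuard and on_timeout are hypothetical names):

import threading

class TimeoutGuard:
    """Illustrative stand-in only: fires on_timeout if the body runs too long."""

    def __init__(self, timeout: float, on_timeout):
        self._timer = threading.Timer(timeout, on_timeout)

    def __enter__(self):
        self._timer.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._timer.cancel()
        return False  # never suppress exceptions raised by the body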
Code example #10
def test_zvol_replication(as_root):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    if as_root:
        subprocess.check_call("zfs create -V 1M data/src", shell=True)
    else:
        subprocess.check_call("zfs create data/src", shell=True)
        subprocess.check_call("zfs create -V 1M data/src/zvol", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00",
                          shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert len(list_snapshots(local_shell, "data/dst", False)) == 2
    if not as_root:
        assert len(list_snapshots(local_shell, "data/dst/zvol", False)) == 2
Code example #11
def test_pull_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)

    definition = Definition.from_data(
        yaml.safe_load(
            textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          - id: src
            direction: pull
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: true
            retention-policy: none
    """)))

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._run_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))

    assert len(list_snapshots(local_shell, "data/dst", False)) == 2
Code example #12
    def _run_local_retention(self, now: datetime):
        periodic_snapshot_tasks = select_by_class(PeriodicSnapshotTask,
                                                  self.tasks)
        replication_tasks = select_by_class(ReplicationTask, self.tasks)

        push_replication_tasks_that_can_hold = [
            replication_task for replication_task in replication_tasks
            if replication_task.hold_pending_snapshots
        ]
        pull_replications_tasks = list(
            filter(self._is_pull_replication_task, replication_tasks))

        local_snapshots_queries = []
        local_snapshots_queries.extend([
            (periodic_snapshot_task.dataset, periodic_snapshot_task.recursive)
            for periodic_snapshot_task in periodic_snapshot_tasks
        ])
        local_snapshots_queries.extend(
            replication_tasks_source_datasets_queries(
                push_replication_tasks_that_can_hold))
        local_snapshots_queries.extend([
            (replication_task.target_dataset, replication_task.recursive)
            for replication_task in pull_replications_tasks
        ])
        local_snapshots = multilist_snapshots(self.local_shell,
                                              local_snapshots_queries)
        local_snapshots_grouped = group_snapshots_by_datasets(local_snapshots)

        owners = []
        owners.extend([
            PeriodicSnapshotTaskSnapshotOwner(now, periodic_snapshot_task)
            for periodic_snapshot_task in periodic_snapshot_tasks
        ])

        # These are always only PUSH replication tasks
        for transport, replication_tasks in self._transport_for_replication_tasks(
                push_replication_tasks_that_can_hold):
            shell = self._get_retention_shell(transport)
            owners.extend(
                pending_push_replication_task_snapshot_owners(
                    local_snapshots_grouped, shell, replication_tasks))

        for transport, replication_tasks in self._transport_for_replication_tasks(
                pull_replications_tasks):
            shell = self._get_retention_shell(transport)
            remote_snapshots_queries = replication_tasks_source_datasets_queries(
                replication_tasks)
            try:
                remote_snapshots = multilist_snapshots(
                    shell, remote_snapshots_queries)
            except Exception as e:
                logger.warning(
                    "Local retention failed: error listing snapshots on %r: %r",
                    transport, e)
                return
            remote_snapshots_grouped = group_snapshots_by_datasets(
                remote_snapshots)
            owners.extend([
                executed_pull_replication_task_snapshot_owner(
                    now, replication_task, remote_snapshots_grouped,
                    local_snapshots_grouped)
                for replication_task in replication_tasks
            ])

        snapshots_to_destroy = calculate_snapshots_to_remove(
            owners, local_snapshots)
        logger.info("Retention destroying local snapshots: %r",
                    snapshots_to_destroy)
        destroy_snapshots(self.local_shell, snapshots_to_destroy)
Code example #13
def test_push_replication(dst_parent_is_readonly, dst_exists, transport,
                          properties, compression):
    if transport["type"] != "ssh" and compression:
        return

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst_parent", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs set test:property=test-value data/src",
                          shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    subprocess.check_call("zfs create data/dst_parent", shell=True)
    if dst_exists:
        subprocess.check_call("zfs create data/dst_parent/dst", shell=True)
    if dst_parent_is_readonly:
        subprocess.check_call("zfs set readonly=on data/dst_parent",
                              shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst_parent/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition["replication-tasks"]["src"]["properties"] = properties
    if compression:
        definition["replication-tasks"]["src"]["compression"] = compression
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    observer = Mock()
    zettarepl.set_observer(observer)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    message = observer.call_args_list[-1][0][0]
    assert isinstance(message, ReplicationTaskSuccess), message

    assert len(list_snapshots(local_shell, "data/dst_parent/dst", False)) == 2

    assert (("test-value" in subprocess.check_output(
        "zfs get test:property data/dst_parent/dst",
        shell=True,
        encoding="utf-8")) == properties)

    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)

    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    message = observer.call_args_list[-1][0][0]
    assert isinstance(message, ReplicationTaskSuccess), message

    assert len(list_snapshots(local_shell, "data/dst_parent/dst", False)) == 3
Code example #14
def test_preserves_clone_origin():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/iocage", shell=True)
    subprocess.check_call("zfs create data/src/iocage/child", shell=True)
    subprocess.check_call("zfs create data/src/iocage/child/dataset",
                          shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/iocage/child/dataset/blob bs=1M count=1",
        shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2019-11-08_14-00",
                          shell=True)
    subprocess.check_call("zfs create data/src/iocage/another", shell=True)
    subprocess.check_call("zfs create data/src/iocage/another/child",
                          shell=True)
    subprocess.check_call(
        "zfs clone data/src/iocage/child/dataset@2019-11-08_14-00 "
        "data/src/iocage/another/child/clone",
        shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2019-11-08_15-00",
                          shell=True)

    assert (subprocess.check_output(
        "zfs get -H origin data/src/iocage/another/child/clone",
        encoding="utf-8",
        shell=True).split("\n")[0].split("\t")[2] ==
            "data/src/iocage/child/dataset@2019-11-08_14-00")
    assert int(
        subprocess.check_output(
            "zfs get -H -p used data/src/iocage/another/child/clone",
            encoding="utf-8",
            shell=True).split("\n")[0].split("\t")[2]) < 2e6

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            properties: true
            replicate: true
            also-include-naming-schema:
              - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert (subprocess.check_output(
        "zfs get -H origin data/dst/iocage/another/child/clone",
        encoding="utf-8",
        shell=True).split("\n")[0].split("\t")[2] ==
            "data/dst/iocage/child/dataset@2019-11-08_14-00")
    assert int(
        subprocess.check_output(
            "zfs get -H -p used data/dst/iocage/another/child/clone",
            encoding="utf-8",
            shell=True).split("\n")[0].split("\t")[2]) < 2e6
Code example #15
def test_replication_retry(caplog, direction):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            transport:
              type: ssh
              hostname: 127.0.0.1
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            auto: false
            retention-policy: none
            speed-limit: 200000
            retries: 2
    """))
    definition["replication-tasks"]["src"]["direction"] = direction
    if direction == "push":
        definition["replication-tasks"]["src"]["periodic-snapshot-tasks"] = [
            "src"
        ]
    else:
        definition["replication-tasks"]["src"]["naming-schema"] = [
            "%Y-%m-%d_%H-%M"
        ]
    set_localhost_transport_options(
        definition["replication-tasks"]["src"]["transport"])
    definition = Definition.from_data(definition)

    caplog.set_level(logging.INFO)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))

    time.sleep(2)
    if direction == "push":
        subprocess.check_output("kill $(pgrep -f '^zfs recv')", shell=True)
    else:
        subprocess.check_output("kill $(pgrep -f '^(zfs send|zfs: sending)')",
                                shell=True)

    wait_replication_tasks_to_complete(zettarepl)

    assert any(" recoverable replication error" in record.message
               for record in caplog.get_records("call"))
    assert any("Resuming replication for destination dataset" in record.message
               for record in caplog.get_records("call"))

    success = zettarepl.observer.call_args_list[-1][0][0]
    assert isinstance(success, ReplicationTaskSuccess), success

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst", False)) == 1
Code example #16
def test_push_replication(dst_parent_is_readonly, dst_exists, transport, replicate, properties, encrypted,
                          has_encrypted_child, dst_parent_encrypted):
    if replicate and not properties:
        return
    if encrypted and has_encrypted_child:
        # If parent is encrypted, child is also encrypted
        return

    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst_parent", shell=True)

    create_dataset("data/src", encrypted)
    subprocess.check_call("zfs set test:property=test-value data/src", shell=True)
    if has_encrypted_child:
        create_dataset("data/src/child", True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00", shell=True)

    create_dataset("data/dst_parent", dst_parent_encrypted)
    if dst_exists:
        subprocess.check_call("zfs create data/dst_parent/dst", shell=True)
    if dst_parent_is_readonly:
        subprocess.check_call("zfs set readonly=on data/dst_parent", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst_parent/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition["replication-tasks"]["src"]["replicate"] = replicate
    definition["replication-tasks"]["src"]["properties"] = properties
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    observer = Mock()
    zettarepl.set_observer(observer)
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    if dst_exists and properties and encrypted and not dst_parent_encrypted:
        error = observer.call_args_list[-1][0][0]
        assert isinstance(error, ReplicationTaskError), error
        assert error.error == ("Unable to send encrypted dataset 'data/src' to existing unencrypted or unrelated "
                               "dataset 'data/dst_parent/dst'")
        return

    message = observer.call_args_list[-1][0][0]
    assert isinstance(message, ReplicationTaskSuccess), message

    assert len(list_snapshots(local_shell, "data/dst_parent/dst", False)) == 2

    assert (
        ("test-value" in subprocess.check_output("zfs get test:property data/dst_parent/dst",
                                                 shell=True, encoding="utf-8")) ==
        properties
    )

    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_03-00", shell=True)

    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    message = observer.call_args_list[-1][0][0]
    assert isinstance(message, ReplicationTaskSuccess), message

    assert len(list_snapshots(local_shell, "data/dst_parent/dst", False)) == 3
Code example #17
def test_parallel_replication_3(max_parallel_replications):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call("zfs create data/src/a", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/a/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/a@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/src/b", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/b/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/b@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/src/c", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/c/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/c@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)
    subprocess.check_call("zfs create data/dst/c", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src-a:
            dataset: data/src/a
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"
          src-b:
            dataset: data/src/b
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"
          src-c:
            dataset: data/src/c
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src-a:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/a
            target-dataset: data/dst/a
            recursive: true
            periodic-snapshot-tasks:
              - src-a
            auto: true
            retention-policy: none
            speed-limit: 100000
          src-b:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/b
            target-dataset: data/dst/b
            recursive: true
            periodic-snapshot-tasks:
              - src-b
            auto: true
            retention-policy: none
            speed-limit: 100000
          src-c:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/c
            target-dataset: data/dst/c
            recursive: true
            periodic-snapshot-tasks:
              - src-c
            auto: true
            retention-policy: none
            speed-limit: 100000
    """))
    definition["max-parallel-replication-tasks"] = max_parallel_replications
    set_localhost_transport_options(definition["replication-tasks"]["src-a"]["transport"])
    set_localhost_transport_options(definition["replication-tasks"]["src-b"]["transport"])
    set_localhost_transport_options(definition["replication-tasks"]["src-c"]["transport"])
    definition = Definition.from_data(definition)

    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))

    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()

    if max_parallel_replications == 3:
        assert 10 <= end - start <= 15
    else:
        assert 20 <= end - start <= 25
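
The timing bounds follow from the speed limit: each source dataset carries a 1 MiB blob and every task is capped at 100000 bytes per second, so a single transfer needs roughly ten seconds. With max_parallel_replications set to 3 all three tasks overlap; the other branch (presumably a smaller parallelism value) partially serializes them and roughly doubles the wall-clock time. The arithmetic, as a sketch:

blob_size = 1024 * 1024      # bytes written by each dd invocation above
speed_limit = 100000         # bytes per second, from each task definition
seconds_per_task = blob_size / speed_limit  # ~10.5 s per replication task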
Code example #18
def test_replication_progress(transport):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call("zfs create data/src/src1", shell=True)
    subprocess.check_call("zfs snapshot data/src/src1@2018-10-01_01-00",
                          shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/src1/blob bs=1M count=1",
        shell=True)
    subprocess.check_call("zfs snapshot data/src/src1@2018-10-01_02-00",
                          shell=True)
    subprocess.check_call("rm /mnt/data/src/src1/blob", shell=True)
    subprocess.check_call("zfs snapshot data/src/src1@2018-10-01_03-00",
                          shell=True)

    subprocess.check_call("zfs create data/src/src2", shell=True)
    subprocess.check_call("zfs snapshot data/src/src2@2018-10-01_01-00",
                          shell=True)
    subprocess.check_call("zfs snapshot data/src/src2@2018-10-01_02-00",
                          shell=True)
    subprocess.check_call("zfs snapshot data/src/src2@2018-10-01_03-00",
                          shell=True)
    subprocess.check_call("zfs snapshot data/src/src2@2018-10-01_04-00",
                          shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            source-dataset:
            - data/src/src1
            - data/src/src2
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
            - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    if transport["type"] == "ssh":
        definition["replication-tasks"]["src"]["speed-limit"] = 10240 * 9

    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    calls = [
        call for call in zettarepl.observer.call_args_list
        if call[0][0].__class__ != ReplicationTaskDataProgress
    ]

    result = [
        ReplicationTaskStart("src"),
        ReplicationTaskSnapshotStart("src", "data/src/src1",
                                     "2018-10-01_01-00", 0, 3),
        ReplicationTaskSnapshotSuccess("src", "data/src/src1",
                                       "2018-10-01_01-00", 1, 3),
        ReplicationTaskSnapshotStart("src", "data/src/src1",
                                     "2018-10-01_02-00", 1, 3),
        ReplicationTaskSnapshotSuccess("src", "data/src/src1",
                                       "2018-10-01_02-00", 2, 3),
        ReplicationTaskSnapshotStart("src", "data/src/src1",
                                     "2018-10-01_03-00", 2, 3),
        ReplicationTaskSnapshotSuccess("src", "data/src/src1",
                                       "2018-10-01_03-00", 3, 3),
        ReplicationTaskSnapshotStart("src", "data/src/src2",
                                     "2018-10-01_01-00", 3, 7),
        ReplicationTaskSnapshotSuccess("src", "data/src/src2",
                                       "2018-10-01_01-00", 4, 7),
        ReplicationTaskSnapshotStart("src", "data/src/src2",
                                     "2018-10-01_02-00", 4, 7),
        ReplicationTaskSnapshotSuccess("src", "data/src/src2",
                                       "2018-10-01_02-00", 5, 7),
        ReplicationTaskSnapshotStart("src", "data/src/src2",
                                     "2018-10-01_03-00", 5, 7),
        ReplicationTaskSnapshotSuccess("src", "data/src/src2",
                                       "2018-10-01_03-00", 6, 7),
        ReplicationTaskSnapshotStart("src", "data/src/src2",
                                     "2018-10-01_04-00", 6, 7),
        ReplicationTaskSnapshotSuccess("src", "data/src/src2",
                                       "2018-10-01_04-00", 7, 7),
        ReplicationTaskSuccess("src"),
    ]
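
    # The snapshot counters above are cumulative across source datasets:
    # data/src/src1 contributes three incremental snapshots (0..3 of 3),
    # after which the total grows to 7 as data/src/src2's four snapshots
    # are enumerated.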

    if transport["type"] == "ssh":
        result.insert(
            4,
            ReplicationTaskSnapshotProgress(
                "src",
                "data/src/src1",
                "2018-10-01_02-00",
                1,
                3,
                # We poll for progress every 10 seconds, so we would have
                # transferred 10x the speed limit.
                10240 * 9 * 10,
                2162784,  # empirical value
            ))

    for i, message in enumerate(result):
        call = calls[i]

        assert call[0][0].__class__ == message.__class__, calls

        d1 = call[0][0].__dict__
        d2 = message.__dict__

        if isinstance(message, ReplicationTaskSnapshotProgress):
            bytes_sent_1 = d1.pop("bytes_sent")
            bytes_total_1 = d1.pop("bytes_total")
            bytes_sent_2 = d2.pop("bytes_sent")
            bytes_total_2 = d2.pop("bytes_total")

            assert 0.8 <= bytes_sent_1 / bytes_sent_2 <= 1.2
            assert 0.8 <= bytes_total_1 / bytes_total_2 <= 1.2

        assert d1 == d2
Code example #19
def test_parallel_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call("zfs create data/src/a", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/a/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/a@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/src/b", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/b/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/b@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src-a:
            dataset: data/src/a
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"
          src-b:
            dataset: data/src/b
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src-a:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/a
            target-dataset: data/dst/a
            recursive: true
            periodic-snapshot-tasks:
              - src-a
            auto: true
            retention-policy: none
            speed-limit: 100000
          src-b:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/b
            target-dataset: data/dst/b
            recursive: true
            periodic-snapshot-tasks:
              - src-b
            auto: true
            retention-policy: none
            speed-limit: 100000
    """))
    set_localhost_transport_options(definition["replication-tasks"]["src-a"]["transport"])
    set_localhost_transport_options(definition["replication-tasks"]["src-b"]["transport"])
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))

    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()
    assert 10 <= end - start <= 15

    zettarepl._spawn_retention.assert_called_once()

    assert sum(1 for m in zettarepl.observer.call_args_list if isinstance(m[0][0], ReplicationTaskSuccess)) == 2

    assert len(list_snapshots(local_shell, "data/dst/a", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/b", False)) == 1

    subprocess.call("zfs destroy -r data/dst", shell=True)
    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)

    zettarepl._replication_tasks_can_run_in_parallel = Mock(return_value=False)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))

    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()
    assert 20 <= end - start <= 25

    assert sum(1 for m in zettarepl.observer.call_args_list if isinstance(m[0][0], ReplicationTaskSuccess)) == 4

    assert len(list_snapshots(local_shell, "data/dst/a", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/b", False)) == 1
Code example #20
def test_snapshot_gone(transport):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
            retries: 2
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    observer = Mock()
    zettarepl.set_observer(observer)
    zettarepl.set_tasks(definition.tasks)

    deleted = False

    def resume_replications_mock(*args, **kwargs):
        nonlocal deleted
        if not deleted:
            # Snapshots are already listed, and now we remove one of them to simulate PULL replication
            # from remote system that has `allow_empty_snapshots: false`. Only do this once.
            subprocess.check_call("zfs destroy data/src@2018-10-01_01-00",
                                  shell=True)
            deleted = True

        return resume_replications(*args, **kwargs)

    with patch("zettarepl.replication.run.resume_replications",
               resume_replications_mock):
        zettarepl._spawn_replication_tasks(
            select_by_class(ReplicationTask, definition.tasks))
        wait_replication_tasks_to_complete(zettarepl)

    message = observer.call_args_list[-1][0][0]
    assert isinstance(message, ReplicationTaskSuccess), message

    assert len(list_snapshots(local_shell, "data/dst", False)) == 1
Code example #21
def test_replication_progress_resume():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_04-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call(
        "zfs send data/src@2018-10-01_01-00 | zfs recv -s -F data/dst",
        shell=True)
    subprocess.check_call(
        "(zfs send -i data/src@2018-10-01_01-00 data/src@2018-10-01_02-00 | "
        " throttle -b 102400 | zfs recv -s -F data/dst) & "
        "sleep 1; killall zfs",
        shell=True)

    assert "receive_resume_token\t1-" in subprocess.check_output(
        "zfs get -H receive_resume_token data/dst",
        shell=True,
        encoding="utf-8")

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
            - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))

    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    calls = [
        call for call in zettarepl.observer.call_args_list
        if call[0][0].__class__ != ReplicationTaskDataProgress
    ]

    result = [
        ReplicationTaskStart("src"),
        ReplicationTaskSnapshotStart("src", "data/src", "2018-10-01_02-00", 0, 3),
        ReplicationTaskSnapshotSuccess("src", "data/src", "2018-10-01_02-00", 1, 3),
        ReplicationTaskSnapshotStart("src", "data/src", "2018-10-01_03-00", 1, 3),
        ReplicationTaskSnapshotSuccess("src", "data/src", "2018-10-01_03-00", 2, 3),
        ReplicationTaskSnapshotStart("src", "data/src", "2018-10-01_04-00", 2, 3),
        ReplicationTaskSnapshotSuccess("src", "data/src", "2018-10-01_04-00", 3, 3),
        ReplicationTaskSuccess("src"),
    ]
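
    # The expected sequence starts at 2018-10-01_02-00: snapshot _01-00 was
    # fully received before the engine ran, and the interrupted _02-00 send
    # is resumed via the receive_resume_token instead of being restarted.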

    for i, message in enumerate(result):
        call = calls[i]

        assert call[0][0].__class__ == message.__class__

        d1 = call[0][0].__dict__
        d2 = message.__dict__

        assert d1 == d2
Code example #22
def test_parallel_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call("zfs create data/src/a", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/a/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/a@2018-10-01_01-00",
                          shell=True)

    subprocess.check_call("zfs create data/src/b", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/b/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/b@2018-10-01_01-00",
                          shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)

    definition = Definition.from_data(
        yaml.safe_load(
            textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src-a:
            dataset: data/src/a
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"
          src-b:
            dataset: data/src/b
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src-a:
            direction: push
            transport:
              type: ssh
              hostname: localhost
              private-key: |
                -----BEGIN RSA PRIVATE KEY-----
                MIIEowIBAAKCAQEA6+D7AWdNnM8T5f2P1j5VIwVABjugtL252iEhhGWTNaR2duCK
                kuZmG3+o55b0vo2mUpjWt+CKsLkVL/e/JgZqzlHYm+MhRs4q7zODo/IEtx/uAVKM
                zqWS0Zs9NRdU1UnhsETjrhhQhNFwx7MUYlB2s8+mAduLbeqRuVIKsXzg5Rz+m3VL
                Wl4R82A10oZl0UPyIcEHAtHMgMVyGzQcNUsxsp/oN40nnkXiXHaXtSMxjEtOPLno
                t21bJ0ZV8RFmXtJqHXgyTTM5maJM3JwqhMHD2tcHodqCcnxvuuWv31pAB+HKQ8IM
                dORYnhZqqs/Bt80gRLQuJBpNeX2/cKPDDMCRnQIDAQABAoIBAQCil6+N9R5rw9Ys
                iA85GDhpbnoGkd2iGNHeiU3oTHgf1uEN6pO61PR3ahUMpmLIYy3N66q+jxoq3Tm8
                meL6HBxNYd+U/Qh4HS89OV45iV80t97ArJ2A6GL+9ypGyXFhoI7giWwEGqCOHSzH
                iyq25k4cfjspNqOyval7fBEA7Vq8smAMDJQE7WIJWzqrTbVAmVf9ho4r5dYxYBNW
                fXWo84DU8K+p0mE0BTokqqMWhKiA5JJG7OZB/iyeW2BWFOdASXvQmh1hRwMzpU4q
                BcZ7cJHz248SNSGMe5R3w7SmLO7PRr1/QkktJNdFmT7o/RGmQh8+KHql6r/vIzMM
                ci60OAxlAoGBAPYsZJZF3HK70fK3kARSzOD1LEVBDTCLnpVVzMSp6thG8cQqfCI5
                pCfT/NcUsCAP6J+yl6dqdtonXISmGolI1s1KCBihs5D4jEdjbg9KbKh68AsHXaD3
                v5L3POJ9hQnI6zJdvCfxniHdUArfyYhqsp1bnCn+85g4ed7BzDqMX2IDAoGBAPVL
                Y45rALw7lsjxJndyFdffJtyAeuwxgJNwWGuY21xhwqPbuwsgLHsGerHNKB5QAJT8
                JOlrcrfC13s6Tt4wmIy/o2h1p9tMaitmVR6pJzEfHyJhSRTbeFybQ9yqlKHuk2tI
                jcUZV/59cyRrjhPKWoVym3Fh/P7D1t1kfdTvBrvfAoGAUH0rVkb5UTo/5xBFsmQw
                QM1o8CvY2CqOa11mWlcERjrMCcuqUrZuCeeyH9DP1WveL3kBROf2fFWqVmTJAGIk
                eXLfOs6EG75of17vOWioJl4r5i8+WccniDH2YkeQHCbpX8puHtFNVt05spSBHG1m
                gTTW1pRZqUet8TuEPxBuj2kCgYAVjCrRruqgnmdvfWeQpI/wp6SlSBAEQZD24q6R
                vRq/8cKEXGAA6TGfGQGcLtZwWzzB2ahwbMTmCZKeO5AECqbL7mWvXm6BYCQPbeza
                Raews/grL/qYf3MCR41djAqEcw22Jeh2QPSu4VxE/cG8UVFEWb335tCvnIp6ZkJ7
                ewfPZwKBgEnc8HH1aq8IJ6vRBePNu6M9ON6PB9qW+ZHHcy47bcGogvYRQk1Ng77G
                LdZpyjWzzmb0Z4kjEYcrlGdbNQf9iaT0r+SJPzwBDG15+fRqK7EJI00UhjB0T67M
                otrkElxOBGqHSOl0jfUBrpSkSHiy0kDc3/cTAWKn0gowaznSlR9N
                -----END RSA PRIVATE KEY-----
              host-key: "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKwIWcodi3rHl0zS3VaddXnwfgIcpp1ECR9KhaB1cyIspgcHOA98wxY8onw+zHxpMUB4pve+t416FFLSkmlJ2f4="
            source-dataset: data/src/a
            target-dataset: data/dst/a
            recursive: true
            periodic-snapshot-tasks:
              - src-a
            auto: true
            retention-policy: none
            speed-limit: 100000
          src-b:
            direction: push
            transport:
              type: ssh
              hostname: localhost
              private-key: |
                -----BEGIN RSA PRIVATE KEY-----
                MIIEowIBAAKCAQEA6+D7AWdNnM8T5f2P1j5VIwVABjugtL252iEhhGWTNaR2duCK
                kuZmG3+o55b0vo2mUpjWt+CKsLkVL/e/JgZqzlHYm+MhRs4q7zODo/IEtx/uAVKM
                zqWS0Zs9NRdU1UnhsETjrhhQhNFwx7MUYlB2s8+mAduLbeqRuVIKsXzg5Rz+m3VL
                Wl4R82A10oZl0UPyIcEHAtHMgMVyGzQcNUsxsp/oN40nnkXiXHaXtSMxjEtOPLno
                t21bJ0ZV8RFmXtJqHXgyTTM5maJM3JwqhMHD2tcHodqCcnxvuuWv31pAB+HKQ8IM
                dORYnhZqqs/Bt80gRLQuJBpNeX2/cKPDDMCRnQIDAQABAoIBAQCil6+N9R5rw9Ys
                iA85GDhpbnoGkd2iGNHeiU3oTHgf1uEN6pO61PR3ahUMpmLIYy3N66q+jxoq3Tm8
                meL6HBxNYd+U/Qh4HS89OV45iV80t97ArJ2A6GL+9ypGyXFhoI7giWwEGqCOHSzH
                iyq25k4cfjspNqOyval7fBEA7Vq8smAMDJQE7WIJWzqrTbVAmVf9ho4r5dYxYBNW
                fXWo84DU8K+p0mE0BTokqqMWhKiA5JJG7OZB/iyeW2BWFOdASXvQmh1hRwMzpU4q
                BcZ7cJHz248SNSGMe5R3w7SmLO7PRr1/QkktJNdFmT7o/RGmQh8+KHql6r/vIzMM
                ci60OAxlAoGBAPYsZJZF3HK70fK3kARSzOD1LEVBDTCLnpVVzMSp6thG8cQqfCI5
                pCfT/NcUsCAP6J+yl6dqdtonXISmGolI1s1KCBihs5D4jEdjbg9KbKh68AsHXaD3
                v5L3POJ9hQnI6zJdvCfxniHdUArfyYhqsp1bnCn+85g4ed7BzDqMX2IDAoGBAPVL
                Y45rALw7lsjxJndyFdffJtyAeuwxgJNwWGuY21xhwqPbuwsgLHsGerHNKB5QAJT8
                JOlrcrfC13s6Tt4wmIy/o2h1p9tMaitmVR6pJzEfHyJhSRTbeFybQ9yqlKHuk2tI
                jcUZV/59cyRrjhPKWoVym3Fh/P7D1t1kfdTvBrvfAoGAUH0rVkb5UTo/5xBFsmQw
                QM1o8CvY2CqOa11mWlcERjrMCcuqUrZuCeeyH9DP1WveL3kBROf2fFWqVmTJAGIk
                eXLfOs6EG75of17vOWioJl4r5i8+WccniDH2YkeQHCbpX8puHtFNVt05spSBHG1m
                gTTW1pRZqUet8TuEPxBuj2kCgYAVjCrRruqgnmdvfWeQpI/wp6SlSBAEQZD24q6R
                vRq/8cKEXGAA6TGfGQGcLtZwWzzB2ahwbMTmCZKeO5AECqbL7mWvXm6BYCQPbeza
                Raews/grL/qYf3MCR41djAqEcw22Jeh2QPSu4VxE/cG8UVFEWb335tCvnIp6ZkJ7
                ewfPZwKBgEnc8HH1aq8IJ6vRBePNu6M9ON6PB9qW+ZHHcy47bcGogvYRQk1Ng77G
                LdZpyjWzzmb0Z4kjEYcrlGdbNQf9iaT0r+SJPzwBDG15+fRqK7EJI00UhjB0T67M
                otrkElxOBGqHSOl0jfUBrpSkSHiy0kDc3/cTAWKn0gowaznSlR9N
                -----END RSA PRIVATE KEY-----
              host-key: "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKwIWcodi3rHl0zS3VaddXnwfgIcpp1ECR9KhaB1cyIspgcHOA98wxY8onw+zHxpMUB4pve+t416FFLSkmlJ2f4="
            source-dataset: data/src/b
            target-dataset: data/dst/b
            recursive: true
            periodic-snapshot-tasks:
              - src-b
            auto: true
            retention-policy: none
            speed-limit: 100000
    """)))

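    # Retention is replaced with a mock so it does not actually run; the test
    # still asserts below that it was scheduled exactly once.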
    local_shell = LocalShell()
    zettarepl = Zettarepl(Mock(), local_shell)
    zettarepl._spawn_retention = Mock()
    zettarepl.set_tasks(definition.tasks)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))

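    # Each task is limited to 100 kB/s, and the 10-15 s window below implies
    # roughly 1 MB of snapshot data per source dataset (written earlier in
    # the test). Running both tasks in parallel keeps the total at ~10 s.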
    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()
    assert 10 <= end - start <= 15

    zettarepl._spawn_retention.assert_called_once()

    assert len(list_snapshots(local_shell, "data/dst/a", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/b", False)) == 1

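    # Reset the destination datasets before repeating the run sequentially.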
    subprocess.call("zfs destroy -r data/dst", shell=True)
    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)

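    # Forbid parallel execution: the same transfers now run one after the
    # other, so the wall time roughly doubles.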
    zettarepl._replication_tasks_can_run_in_parallel = Mock(return_value=False)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))

    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()
    assert 20 <= end - start <= 25

    assert len(list_snapshots(local_shell, "data/dst/a", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/b", False)) == 1
Code example #23
def test_replication_progress_pre_calculate():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/alice", shell=True)
    subprocess.check_call("zfs create data/src/bob", shell=True)
    subprocess.check_call("zfs create data/src/charlie", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
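    # Seed the target with a full recursive send of the first snapshot, so
    # the replication task below only has to transfer the 02-00 increment.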
    subprocess.check_call(
        "zfs send -R data/src@2018-10-01_01-00 | zfs recv -s -F data/dst",
        shell=True)

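    # "dave" is created after the seeding, so it has no incremental base;
    # the progress pre-calculation must still count it, for five snapshots
    # in total.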
    subprocess.check_call("zfs create data/src/dave", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00",
                          shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
            - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))

    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

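    # Ignore the byte-level data-progress events; only the per-snapshot
    # start/success messages are compared against the expected sequence.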
    calls = [
        call for call in zettarepl.observer.call_args_list
        if call[0][0].__class__ != ReplicationTaskDataProgress
    ]

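    # Expected sequence: each of the five datasets emits a start/success
    # pair carrying a running counter out of the pre-calculated total of 5.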
    result = [
        ReplicationTaskStart("src"),
        ReplicationTaskSnapshotStart("src", "data/src", "2018-10-01_02-00", 0,
                                     5),
        ReplicationTaskSnapshotSuccess("src", "data/src", "2018-10-01_02-00",
                                       1, 5),
        ReplicationTaskSnapshotStart("src", "data/src/alice",
                                     "2018-10-01_02-00", 1, 5),
        ReplicationTaskSnapshotSuccess("src", "data/src/alice",
                                       "2018-10-01_02-00", 2, 5),
        ReplicationTaskSnapshotStart("src", "data/src/bob", "2018-10-01_02-00",
                                     2, 5),
        ReplicationTaskSnapshotSuccess("src", "data/src/bob",
                                       "2018-10-01_02-00", 3, 5),
        ReplicationTaskSnapshotStart("src", "data/src/charlie",
                                     "2018-10-01_02-00", 3, 5),
        ReplicationTaskSnapshotSuccess("src", "data/src/charlie",
                                       "2018-10-01_02-00", 4, 5),
        ReplicationTaskSnapshotStart("src", "data/src/dave",
                                     "2018-10-01_02-00", 4, 5),
        ReplicationTaskSnapshotSuccess("src", "data/src/dave",
                                       "2018-10-01_02-00", 5, 5),
        ReplicationTaskSuccess("src"),
    ]

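    # Observed messages must match the expected ones exactly, both in type
    # and in payload.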
    for i, message in enumerate(result):
        call = calls[i]

        assert call[0][0].__class__ == message.__class__
        assert call[0][0].__dict__ == message.__dict__
Code example #24
def test_replication_retry(caplog):
    subprocess.call("zfs destroy -r data/src", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            transport:
              type: ssh
              hostname: localhost
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: false
            retention-policy: none
            retries: 2
    """))
    set_localhost_transport_options(
        definition["replication-tasks"]["src"]["transport"])
    definition["replication-tasks"]["src"]["transport"][
        "private-key"] = textwrap.dedent("""\
        -----BEGIN RSA PRIVATE KEY-----
        MIIEowIBAAKCAQEA0/5hQu83T9Jdl1NT9malC0ovHMHLspa4t6dFTSHWRUHsA3+t
        q50bBfrsS+hm4qMndxm9Sqig5/TqlM00W49SkooyU/0j4Q4xjvJ61RXOtHXPOoMH
        opLjRlmbuxkWCb0CmwXvIunaebBFfPx/VuwNJNNv9ZNcgeQJj5ggjI7hnikK4Pn4
        jpqcivqIStNO/6q+9NLsNkMQu8vq/zuxC9ePyeaywbbAIcpKREsWgiNtuhsPxnRS
        +gVQ+XVgE6RFJzMO13MtE+E4Uphseip+fSNVmLeAQyGUrUg12JevJYnMbLOQtacB
        GNDMHSwcwAzqVYPq8oqjQhWvqBntjcd/qK3P+wIDAQABAoIBAHy8tzoNS7x6CXvb
        GhJn/0EPW31OQq9IpFPb5pkmCdAio97DJ8tM2/O+238mtjMw0S3xRUJCyrrxj34S
        6HXfdTSogEiPMKdiFKMJ5mCvPjtM/qxtIPb1+ykP3ORQNHlyb7AL49PlShpEL/8F
        C2B38Jv0lXIoTUxYg4+scaqDABpw9aaYTODcJ9uvFhAcAHALKaN0iiz050dWoH9D
        CkJ1UwoHVUz6XGZ3lOR/qxUDGd72Ara0cizCXQZIkOtu8Kfnfnlx3pqOZJgbkr49
        JY3LQId5bVhNlQLKlTSAameIiAJETeLvxHzJHCvMm0LnKDfLiejq/dEk5CMgjrVz
        ExV+ioECgYEA72zxquQJo051o2mrG0DhVBT0QzXo+8yjNYVha2stBOMGvEnL0n2H
        VFDdWhpZVzRs1uR6sJC14YTGfBNk7NTaQSorgrKvYs1E/krZEMsFquwIcLtbHxYP
        zjBSQwYA7jIEFViIkZwptb+qfA+c1YehZTYzx4R/hlkkLlTObyRFcyECgYEA4qtK
        /7UaBG4kumW+cdRnqJ+KO21PylBnGaCm6yH6DO5SKlqoHvYdyds70Oat9fPX4BRJ
        2aMTivZMkGgu6Dc1AViRgBoTIReMQ9TY3y8d0unMtBddAIx0guiP/rtPrCRTC07m
        s31b6wkLTnPnW3W2N8t4LfdTLpsgmA3t5Q6Iu5sCgYB9Lg+4kpu7Z3U4KDJPAIAP
        Lxl63n/ezuJyRDdoK1QRXwWRgl/vwLP10IW661XUs1NIk5LWKAMAUyRXkOhOrwch
        1QOExRnP5ZTyA340OoHPGLNdBYgh264N1tPbuRLZdwsNggl9YBGqtfhT/vG37r7i
        pREzesIWIxs4ohyAnY02IQKBgQDARd0Qm2a+a0/sbXHmzO5BM1PmpQsR6rIKIyR0
        QBYD8gTwuIXz/YG3QKi0w3i9MWLlSVB7tMFXFyZLOJTRlkL4KVEDARtI7tikkWCF
        sUnzJy/ldAwH8xzCDtRWmD01IHrxFLTNfIEEFl/o5JhUFL3FBmujUjDVT/GOCgLK
        UlHaEQKBgFUGEgI6/GvV/JecnEWLqd+HpRHiBywpfOkAFmJGokdAOvF0QDFHK9/P
        stO7TRqUHufxZQIeTJ7sGdsabEAypiKSFBR8w1qVg+iQZ+M+t0vCgXlnHLaw2SeJ
        1YT8kH1TsdzozkxJ7tFa1A5YI37ZiUiN7ykJ0l4Zal6Nli9z5Oa0
        -----END RSA PRIVATE KEY-----
    """)  # Some random invalid SSH key
    definition = Definition.from_data(definition)

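    # INFO level is required to capture the retry and failure messages
    # asserted below.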
    caplog.set_level(logging.INFO)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

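    # The failing SSH transport surfaces in the log as a non-recoverable
    # replication error.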
    assert any("non-recoverable replication error" in record.message
               for record in caplog.get_records("call"))