Example #1
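These examples come from zettarepl's integration test suite and share a common header; a sketch of the imports they assume (module paths taken from the zettarepl source tree, so verify them against the version in use):

import logging
import subprocess
import textwrap
import time
from unittest.mock import Mock, patch

import pytest  # the fixtures (direction, transport, max_parallel_replications) come from parametrize decorators not shown here
import yaml

from zettarepl.definition.definition import Definition
from zettarepl.observer import (ReplicationTaskDataProgress, ReplicationTaskSnapshotProgress,
                                ReplicationTaskSnapshotStart, ReplicationTaskSnapshotSuccess,
                                ReplicationTaskStart, ReplicationTaskSuccess)
from zettarepl.replication.task.task import ReplicationTask
from zettarepl.snapshot.list import list_snapshots
from zettarepl.transport.local import LocalShell
from zettarepl.utils.itertools import select_by_class
from zettarepl.utils.test import (create_zettarepl, set_localhost_transport_options,
                                  wait_replication_tasks_to_complete)
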
def test_push_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/child", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: true
            retention-policy: none
    """))

    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    # zettarepl.observer is a Mock in these tests; each call's first
    # positional argument is the emitted message.
    assert sum(1 for m in zettarepl.observer.call_args_list
               if isinstance(m[0][0], ReplicationTaskSuccess)) == 1

    subprocess.check_call("zfs destroy -r data/src/child", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)

    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert sum(1 for m in zettarepl.observer.call_args_list
               if isinstance(m[0][0], ReplicationTaskSuccess)) == 2

    local_shell = LocalShell()
    # data/src/child was destroyed before the second run, so data/dst/child
    # still holds only the snapshot from the first replication.
    assert len(list_snapshots(local_shell, "data/dst/child", False)) == 1
Example #2
def test_replication_data_progress():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: ssh
              hostname: 127.0.0.1
            source-dataset:
            - data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
            - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    set_localhost_transport_options(
        definition["replication-tasks"]["src"]["transport"])
    definition["replication-tasks"]["src"]["speed-limit"] = 10240 * 9

    with patch("zettarepl.replication.run.DatasetSizeObserver.INTERVAL", 5):
        definition = Definition.from_data(definition)
        zettarepl = create_zettarepl(definition)
        zettarepl._spawn_replication_tasks(
            select_by_class(ReplicationTask, definition.tasks))
        wait_replication_tasks_to_complete(zettarepl)

    calls = [
        call for call in zettarepl.observer.call_args_list
        if call[0][0].__class__ == ReplicationTaskDataProgress
    ]

    assert len(calls) == 2

    assert 1024 * 1024 * 0.8 <= calls[0][0][0].src_size <= 1024 * 1024 * 1.2
    assert 0 <= calls[0][0][0].dst_size <= 10240 * 1.2

    assert 1024 * 1024 * 0.8 <= calls[1][0][0].src_size <= 1024 * 1024 * 1.2
    assert 10240 * 6 * 0.8 <= calls[1][0][0].dst_size <= 10240 * 6 * 1.2
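set_localhost_transport_options comes from zettarepl's test utilities and fills in the credentials the "ssh" transport needs to connect back to localhost. Conceptually it does something like the following sketch (the key constants are assumptions; the real helper supplies its own test key material):

def set_localhost_transport_options(transport):
    # Hypothetical sketch: add the SSH connection options the "ssh"
    # transport schema requires for a loopback connection.
    transport.update({
        "username": "root",                   # assumed test account
        "private-key": TEST_SSH_PRIVATE_KEY,  # assumed: key authorized for root@localhost
        "host-key": TEST_SSH_HOST_KEY,        # assumed: the host's public SSH key
    })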
Example #3
def test_replication_progress_pre_calculate():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs create data/src/alice", shell=True)
    subprocess.check_call("zfs create data/src/bob", shell=True)
    subprocess.check_call("zfs create data/src/charlie", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_01-00",
                          shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call(
        "zfs send -R data/src@2018-10-01_01-00 | zfs recv -s -F data/dst",
        shell=True)

    subprocess.check_call("zfs create data/src/dave", shell=True)
    subprocess.check_call("zfs snapshot -r data/src@2018-10-01_02-00",
                          shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
            - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))

    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    calls = [
        call for call in zettarepl.observer.call_args_list
        if call[0][0].__class__ != ReplicationTaskDataProgress
    ]

    # Five snapshot sends are expected: incrementals for src, alice, bob and
    # charlie, plus a full send for the newly created dave.
    result = [
        ReplicationTaskStart("src"),
        ReplicationTaskSnapshotStart("src", "data/src", "2018-10-01_02-00", 0,
                                     5),
        ReplicationTaskSnapshotSuccess("src", "data/src", "2018-10-01_02-00",
                                       1, 5),
        ReplicationTaskSnapshotStart("src", "data/src/alice",
                                     "2018-10-01_02-00", 1, 5),
        ReplicationTaskSnapshotSuccess("src", "data/src/alice",
                                       "2018-10-01_02-00", 2, 5),
        ReplicationTaskSnapshotStart("src", "data/src/bob", "2018-10-01_02-00",
                                     2, 5),
        ReplicationTaskSnapshotSuccess("src", "data/src/bob",
                                       "2018-10-01_02-00", 3, 5),
        ReplicationTaskSnapshotStart("src", "data/src/charlie",
                                     "2018-10-01_02-00", 3, 5),
        ReplicationTaskSnapshotSuccess("src", "data/src/charlie",
                                       "2018-10-01_02-00", 4, 5),
        ReplicationTaskSnapshotStart("src", "data/src/dave",
                                     "2018-10-01_02-00", 4, 5),
        ReplicationTaskSnapshotSuccess("src", "data/src/dave",
                                       "2018-10-01_02-00", 5, 5),
        ReplicationTaskSuccess("src"),
    ]

    for i, message in enumerate(result):
        call = calls[i]

        assert call[0][0].__class__ == message.__class__

        d1 = call[0][0].__dict__
        d2 = message.__dict__

        assert d1 == d2
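The class-and-fields comparison loop above is repeated verbatim in the following examples; a small helper could factor it out (assert_observer_messages is a hypothetical name, not part of zettarepl):

def assert_observer_messages(calls, expected):
    # Compare each observed message against the expected one, first by class,
    # then by its public fields.
    for call_args, message in zip(calls, expected):
        observed = call_args[0][0]
        assert observed.__class__ == message.__class__
        assert observed.__dict__ == message.__dict__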
Example #4
def test_replication_progress(transport):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call("zfs create data/src/src1", shell=True)
    subprocess.check_call("zfs snapshot data/src/src1@2018-10-01_01-00",
                          shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/src1/blob bs=1M count=1",
        shell=True)
    subprocess.check_call("zfs snapshot data/src/src1@2018-10-01_02-00",
                          shell=True)
    subprocess.check_call("rm /mnt/data/src/src1/blob", shell=True)
    subprocess.check_call("zfs snapshot data/src/src1@2018-10-01_03-00",
                          shell=True)

    subprocess.check_call("zfs create data/src/src2", shell=True)
    subprocess.check_call("zfs snapshot data/src/src2@2018-10-01_01-00",
                          shell=True)
    subprocess.check_call("zfs snapshot data/src/src2@2018-10-01_02-00",
                          shell=True)
    subprocess.check_call("zfs snapshot data/src/src2@2018-10-01_03-00",
                          shell=True)
    subprocess.check_call("zfs snapshot data/src/src2@2018-10-01_04-00",
                          shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            source-dataset:
            - data/src/src1
            - data/src/src2
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
            - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))
    definition["replication-tasks"]["src"]["transport"] = transport
    if transport["type"] == "ssh":
        definition["replication-tasks"]["src"]["speed-limit"] = 10240 * 9

    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    calls = [
        call for call in zettarepl.observer.call_args_list
        if call[0][0].__class__ != ReplicationTaskDataProgress
    ]

    result = [
        ReplicationTaskStart("src"),
        ReplicationTaskSnapshotStart("src", "data/src/src1",
                                     "2018-10-01_01-00", 0, 3),
        ReplicationTaskSnapshotSuccess("src", "data/src/src1",
                                       "2018-10-01_01-00", 1, 3),
        ReplicationTaskSnapshotStart("src", "data/src/src1",
                                     "2018-10-01_02-00", 1, 3),
        ReplicationTaskSnapshotSuccess("src", "data/src/src1",
                                       "2018-10-01_02-00", 2, 3),
        ReplicationTaskSnapshotStart("src", "data/src/src1",
                                     "2018-10-01_03-00", 2, 3),
        ReplicationTaskSnapshotSuccess("src", "data/src/src1",
                                       "2018-10-01_03-00", 3, 3),
        ReplicationTaskSnapshotStart("src", "data/src/src2",
                                     "2018-10-01_01-00", 3, 7),
        ReplicationTaskSnapshotSuccess("src", "data/src/src2",
                                       "2018-10-01_01-00", 4, 7),
        ReplicationTaskSnapshotStart("src", "data/src/src2",
                                     "2018-10-01_02-00", 4, 7),
        ReplicationTaskSnapshotSuccess("src", "data/src/src2",
                                       "2018-10-01_02-00", 5, 7),
        ReplicationTaskSnapshotStart("src", "data/src/src2",
                                     "2018-10-01_03-00", 5, 7),
        ReplicationTaskSnapshotSuccess("src", "data/src/src2",
                                       "2018-10-01_03-00", 6, 7),
        ReplicationTaskSnapshotStart("src", "data/src/src2",
                                     "2018-10-01_04-00", 6, 7),
        ReplicationTaskSnapshotSuccess("src", "data/src/src2",
                                       "2018-10-01_04-00", 7, 7),
        ReplicationTaskSuccess("src"),
    ]

    if transport["type"] == "ssh":
        result.insert(
            4,
            ReplicationTaskSnapshotProgress(
                "src",
                "data/src/src1",
                "2018-10-01_02-00",
                1,
                3,
                10240 * 9 * 10,  # Progress is polled every 10 seconds, so by
                                 # then about 10x the speed limit has been sent
                2162784,  # Empirical value
            ))

    for i, message in enumerate(result):
        call = calls[i]

        assert call[0][0].__class__ == message.__class__, calls

        d1 = call[0][0].__dict__
        d2 = message.__dict__

        if isinstance(message, ReplicationTaskSnapshotProgress):
            bytes_sent_1 = d1.pop("bytes_sent")
            bytes_total_1 = d1.pop("bytes_total")
            bytes_sent_2 = d2.pop("bytes_sent")
            bytes_total_2 = d2.pop("bytes_total")

            assert 0.8 <= bytes_sent_1 / bytes_sent_2 <= 1.2
            assert 0.8 <= bytes_total_1 / bytes_total_2 <= 1.2

        assert d1 == d2
Example #5
def test_replication_progress_resume():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_02-00", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_03-00", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_04-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call(
        "zfs send data/src@2018-10-01_01-00 | zfs recv -s -F data/dst",
        shell=True)
    subprocess.check_call(
        "(zfs send -i data/src@2018-10-01_01-00 data/src@2018-10-01_02-00 | "
        " throttle -b 102400 | zfs recv -s -F data/dst) & "
        "sleep 1; killall zfs",
        shell=True)

    assert "receive_resume_token\t1-" in subprocess.check_output(
        "zfs get -H receive_resume_token data/dst",
        shell=True,
        encoding="utf-8")

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        replication-tasks:
          src:
            direction: push
            transport:
              type: local
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            also-include-naming-schema:
            - "%Y-%m-%d_%H-%M"
            auto: false
            retention-policy: none
            retries: 1
    """))

    definition = Definition.from_data(definition)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    calls = [
        call for call in zettarepl.observer.call_args_list
        if call[0][0].__class__ != ReplicationTaskDataProgress
    ]

    # data/src@2018-10-01_01-00 already exists on the destination and _02-00
    # was interrupted mid-send, so three snapshots remain: the resumed _02-00,
    # then _03-00 and _04-00.
    result = [
        ReplicationTaskStart("src"),
        ReplicationTaskSnapshotStart("src", "data/src", "2018-10-01_02-00", 0,
                                     3),
        ReplicationTaskSnapshotSuccess("src", "data/src", "2018-10-01_02-00",
                                       1, 3),
        ReplicationTaskSnapshotStart("src", "data/src", "2018-10-01_03-00", 1,
                                     3),
        ReplicationTaskSnapshotSuccess("src", "data/src", "2018-10-01_03-00",
                                       2, 3),
        ReplicationTaskSnapshotStart("src", "data/src", "2018-10-01_04-00", 2,
                                     3),
        ReplicationTaskSnapshotSuccess("src", "data/src", "2018-10-01_04-00",
                                       3, 3),
        ReplicationTaskSuccess("src"),
    ]

    for i, message in enumerate(result):
        call = calls[i]

        assert call[0][0].__class__ == message.__class__

        d1 = call[0][0].__dict__
        d2 = message.__dict__

        assert d1 == d2
Example #6
def test_parallel_replication():
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call("zfs create data/src/a", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/a/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/a@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/src/b", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/b/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/b@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src-a:
            dataset: data/src/a
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"
          src-b:
            dataset: data/src/b
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src-a:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/a
            target-dataset: data/dst/a
            recursive: true
            periodic-snapshot-tasks:
              - src-a
            auto: true
            retention-policy: none
            speed-limit: 100000
          src-b:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/b
            target-dataset: data/dst/b
            recursive: true
            periodic-snapshot-tasks:
              - src-b
            auto: true
            retention-policy: none
            speed-limit: 100000
    """))
    set_localhost_transport_options(definition["replication-tasks"]["src-a"]["transport"])
    set_localhost_transport_options(definition["replication-tasks"]["src-b"]["transport"])
    definition = Definition.from_data(definition)

    local_shell = LocalShell()
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))

    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()
    # Each 1 MiB send takes ~10.5 s at the 100000 B/s speed limit; running in
    # parallel, both finish together.
    assert 10 <= end - start <= 15

    zettarepl._spawn_retention.assert_called_once()

    assert sum(1 for m in zettarepl.observer.call_args_list if isinstance(m[0][0], ReplicationTaskSuccess)) == 2

    assert len(list_snapshots(local_shell, "data/dst/a", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/b", False)) == 1

    subprocess.call("zfs destroy -r data/dst", shell=True)
    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)

    zettarepl._replication_tasks_can_run_in_parallel = Mock(return_value=False)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))

    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()
    # With parallel execution mocked off, the two ~10.5 s transfers run back
    # to back.
    assert 20 <= end - start <= 25

    assert sum(1 for m in zettarepl.observer.call_args_list if isinstance(m[0][0], ReplicationTaskSuccess)) == 4

    assert len(list_snapshots(local_shell, "data/dst/a", False)) == 1
    assert len(list_snapshots(local_shell, "data/dst/b", False)) == 1
Example #7
def test_replication_retry(caplog, direction):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call(
        "dd if=/dev/urandom of=/mnt/data/src/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            transport:
              type: ssh
              hostname: 127.0.0.1
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            auto: false
            retention-policy: none
            speed-limit: 200000
            retries: 2
    """))
    definition["replication-tasks"]["src"]["direction"] = direction
    if direction == "push":
        definition["replication-tasks"]["src"]["periodic-snapshot-tasks"] = [
            "src"
        ]
    else:
        definition["replication-tasks"]["src"]["naming-schema"] = [
            "%Y-%m-%d_%H-%M"
        ]
    set_localhost_transport_options(
        definition["replication-tasks"]["src"]["transport"])
    definition = Definition.from_data(definition)

    caplog.set_level(logging.INFO)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))

    time.sleep(2)
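    # Give the transfer a moment to start, then kill it mid-stream to force a
    # recoverable error; with retries: 2 the task resumes and completes.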
    if direction == "push":
        subprocess.check_output("kill $(pgrep -f '^zfs recv')", shell=True)
    else:
        subprocess.check_output("kill $(pgrep -f '^(zfs send|zfs: sending)')",
                                shell=True)

    wait_replication_tasks_to_complete(zettarepl)

    assert any(" recoverable replication error" in record.message
               for record in caplog.get_records("call"))
    assert any("Resuming replication for destination dataset" in record.message
               for record in caplog.get_records("call"))

    success = zettarepl.observer.call_args_list[-1][0][0]
    assert isinstance(success, ReplicationTaskSuccess), success

    local_shell = LocalShell()
    assert len(list_snapshots(local_shell, "data/dst", False)) == 1
Example #8
def test_replication_retry(caplog):
    subprocess.call("zfs destroy -r data/src", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)
    subprocess.check_call("zfs snapshot data/src@2018-10-01_01-00", shell=True)

    definition = yaml.safe_load(
        textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src:
            dataset: data/src
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src:
            transport:
              type: ssh
              hostname: localhost
            direction: push
            source-dataset: data/src
            target-dataset: data/dst
            recursive: true
            periodic-snapshot-tasks:
              - src
            auto: false
            retention-policy: none
            retries: 2
    """))
    set_localhost_transport_options(
        definition["replication-tasks"]["src"]["transport"])
    definition["replication-tasks"]["src"]["transport"][
        "private-key"] = textwrap.dedent("""\
        -----BEGIN RSA PRIVATE KEY-----
        MIIEowIBAAKCAQEA0/5hQu83T9Jdl1NT9malC0ovHMHLspa4t6dFTSHWRUHsA3+t
        q50bBfrsS+hm4qMndxm9Sqig5/TqlM00W49SkooyU/0j4Q4xjvJ61RXOtHXPOoMH
        opLjRlmbuxkWCb0CmwXvIunaebBFfPx/VuwNJNNv9ZNcgeQJj5ggjI7hnikK4Pn4
        jpqcivqIStNO/6q+9NLsNkMQu8vq/zuxC9ePyeaywbbAIcpKREsWgiNtuhsPxnRS
        +gVQ+XVgE6RFJzMO13MtE+E4Uphseip+fSNVmLeAQyGUrUg12JevJYnMbLOQtacB
        GNDMHSwcwAzqVYPq8oqjQhWvqBntjcd/qK3P+wIDAQABAoIBAHy8tzoNS7x6CXvb
        GhJn/0EPW31OQq9IpFPb5pkmCdAio97DJ8tM2/O+238mtjMw0S3xRUJCyrrxj34S
        6HXfdTSogEiPMKdiFKMJ5mCvPjtM/qxtIPb1+ykP3ORQNHlyb7AL49PlShpEL/8F
        C2B38Jv0lXIoTUxYg4+scaqDABpw9aaYTODcJ9uvFhAcAHALKaN0iiz050dWoH9D
        CkJ1UwoHVUz6XGZ3lOR/qxUDGd72Ara0cizCXQZIkOtu8Kfnfnlx3pqOZJgbkr49
        JY3LQId5bVhNlQLKlTSAameIiAJETeLvxHzJHCvMm0LnKDfLiejq/dEk5CMgjrVz
        ExV+ioECgYEA72zxquQJo051o2mrG0DhVBT0QzXo+8yjNYVha2stBOMGvEnL0n2H
        VFDdWhpZVzRs1uR6sJC14YTGfBNk7NTaQSorgrKvYs1E/krZEMsFquwIcLtbHxYP
        zjBSQwYA7jIEFViIkZwptb+qfA+c1YehZTYzx4R/hlkkLlTObyRFcyECgYEA4qtK
        /7UaBG4kumW+cdRnqJ+KO21PylBnGaCm6yH6DO5SKlqoHvYdyds70Oat9fPX4BRJ
        2aMTivZMkGgu6Dc1AViRgBoTIReMQ9TY3y8d0unMtBddAIx0guiP/rtPrCRTC07m
        s31b6wkLTnPnW3W2N8t4LfdTLpsgmA3t5Q6Iu5sCgYB9Lg+4kpu7Z3U4KDJPAIAP
        Lxl63n/ezuJyRDdoK1QRXwWRgl/vwLP10IW661XUs1NIk5LWKAMAUyRXkOhOrwch
        1QOExRnP5ZTyA340OoHPGLNdBYgh264N1tPbuRLZdwsNggl9YBGqtfhT/vG37r7i
        pREzesIWIxs4ohyAnY02IQKBgQDARd0Qm2a+a0/sbXHmzO5BM1PmpQsR6rIKIyR0
        QBYD8gTwuIXz/YG3QKi0w3i9MWLlSVB7tMFXFyZLOJTRlkL4KVEDARtI7tikkWCF
        sUnzJy/ldAwH8xzCDtRWmD01IHrxFLTNfIEEFl/o5JhUFL3FBmujUjDVT/GOCgLK
        UlHaEQKBgFUGEgI6/GvV/JecnEWLqd+HpRHiBywpfOkAFmJGokdAOvF0QDFHK9/P
        stO7TRqUHufxZQIeTJ7sGdsabEAypiKSFBR8w1qVg+iQZ+M+t0vCgXlnHLaw2SeJ
        1YT8kH1TsdzozkxJ7tFa1A5YI37ZiUiN7ykJ0l4Zal6Nli9z5Oa0
        -----END RSA PRIVATE KEY-----
    """)  # Some random invalid SSH key
    definition = Definition.from_data(definition)

    caplog.set_level(logging.INFO)
    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(
        select_by_class(ReplicationTask, definition.tasks))
    wait_replication_tasks_to_complete(zettarepl)

    assert any("non-recoverable replication error" in record.message
               for record in caplog.get_records("call"))
Example #9
def test_parallel_replication_3(max_parallel_replications):
    subprocess.call("zfs destroy -r data/src", shell=True)
    subprocess.call("zfs receive -A data/dst", shell=True)
    subprocess.call("zfs destroy -r data/dst", shell=True)

    subprocess.check_call("zfs create data/src", shell=True)

    subprocess.check_call("zfs create data/src/a", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/a/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/a@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/src/b", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/b/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/b@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/src/c", shell=True)
    subprocess.check_call("dd if=/dev/urandom of=/mnt/data/src/c/blob bs=1M count=1", shell=True)
    subprocess.check_call("zfs snapshot data/src/c@2018-10-01_01-00", shell=True)

    subprocess.check_call("zfs create data/dst", shell=True)
    subprocess.check_call("zfs create data/dst/a", shell=True)
    subprocess.check_call("zfs create data/dst/b", shell=True)
    subprocess.check_call("zfs create data/dst/c", shell=True)

    definition = yaml.safe_load(textwrap.dedent("""\
        timezone: "UTC"

        periodic-snapshot-tasks:
          src-a:
            dataset: data/src/a
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"
          src-b:
            dataset: data/src/b
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"
          src-c:
            dataset: data/src/c
            recursive: true
            lifetime: PT1H
            naming-schema: "%Y-%m-%d_%H-%M"
            schedule:
              minute: "0"

        replication-tasks:
          src-a:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/a
            target-dataset: data/dst/a
            recursive: true
            periodic-snapshot-tasks:
              - src-a
            auto: true
            retention-policy: none
            speed-limit: 100000
          src-b:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/b
            target-dataset: data/dst/b
            recursive: true
            periodic-snapshot-tasks:
              - src-b
            auto: true
            retention-policy: none
            speed-limit: 100000
          src-c:
            direction: push
            transport:
              type: ssh
              hostname: localhost
            source-dataset: data/src/c
            target-dataset: data/dst/c
            recursive: true
            periodic-snapshot-tasks:
              - src-c
            auto: true
            retention-policy: none
            speed-limit: 100000
    """))
    definition["max-parallel-replication-tasks"] = max_parallel_replications
    set_localhost_transport_options(definition["replication-tasks"]["src-a"]["transport"])
    set_localhost_transport_options(definition["replication-tasks"]["src-b"]["transport"])
    set_localhost_transport_options(definition["replication-tasks"]["src-c"]["transport"])
    definition = Definition.from_data(definition)

    zettarepl = create_zettarepl(definition)
    zettarepl._spawn_replication_tasks(select_by_class(ReplicationTask, definition.tasks))

    start = time.monotonic()
    wait_replication_tasks_to_complete(zettarepl)
    end = time.monotonic()

    if max_parallel_replications == 3:
        # All three ~10.5 s transfers run at once.
        assert 10 <= end - start <= 15
    else:
        # With a lower limit the third transfer must wait for a free slot.
        assert 20 <= end - start <= 25