Example #1
import sys

# `load_definition_raw` and `create_transport` are zettarepl helpers that the
# original module imports; they are assumed to be in scope here.
def get_transport(definition_path, transport):
    definition = load_definition_raw(definition_path)

    if transport:
        try:
            # Re-bind `transport` from the transport's name to its definition dict.
            transport = definition.get("transports", {})[transport]
        except KeyError:
            sys.stderr.write(f"Invalid transport {transport!r}\n")
            sys.exit(1)

        return create_transport(transport)
    else:
        return create_transport({"type": "local"})
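A minimal usage sketch for Example #1. The definition layout below is an
assumption inferred from the `definition.get("transports", {})` lookup; the
authoritative schema is the zettarepl definition format.

# Hypothetical definition file contents (YAML):
#
#   transports:
#     remote-box:
#       type: ssh
#       hostname: 192.168.0.2
#
transport = get_transport("definition.yaml", "remote-box")  # named SSH transport
local = get_transport("definition.yaml", None)              # falls back to {"type": "local"}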
Example #2
async def _get_zettarepl_shell(self, transport, ssh_credentials):
    transport_definition = await self._define_transport(transport, ssh_credentials)
    transport = create_transport(transport_definition)
    # `transport.shell` is the transport's Shell class; instantiate it for this transport.
    shell = transport.shell(transport)
    try:
        yield shell
    finally:
        await self.middleware.run_in_thread(shell.close)
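Because the method yields the shell, it is presumably wrapped with
`contextlib.asynccontextmanager` in the enclosing class (the decorator is not
part of this excerpt). A sketch of the assumed call pattern:

# Assumed usage inside the same class; `transport` and `ssh_credentials`
# come from the caller.
async with self._get_zettarepl_shell(transport, ssh_credentials) as shell:
    shell.exec(["zfs", "list"])  # the `finally` block above closes the shell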
Example #3
    async def _get_zettarepl_shell(self, transport, ssh_credentials):
        if transport != "LOCAL":
            await self.middleware.call("network.general.will_perform_activity", "replication")

        transport_definition = await self._define_transport(transport, ssh_credentials)
        transport = create_transport(transport_definition)
        shell = transport.shell(transport)
        try:
            yield shell
        finally:
            await self.middleware.run_in_thread(shell.close)
Example #4
    async def _get_zettarepl_shell(self, transport, ssh_credentials):
        if transport != "LOCAL":
            await self.middleware.call("network.general.will_perform_activity", "replication")

        if transport == "SSH+NETCAT":
            # There is no difference shell-wise, but `_define_transport` for `SSH+NETCAT` will fail if we don't
            # supply `netcat_active_side` and other parameters that are irrelevant here.
            transport = "SSH"

        transport_definition = await self._define_transport(transport, ssh_credentials)
        transport = create_transport(transport_definition)
        shell = transport.shell(transport)
        try:
            yield shell
        finally:
            await self.middleware.run_in_thread(shell.close)
Example #5
import copy
import subprocess
import time

import pytest

from zettarepl.transport.create import create_transport


def test__async_exec_timeout(transport, stdout):
    if transport["type"] == "local":
        expected_timeout = 5
    else:
        expected_timeout = 10

    transport_inst = create_transport(copy.deepcopy(transport))
    shell = transport_inst.shell(transport_inst)

    start = time.monotonic()
    with pytest.raises(TimeoutError):
        shell.exec(["python", "-c", "'ZETTAREPL_TEST_MARKER_1'; import time; time.sleep(15)"], timeout=5)
    end = time.monotonic()
    assert expected_timeout * 0.9 < end - start < expected_timeout * 1.1

    if transport["type"] == "local":
        assert int(subprocess.check_output(
            "ps axw | grep ZETTAREPL_TEST_MARKER_1 | grep -v grep | wc -l", shell=True, encoding="utf-8",
        ).strip()) == 0
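The `transport` and `stdout` arguments suggest the test is driven by pytest
parametrization; a hypothetical decoration is sketched below (the real fixture
values live elsewhere in the test suite):

@pytest.mark.parametrize("transport,stdout", [
    ({"type": "local"}, None),  # exercises the 5-second branch
    # An SSH transport dict would exercise the 10-second branch; its
    # connection parameters are site-specific and omitted here.
])
def test__async_exec_timeout(transport, stdout):
    ...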
Example #6
    @classmethod
    def from_data(cls, id, data: dict,
                  periodic_snapshot_tasks: [PeriodicSnapshotTask]):
        replication_task_validator.validate(data)

        for k in [
                "source-dataset", "naming-schema", "also-include-naming-schema"
        ]:
            if k in data and isinstance(data[k], str):
                data[k] = [data[k]]

        data.setdefault("exclude", [])
        data.setdefault("properties", True)
        data.setdefault("replicate", False)
        data.setdefault("periodic-snapshot-tasks", [])
        data.setdefault("only-matching-schedule", False)
        data.setdefault("allow-from-scratch", False)
        data.setdefault("hold-pending-snapshots", False)
        data.setdefault("compression", None)
        data.setdefault("speed-limit", None)
        data.setdefault("dedup", False)
        data.setdefault("large-block", False)
        data.setdefault("embed", False)
        data.setdefault("compressed", False)
        data.setdefault("retries", 5)
        data.setdefault("logging-level", "notset")

        resolved_periodic_snapshot_tasks = []
        for periodic_snapshot_task_id in data["periodic-snapshot-tasks"]:
            for periodic_snapshot_task in periodic_snapshot_tasks:
                if periodic_snapshot_task.id == periodic_snapshot_task_id:
                    resolved_periodic_snapshot_tasks.append(
                        periodic_snapshot_task)
                    break
            else:
                raise ValueError(
                    f"Periodic snapshot task {periodic_snapshot_task_id!r} does not exist"
                )

        if data["recursive"]:
            for source_dataset in data["source-dataset"]:
                for periodic_snapshot_task in resolved_periodic_snapshot_tasks:
                    if is_child(source_dataset,
                                periodic_snapshot_task.dataset):
                        for exclude in periodic_snapshot_task.exclude:
                            if exclude not in data["exclude"]:
                                raise ValueError(
                                    "Replication tasks should exclude everything their periodic snapshot tasks exclude "
                                    f"(task does not exclude {exclude!r} from periodic snapshot task "
                                    f"{periodic_snapshot_task.id!r})")

        if data["replicate"]:
            if not data["recursive"]:
                raise ValueError(
                    "Replication tasks that replicate entire filesystem should be recursive"
                )
            if data["exclude"]:
                raise ValueError(
                    "Replication tasks that replicate entire filesystem can't exclude datasets"
                )
            if not data["properties"]:
                raise ValueError(
                    "Replication tasks that replicate entire filesystem can't exclude properties"
                )

        data["direction"] = ReplicationDirection(data["direction"])

        if data["direction"] == ReplicationDirection.PUSH:
            if "naming-schema" in data:
                raise ValueError(
                    "Push replication task can't have naming-schema")

            data.setdefault("also-include-naming-schema", [])

            if not resolved_periodic_snapshot_tasks and not data[
                    "also-include-naming-schema"]:
                raise ValueError(
                    "You must at least provide either periodic-snapshot-tasks or also-include-naming-schema "
                    "for push replication task")

        elif data["direction"] == ReplicationDirection.PULL:
            if "naming-schema" not in data:
                raise ValueError(
                    "You must provide naming-schema for pull replication task")

            if "also-include-naming-schema" in data:
                raise ValueError(
                    "Pull replication task can't have also-include-naming-schema"
                )

            data.setdefault("also-include-naming-schema",
                            data.pop("naming-schema"))

        schedule, restrict_schedule = cls._parse_schedules(data)

        if data["direction"] == ReplicationDirection.PULL:
            if data["hold-pending-snapshots"]:
                raise ValueError(
                    "Pull replication tasks can't hold pending snapshots because they don't do source "
                    "retention")

        retention_policy = TargetSnapshotRetentionPolicy.from_data(data)

        compression = replication_compressions[
            data["compression"]] if data["compression"] else None

        return cls(id, data["direction"], create_transport(data["transport"]),
                   data["source-dataset"], data["target-dataset"],
                   data["recursive"], data["exclude"], data["properties"],
                   data["replicate"], resolved_periodic_snapshot_tasks,
                   data["also-include-naming-schema"], data["auto"], schedule,
                   restrict_schedule, data["only-matching-schedule"],
                   data["allow-from-scratch"], data["hold-pending-snapshots"],
                   retention_policy, compression, data["speed-limit"],
                   data["dedup"], data["large-block"], data["embed"],
                   data["compressed"], data["retries"],
                   logging._nameToLevel[data["logging-level"].upper()])
Example #7
    @classmethod
    def from_data(cls, id, data: dict,
                  periodic_snapshot_tasks: [PeriodicSnapshotTask]):
        replication_task_validator.validate(data)

        for k in [
                "source-dataset", "naming-schema", "also-include-naming-schema"
        ]:
            if k in data and isinstance(data[k], str):
                data[k] = [data[k]]

        data.setdefault("exclude", [])
        data.setdefault("properties", True)
        data.setdefault("properties-exclude", [])
        data.setdefault("properties-override", {})
        data.setdefault("replicate", False)
        data.setdefault("encryption", None)
        data.setdefault("periodic-snapshot-tasks", [])
        data.setdefault("name-regex", None)
        data.setdefault("only-matching-schedule", False)
        data.setdefault("readonly", "ignore")
        data.setdefault("allow-from-scratch", False)
        data.setdefault("hold-pending-snapshots", False)
        data.setdefault("compression", None)
        data.setdefault("speed-limit", None)
        data.setdefault("dedup", False)
        data.setdefault("large-block", False)
        data.setdefault("embed", False)
        data.setdefault("compressed", False)
        data.setdefault("retries", 5)
        data.setdefault("logging-level", "notset")

        resolved_periodic_snapshot_tasks = []
        for periodic_snapshot_task_id in data["periodic-snapshot-tasks"]:
            for periodic_snapshot_task in periodic_snapshot_tasks:
                if periodic_snapshot_task.id == periodic_snapshot_task_id:
                    resolved_periodic_snapshot_tasks.append(
                        periodic_snapshot_task)
                    break
            else:
                raise ValueError(
                    f"Periodic snapshot task {periodic_snapshot_task_id!r} does not exist"
                )

        if data["recursive"]:
            cls._validate_exclude(data, resolved_periodic_snapshot_tasks)

        if data["replicate"]:
            if not data["recursive"]:
                raise ValueError(
                    "Replication tasks that replicate entire filesystem should be recursive"
                )
            if data["exclude"]:
                raise ValueError(
                    "Replication tasks that replicate entire filesystem can't exclude datasets"
                )
            if not data["properties"]:
                raise ValueError(
                    "Replication tasks that replicate entire filesystem can't exclude properties"
                )

        if data["encryption"]:
            encryption = ReplicationEncryption(
                data["encryption"]["key"],
                KeyFormat(data["encryption"]["key-format"]),
                data["encryption"]["key-location"],
            )
        else:
            encryption = None

        data["direction"] = ReplicationDirection(data["direction"])

        if data["direction"] == ReplicationDirection.PUSH:
            if "naming-schema" in data:
                raise ValueError(
                    "Push replication task can't have naming-schema")

            data.setdefault("also-include-naming-schema", [])

            if not resolved_periodic_snapshot_tasks and not data[
                    "also-include-naming-schema"] and not data["name-regex"]:
                raise ValueError(
                    "You must at least provide either periodic-snapshot-tasks or also-include-naming-schema or "
                    "name-regex for push replication task")

        elif data["direction"] == ReplicationDirection.PULL:
            if "naming-schema" not in data and not data["name-regex"]:
                raise ValueError(
                    "You must provide naming-schema or name-regex for pull replication task"
                )

            if "also-include-naming-schema" in data:
                raise ValueError(
                    "Pull replication task can't have also-include-naming-schema"
                )

            data.setdefault("also-include-naming-schema",
                            data.pop("naming-schema"))

        schedule, restrict_schedule = cls._parse_schedules(data)

        if data["direction"] == ReplicationDirection.PULL:
            if data["hold-pending-snapshots"]:
                raise ValueError(
                    "Pull replication tasks can't hold pending snapshots because they don't do source "
                    "retention")

        if data["name-regex"]:
            try:
                name_pattern = re.compile(f"({data['name-regex']})$")
            except Exception as e:
                raise ValueError(f"Invalid name-regex: {e}")

            if data["also-include-naming-schema"]:
                raise ValueError(
                    "naming-schema/also-include-naming-schema can't be used with name-regex"
                )
        else:
            name_pattern = None

        retention_policy = TargetSnapshotRetentionPolicy.from_data(data)

        compression = replication_compressions[
            data["compression"]] if data["compression"] else None

        return cls(
            id, data["direction"], create_transport(data["transport"]),
            data["source-dataset"], data["target-dataset"], data["recursive"],
            data["exclude"], data["properties"], data["properties-exclude"],
            {k: str(v)
             for k, v in data["properties-override"].items()},
            data["replicate"], encryption, resolved_periodic_snapshot_tasks,
            data["also-include-naming-schema"], name_pattern, data["auto"],
            schedule, restrict_schedule, data["only-matching-schedule"],
            ReadOnlyBehavior(data["readonly"]), data["allow-from-scratch"],
            data["hold-pending-snapshots"], retention_policy, compression,
            data["speed-limit"], data["dedup"], data["large-block"],
            data["embed"], data["compressed"], data["retries"],
            logging._nameToLevel[data["logging-level"].upper()])
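A hypothetical minimal `data` payload for the `from_data` above, assembled only
from the keys this excerpt reads. The authoritative schema is whatever
`replication_task_validator` enforces, and the enclosing class (assumed to be
zettarepl's ReplicationTask) is not shown, so the real minimum may differ.

data = {
    "direction": "push",
    "transport": {"type": "local"},
    "source-dataset": "tank/src",  # a bare str is normalized to [str] above
    "target-dataset": "tank/dst",
    "recursive": False,
    "auto": False,
    "also-include-naming-schema": "auto-%Y-%m-%d_%H-%M",
    # Schedule and retention keys are consumed by cls._parse_schedules and
    # TargetSnapshotRetentionPolicy.from_data, which this excerpt does not show.
}
task = ReplicationTask.from_data(1, data, periodic_snapshot_tasks=[])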
Example #8
async def _get_zettarepl_shell(self, transport, ssh_credentials):
    transport_definition = await self._define_transport(
        transport, ssh_credentials)
    transport = create_transport(transport_definition)
    return transport.shell(transport)