Code Example #1
File: smart.py | Project: razzfazz/freenas
    async def do_create(self, data):
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if not data.get('disks'):
            verrors.add(
                'smart_test_create.disks',
                'This field is required'
            )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call(
            'service.restart',
            'smartd',
            {'onetime': False}
        )

        return data
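
Note: every example on this page funnels through the same pair of helpers on the middleware's Cron schema class. The sketch below shows what convert_schedule_to_db_format appears to do, inferred only from the payloads on this page (the nested "dom"/"dow" keys in the websocket examples versus the flattened "daymonth" key in Code Example #16); the field names, the key_prefix handling, and the begin_end window are assumptions, not the actual middleware source.

# Hypothetical reconstruction -- field names inferred from this page, not
# copied from the freenas middleware.
FIELDS = ['minute', 'hour', 'dom', 'month', 'dow']
DB_FIELDS = ['minute', 'hour', 'daymonth', 'month', 'dayweek']

def convert_schedule_to_db_format(data, schedule_name='schedule',
                                  key_prefix='', begin_end=False):
    if schedule_name not in data:
        return
    schedule = data.pop(schedule_name)
    for field, db_field in zip(FIELDS, DB_FIELDS):
        data[key_prefix + db_field] = schedule[field]
    if begin_end:
        # Periodic snapshot tasks carry a begin/end time window
        # (see the "begin"/"end" keys in Code Examples #18-#20).
        data[key_prefix + 'begin'] = schedule['begin']
        data[key_prefix + 'end'] = schedule['end']

# Round-tripping a cron-style payload like the one in Code Example #8:
task = {'command': 'touch /tmp/testfile',
        'schedule': {'minute': '5', 'hour': '*', 'dom': '*',
                     'month': '*', 'dow': '*'}}
convert_schedule_to_db_format(task)
assert task == {'command': 'touch /tmp/testfile', 'minute': '5', 'hour': '*',
                'daymonth': '*', 'month': '*', 'dayweek': '*'}
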
Code Example #2
File: cron.py | Project: razzfazz/freenas
    async def do_update(self, id, data):
        task_data = await self.query(filters=[('id', '=', id)], options={'get': True})
        original_data = task_data.copy()
        task_data.update(data)
        verrors, task_data = await self.validate_data(task_data, 'cron_job_update')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(task_data)
        Cron.convert_schedule_to_db_format(original_data)

        if len(set(task_data.items()) ^ set(original_data.items())) > 0:

            await self.middleware.call(
                'datastore.update',
                self._config.datastore,
                id,
                task_data,
                {'prefix': self._config.datastore_prefix}
            )

            await self.middleware.call(
                'service.restart',
                'cron',
                {'onetime': False}
            )

        return await self.query(filters=[('id', '=', id)], options={'get': True})
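
Note: the set-XOR check in this example is a compact "did anything change" test: the symmetric difference of the two item sets is non-empty exactly when at least one (key, value) pair differs. It only works because both dicts have already been flattened to DB format, so every value is hashable.

a = {'minute': '5', 'enabled': True}
b = {'minute': '10', 'enabled': True}
# Differing pairs from both sides show up in the symmetric difference:
assert set(a.items()) ^ set(b.items()) == {('minute', '5'), ('minute', '10')}
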
Code Example #3
File: rsync.py | Project: freenas/freenas
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)], options={'get': True})

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new, 'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)], options={'get': True})
Code Example #4
File: replication.py | Project: freenas/freenas
    async def compress(self, data):
        if data["direction"] == "PUSH":
            data["naming_schema"] = data["also_include_naming_schema"]
        del data["also_include_naming_schema"]

        Cron.convert_schedule_to_db_format(data, "schedule", key_prefix="schedule_", begin_end=True)
        Cron.convert_schedule_to_db_format(data, "restrict_schedule", key_prefix="restrict_schedule_", begin_end=True)

        del data["periodic_snapshot_tasks"]

        return data
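
Note: following the hypothetical sketch after Code Example #1, the prefixed calls above would flatten each nested schedule under its own column prefix (field names assumed as before):

data = {'restrict_schedule': {'minute': '0', 'hour': '*', 'dom': '*',
                              'month': '*', 'dow': '*',
                              'begin': '09:00', 'end': '18:00'}}
convert_schedule_to_db_format(data, 'restrict_schedule',
                              key_prefix='restrict_schedule_', begin_end=True)
# data now holds restrict_schedule_minute, restrict_schedule_daymonth, ...,
# restrict_schedule_begin and restrict_schedule_end.
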
Code Example #5
File: smart.py | Project: razzfazz/freenas
    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data
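
Note: the DB stores the test type as a single letter; this extend method expands it, and the create/update paths compress it back by taking the first character of the enum name (see data.pop('type')[0] in Code Examples #1 and #12). The round trip is only safe because the four enum values have distinct initials:

test_type = {'L': 'LONG', 'S': 'SHORT', 'C': 'CONVEYANCE', 'O': 'OFFLINE'}
assert all(test_type[name[0]] == name for name in test_type.values())
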
Code Example #6
File: cloud_sync.py | Project: razzfazz/freenas
    async def _extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        if "encryption_password" in cloud_sync:
            cloud_sync["encryption_password"] = await self.middleware.call(
                "notifier.pwenc_decrypt", cloud_sync["encryption_password"])
        if "encryption_salt" in cloud_sync:
            cloud_sync["encryption_salt"] = await self.middleware.call(
                "notifier.pwenc_decrypt", cloud_sync["encryption_salt"])

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync
Code Example #7
File: cloud_sync.py | Project: razzfazz/freenas
    async def _compress(self, cloud_sync):
        if "credentials" in cloud_sync:
            cloud_sync["credential"] = cloud_sync.pop("credentials")

        if "encryption_password" in cloud_sync:
            cloud_sync["encryption_password"] = await self.middleware.call(
                "notifier.pwenc_encrypt", cloud_sync["encryption_password"])
        if "encryption_salt" in cloud_sync:
            cloud_sync["encryption_salt"] = await self.middleware.call(
                "notifier.pwenc_encrypt", cloud_sync["encryption_salt"])

        Cron.convert_schedule_to_db_format(cloud_sync)

        return cloud_sync
Code Example #8
File: cron.py | Project: freenas/freenas
    async def do_create(self, data):
        """
        Create a new cron job.

        `stderr` and `stdout` are boolean values which, when `true`, suppress
        standard error / standard output respectively.

        .. examples(websocket)::

          Create a cron job which executes `touch /tmp/testfile` at the fifth minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "cronjob.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "command": "touch /tmp/testfile",
                    "description": "Test command",
                    "user": "******",
                    "stderr": true,
                    "stdout": true
                }]
            }
        """
        verrors, data = await self.validate_data(data, 'cron_job_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call('service.restart', 'cron')

        return await self._get_instance(data['id'])
Code Example #9
File: cloud_sync.py | Project: freenas/freenas
    async def _compress(self, cloud_sync):
        cloud_sync["credential"] = cloud_sync.pop("credentials")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_password"]
        )
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_salt"]
        )

        Cron.convert_schedule_to_db_format(cloud_sync)

        cloud_sync.pop('job', None)

        return cloud_sync
Code Example #10
File: rsync.py | Project: razzfazz/freenas
    async def do_create(self, data):
        verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return data
Code Example #11
File: snapshot.py | Project: freenas/freenas
    async def extend(self, data, context):
        Cron.convert_db_format_to_schedule(data, begin_end=True)

        data['legacy'] = self._is_legacy(data, context['legacy_replication_tasks'])

        data['vmware_sync'] = any(
            (
                vmware['filesystem'] == data['dataset'] or
                (data['recursive'] and is_child(vmware['filesystem'], data['dataset']))
            )
            for vmware in context['vmware']
        )

        data['state'] = context['state'].get(f'periodic_snapshot_task_{data["id"]}', {
            'state': 'UNKNOWN',
        })

        return data
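
Note: is_child is imported from the middleware utilities; its implementation is not shown on this page. A minimal sketch consistent with the usage above (an assumption, not the real helper) treats a dataset as a child when its path nests strictly below the parent; the caller tests equality separately:

def is_child(child, parent):
    # Strict containment on ZFS dataset paths.
    return child.startswith(parent + '/')

assert is_child('data/work/temp', 'data/work')
assert not is_child('data/workshop', 'data/work')  # no false prefix match
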
Code Example #12
File: smart.py | Project: razzfazz/freenas
    async def do_update(self, id, data):
        old = await self.query(filters=[('id', '=', id)], options={'get': True})
        new = old.copy()
        new.update(data)

        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [disk for disk in old['disks'] if disk not in new['disks']]
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [disk for disk in chain(new_disks, old['disks']) if disk not in deleted_disks]

        if not new.get('disks'):
            verrors.add(
                'smart_test_update.disks',
                'This field is required'
            )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call(
            'service.restart',
            'smartd',
            {'onetime': False}
        )

        return await self.query(filters=[('id', '=', id)], options={'get': True})
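
Note: the disk bookkeeping above validates only the disks being added (so disks the task already owns are not re-flagged as "already have tests for this type") and then reassembles the full list as added + kept. A worked trace with hypothetical identifiers:

from itertools import chain

old_disks = ['ada0', 'ada1']
incoming = ['ada1', 'ada2']
new_disks = [d for d in incoming if d not in old_disks]      # ['ada2']
deleted_disks = [d for d in old_disks if d not in incoming]  # ['ada0']
merged = [d for d in chain(new_disks, old_disks) if d not in deleted_disks]
assert merged == ['ada2', 'ada1']
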
Code Example #13
File: pool.py | Project: razzfazz/freenas
    async def do_create(self, data):
        verrors, data = await self.validate_data(data, 'pool_scrub_create')

        if verrors:
            raise verrors

        data['volume'] = data.pop('pool')
        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call(
            'service.restart',
            'cron',
            {'onetime': False}
        )

        return await self.query(filters=[('id', '=', data['id'])], options={'get': True})
Code Example #14
File: replication.py | Project: freenas/freenas
    async def extend(self, data, context):
        data["periodic_snapshot_tasks"] = [
            {k.replace("task_", ""): v for k, v in task.items()}
            for task in data["periodic_snapshot_tasks"]
        ]

        for task in data["periodic_snapshot_tasks"]:
            Cron.convert_db_format_to_schedule(task, begin_end=True)

        if data["direction"] == "PUSH":
            data["also_include_naming_schema"] = data["naming_schema"]
            data["naming_schema"] = []
        if data["direction"] == "PULL":
            data["also_include_naming_schema"] = []

        Cron.convert_db_format_to_schedule(data, "schedule", key_prefix="schedule_", begin_end=True)
        Cron.convert_db_format_to_schedule(data, "restrict_schedule", key_prefix="restrict_schedule_", begin_end=True)

        if data["transport"] == "LEGACY":
            if data["id"] in context["legacy_result"]:
                legacy_result = context["legacy_result"][data["id"]]

                msg = legacy_result.get("msg")
                if msg == "Running":
                    state = "RUNNING"
                elif msg in ["Succeeded", "Up to date"]:
                    state = "FINISHED"
                else:
                    state = "ERROR"

                data["state"] = {
                    "datetime": context["legacy_result_datetime"],
                    "state": state,
                    "last_snapshot": legacy_result.get("last_snapshot"),
                }

                if state == "ERROR":
                    data["state"]["error"] = msg
            else:
                data["state"] = {
                    "state": "PENDING",
                }
        else:
            data["state"] = context["state"].get(f"replication_task_{data['id']}", {
                "state": "PENDING",
            })

        return data
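
Note: the first comprehension strips the datastore's "task_" column prefix so the nested periodic snapshot tasks look like ordinary API objects. str.replace substitutes everywhere in the key, not just at the start, but that is harmless for these column names:

task = {'task_minute': '0', 'task_enabled': True}
assert {k.replace('task_', ''): v for k, v in task.items()} == \
    {'minute': '0', 'enabled': True}
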
Code Example #15
File: rsync.py | Project: freenas/freenas
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        return data
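
Note: re.split(r"\s+", ...) yields empty strings for leading or trailing whitespace, which the filter(None, ...) wrapper drops:

import re

assert list(filter(None, re.split(r"\s+", "  -avz  --delete "))) == \
    ['-avz', '--delete']
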
Code Example #16
File: cloud_sync.py | Project: xj361685640/freenas
class CloudSyncService(CRUDService):
    class Config:
        datastore = "tasks.cloudsync"
        datastore_extend = "cloudsync._extend"

    @filterable
    async def query(self, filters=None, options=None):
        tasks_or_task = await super().query(filters, options)

        jobs = {}
        for j in await self.middleware.call(
                "core.get_jobs", [("method", "=", "cloudsync.sync")],
                {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        if isinstance(tasks_or_task, list):
            for task in tasks_or_task:
                task["job"] = jobs.get(task["id"])
        else:
            tasks_or_task["job"] = jobs.get(tasks_or_task["id"])

        return tasks_or_task

    @private
    async def _extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        if "encryption_password" in cloud_sync:
            cloud_sync["encryption_password"] = await self.middleware.call(
                "notifier.pwenc_decrypt", cloud_sync["encryption_password"])
        if "encryption_salt" in cloud_sync:
            cloud_sync["encryption_salt"] = await self.middleware.call(
                "notifier.pwenc_decrypt", cloud_sync["encryption_salt"])

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync

    @private
    async def _compress(self, cloud_sync):
        if "credentials" in cloud_sync:
            cloud_sync["credential"] = cloud_sync.pop("credentials")

        if "encryption_password" in cloud_sync:
            cloud_sync["encryption_password"] = await self.middleware.call(
                "notifier.pwenc_encrypt", cloud_sync["encryption_password"])
        if "encryption_salt" in cloud_sync:
            cloud_sync["encryption_salt"] = await self.middleware.call(
                "notifier.pwenc_encrypt", cloud_sync["encryption_salt"])

        Cron.convert_schedule_to_db_format(cloud_sync)

        return cloud_sync

    @private
    async def _get_credentials(self, credentials_id):
        try:
            return await self.middleware.call("datastore.query",
                                              "system.cloudcredentials",
                                              [("id", "=", credentials_id)],
                                              {"get": True})
        except IndexError:
            return None

    @private
    async def _validate(self, verrors, name, data):
        if data["encryption"]:
            if not data["encryption_password"]:
                verrors.add(
                    f"{name}.encryption_password",
                    "This field is required when encryption is enabled")

        credentials = await self._get_credentials(data["credentials"])
        if not credentials:
            verrors.add(f"{name}.credentials", "Invalid credentials")

        if verrors:
            raise verrors

        provider = REMOTES[credentials["provider"]]

        schema = []

        if provider.buckets:
            schema.append(Str("bucket", required=True, empty=False))

        schema.append(Str("folder", required=True))

        schema.extend(provider.task_schema)

        attributes_verrors = validate_attributes(schema,
                                                 data,
                                                 additional_attrs=True)

        if not attributes_verrors:
            await provider.pre_save_task(data, credentials, verrors)

        verrors.add_child(f"{name}.attributes", attributes_verrors)

        try:
            shlex.split(data["args"])
        except ValueError as e:
            verrors.add(f"{name}.args", f"Parse error: {e.args[0]}")

    @private
    async def _validate_folder(self, verrors, name, data):
        if data["direction"] == "PULL":
            if data["attributes"]["folder"].strip("/"):
                folder_parent = os.path.normpath(
                    os.path.join(data["attributes"]["folder"].strip("/"),
                                 ".."))
                if folder_parent == ".":
                    folder_parent = ""
                folder_basename = os.path.basename(
                    data["attributes"]["folder"].strip("/"))
                ls = await self.list_directory(
                    dict(
                        credentials=data["credentials"],
                        encryption=data.get("encryption"),
                        filename_encryption=data.get("filename_encryption"),
                        encryption_password=data.get("encryption_password"),
                        encryption_salt=data.get("encryption_salt"),
                        attributes=dict(data["attributes"],
                                        folder=folder_parent),
                        args=data.get("args"),
                    ))
                for item in ls:
                    if item["Name"] == folder_basename:
                        if not item["IsDir"]:
                            verrors.add(f"{name}.attributes.folder",
                                        "This is not a directory")
                        break
                else:
                    verrors.add(f"{name}.attributes.folder",
                                "Directory does not exist")

        if data["direction"] == "PUSH":
            credentials = await self._get_credentials(data["credentials"])

            provider = REMOTES[credentials["provider"]]

            if provider.readonly:
                verrors.add(f"{name}.direction", "This remote is read-only")

    @accepts(
        Dict(
            "cloud_sync",
            Str("description"),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transfer_mode", enum=["SYNC", "COPY", "MOVE"], required=True),
            Str("path", required=True),
            Int("credentials", required=True),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password"),
            Str("encryption_salt"),
            Cron("schedule"),
            Dict("attributes", additional_attrs=True),
            Str("args", default=""),
            Bool("enabled", default=True),
            register=True,
        ))
    async def do_create(self, cloud_sync):
        """
        Creates a new cloud_sync entry.

        .. examples(websocket)::

          Create a new cloud_sync task using Amazon S3 attributes, scheduled to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "cloudsync.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credentials": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        cloud_sync["id"] = await self.middleware.call("datastore.insert",
                                                      "tasks.cloudsync",
                                                      cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"),
             Patch("cloud_sync", "cloud_sync_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Updates the cloud_sync entry `id` with `data`.
        """
        cloud_sync = await self._get_instance(id)

        # credentials is a foreign key for now
        if cloud_sync["credentials"]:
            cloud_sync["credentials"] = cloud_sync["credentials"]["id"]

        cloud_sync.update(data)

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        await self.middleware.call("datastore.update", "tasks.cloudsync", id,
                                   cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Deletes cloud_sync entry `id`.
        """
        await self.middleware.call("datastore.delete", "tasks.cloudsync", id)
        await self.middleware.call("service.restart", "cron")

    @accepts(Int("credentials_id"))
    async def list_buckets(self, credentials_id):
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")

        provider = REMOTES[credentials["provider"]]

        if not provider.buckets:
            raise CallError("This provider does not use buckets")

        return await self.ls({"credentials": credentials}, "")

    @accepts(
        Dict(
            "cloud_sync_ls",
            Int("credentials"),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password"),
            Str("encryption_salt"),
            Dict("attributes", additional_attrs=True),
            Str("args"),
        ))
    async def list_directory(self, cloud_sync):
        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        credentials = await self._get_credentials(cloud_sync["credentials"])

        if REMOTES[credentials["provider"]].buckets:
            path = f"{cloud_sync['attributes']['bucket']}/{cloud_sync['attributes']['folder']}"
        else:
            path = cloud_sync["attributes"]["folder"]

        return await self.ls(dict(cloud_sync, credentials=credentials), path)

    @private
    async def ls(self, config, path):
        with RcloneConfig(config) as config:
            proc = await run([
                "rclone", "--config", config.config_path, "lsjson",
                "remote:" + path
            ],
                             check=False,
                             encoding="utf8")
            if proc.returncode == 0:
                return json.loads(proc.stdout)
            else:
                raise CallError(proc.stderr)

    @item_method
    @accepts(Int("id"))
    @job(lock=lambda args: "cloud_sync:{}".format(args[-1]),
         lock_queue_size=1,
         logs=True)
    async def sync(self, job, id):
        """
        Run the cloud_sync job `id`, syncing the local data to remote.
        """

        cloud_sync = await self._get_instance(id)

        return await rclone(job, cloud_sync)

    @accepts()
    async def providers(self):
        return sorted(
            [
                {
                    "name": provider.name,
                    "title": provider.title,
                    "credentials_schema": [
                        {"property": field.name, "schema": field.to_json_schema()}
                        for field in provider.credentials_schema
                    ],
                    "buckets": provider.buckets,
                    "task_schema": [
                        {"property": field.name, "schema": field.to_json_schema()}
                        for field in provider.task_schema
                    ],
                }
                for provider in REMOTES.values()
            ],
            key=lambda provider: provider["title"].lower(),
        )
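
Note: the query override in this class attaches the most relevant cloudsync.sync job to each task. Because core.get_jobs is ordered by id ascending, the loop keeps the last job per task id, except that a RUNNING job, once seen, is never displaced by a later entry:

entries = [
    {'id': 1, 'arguments': [7], 'state': 'SUCCESS'},
    {'id': 2, 'arguments': [7], 'state': 'RUNNING'},
    {'id': 3, 'arguments': [7], 'state': 'SUCCESS'},
]
jobs = {}
for j in entries:
    task_id = int(j['arguments'][0])
    if task_id in jobs and jobs[task_id]['state'] == 'RUNNING':
        continue
    jobs[task_id] = j
assert jobs[7]['id'] == 2  # the running job wins over the later, finished one
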
Code Example #17
File: rsync.py | Project: freenas/freenas
    async def do_create(self, data):
        """
        Create a Rsync Task.

        `path` represents the path to pool/dataset.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` represents the operating mechanism for Rsync, i.e. Rsync Module mode / Rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute should be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system at the fifth minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self._get_instance(data['id'])
Code Example #18
File: snapshot.py | Project: PersonalBSD/freenas-core
    async def do_update(self, id, data):
        """
        Update a Periodic Snapshot Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.update",
                "params": [
                    1,
                    {
                        "dataset": "data/work",
                        "recursive": true,
                        "exclude": ["data/work/temp"],
                        "lifetime_value": 2,
                        "lifetime_unit": "WEEK",
                        "naming_schema": "auto_%Y-%m-%d_%H-%M",
                        "schedule": {
                            "minute": "0",
                            "hour": "*",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        }
                    }
                ]
            }
        """

        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_update', await self._validate(new))

        if not new['enabled']:
            for replication_task in await self.middleware.call(
                    'replication.query', [['enabled', '=', True]]):
                if any(periodic_snapshot_task['id'] == id
                       for periodic_snapshot_task in
                       replication_task['periodic_snapshot_tasks']):
                    verrors.add('periodic_snapshot_update.enabled', (
                        f'You can\'t disable this periodic snapshot task because it is bound to enabled replication '
                        f'task {replication_task["id"]!r}'))
                    break

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new, begin_end=True)

        for key in ('vmware_sync', 'state'):
            new.pop(key, None)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(id)
Code Example #19
File: snapshot.py | Project: PersonalBSD/freenas-core
    async def do_create(self, data):
        """
        Create a Periodic Snapshot Task

        Create a Periodic Snapshot Task that will take snapshots of specified `dataset` at specified `schedule`.
        Recursive snapshots can be created if `recursive` flag is enabled. You can `exclude` specific child datasets
        or zvols from the snapshot.
        Snapshots will be automatically destroyed after a certain amount of time, specified by
        `lifetime_value` and `lifetime_unit`.
        If multiple periodic tasks create snapshots at the same time (for example hourly and daily at 00:00) the snapshot
        will be kept until the last of these tasks reaches its expiry time.
        Snapshots will be named according to `naming_schema` which is a `strftime`-like template for snapshot name
        and must contain `%Y`, `%m`, `%d`, `%H` and `%M`.

        .. examples(websocket)::

          Create a recursive Periodic Snapshot Task for dataset `data/work` excluding `data/work/temp`. Snapshots
          will be created on weekdays every hour from 09:00 to 18:00 and will be stored for two weeks.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.create",
                "params": [{
                    "dataset": "data/work",
                    "recursive": true,
                    "exclude": ["data/work/temp"],
                    "lifetime_value": 2,
                    "lifetime_unit": "WEEK",
                    "naming_schema": "auto_%Y-%m-%d_%H-%M",
                    "schedule": {
                        "minute": "0",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    }
                }]
            }
        """

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_create', await self._validate(data))

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data, begin_end=True)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(data['id'])
Code Example #20
class PeriodicSnapshotTaskService(CRUDService):
    class Config:
        datastore = 'storage.task'
        datastore_prefix = 'task_'
        datastore_extend = 'pool.snapshottask.extend'
        datastore_extend_context = 'pool.snapshottask.extend_context'
        namespace = 'pool.snapshottask'

    @private
    async def extend_context(self):
        return {
            'legacy_replication_tasks': await self._legacy_replication_tasks(),
            'state': await self.middleware.call('zettarepl.get_state'),
            'vmware': await self.middleware.call('vmware.query'),
        }

    @private
    async def extend(self, data, context):
        Cron.convert_db_format_to_schedule(data, begin_end=True)

        data['legacy'] = self._is_legacy(data,
                                         context['legacy_replication_tasks'])

        data['vmware_sync'] = any(
            (
                vmware['filesystem'] == data['dataset'] or
                (data['recursive'] and is_child(vmware['filesystem'], data['dataset']))
            )
            for vmware in context['vmware']
        )

        data['state'] = context['state'].get(
            f'periodic_snapshot_task_{data["id"]}', {
                'state': 'PENDING',
            })

        return data

    @accepts(
        Dict('periodic_snapshot_create',
             Path('dataset', required=True),
             Bool('recursive', required=True),
             List('exclude', items=[Path('item', empty=False)], default=[]),
             Int('lifetime_value', required=True),
             Str('lifetime_unit',
                 enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR'],
                 required=True),
             Str('naming_schema',
                 required=True,
                 validators=[ReplicationSnapshotNamingSchema()]),
             Cron('schedule',
                  defaults={
                      'minute': '00',
                      'begin': '09:00',
                      'end': '18:00'
                  },
                  required=True,
                  begin_end=True),
             Bool('allow_empty', default=True),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create a Periodic Snapshot Task

        Create a Periodic Snapshot Task that will take snapshots of specified `dataset` at specified `schedule`.
        Recursive snapshots can be created if `recursive` flag is enabled. You can `exclude` specific child datasets
        from snapshot.
        Snapshots will be automatically destroyed after a certain amount of time, specified by
        `lifetime_value` and `lifetime_unit`.
        Snapshots will be named according to `naming_schema` which is a `strftime`-like template for snapshot name
        and must contain `%Y`, `%m`, `%d`, `%H` and `%M`.

        .. examples(websocket)::

          Create a recursive Periodic Snapshot Task for dataset `data/work` excluding `data/work/temp`. Snapshots
          will be created on weekdays every hour from 09:00 to 18:00 and will be stored for two weeks.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.create",
                "params": [{
                    "dataset": "data/work",
                    "recursive": true,
                    "exclude": ["data/work/temp"],
                    "lifetime_value": 2,
                    "lifetime_unit": "WEEK",
                    "naming_schema": "auto_%Y-%m-%d_%H-%M",
                    "schedule": {
                        "minute": "0",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    }
                }]
            }
        """

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_create', await self._validate(data))

        if verrors:
            raise verrors

        if self._is_legacy(data, await self._legacy_replication_tasks()):
            verrors.add_child('periodic_snapshot_create',
                              self._validate_legacy(data))

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data, begin_end=True)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron')
        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch('periodic_snapshot_create', 'periodic_snapshot_update',
                   ('attr', {
                       'update': True
                   })))
    async def do_update(self, id, data):
        """
        Update a Periodic Snapshot Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.update",
                "params": [
                    1,
                    {
                        "dataset": "data/work",
                        "recursive": true,
                        "exclude": ["data/work/temp"],
                        "lifetime_value": 2,
                        "lifetime_unit": "WEEK",
                        "naming_schema": "auto_%Y-%m-%d_%H-%M",
                        "schedule": {
                            "minute": "0",
                            "hour": "*",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        }
                    }
                ]
            }
        """

        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_update', await self._validate(new))

        if not new['enabled']:
            for replication_task in await self.middleware.call(
                    'replication.query', [['enabled', '=', True]]):
                if any(periodic_snapshot_task['id'] == id
                       for periodic_snapshot_task in
                       replication_task['periodic_snapshot_tasks']):
                    verrors.add('periodic_snapshot_update.enabled', (
                        f'You can\'t disable this periodic snapshot task because it is bound to enabled replication '
                        f'task {replication_task["id"]!r}'))
                    break

        if verrors:
            raise verrors

        legacy_replication_tasks = await self._legacy_replication_tasks()
        if self._is_legacy(new, legacy_replication_tasks):
            verrors.add_child('periodic_snapshot_update',
                              self._validate_legacy(new))
        else:
            if self._is_legacy(old, legacy_replication_tasks):
                verrors.add('periodic_snapshot_update.naming_schema', (
                    'This snapshot task is being used in legacy replication task. You must use naming schema '
                    f'{self._legacy_naming_schema(new)!r}. Please upgrade your replication tasks to edit this field.'
                ))

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new, begin_end=True)

        for key in ('legacy', 'vmware_sync', 'state'):
            new.pop(key, None)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron')
        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a Periodic Snapshot Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.delete",
                "params": [
                    1
                ]
            }
        """

        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('service.restart', 'cron')
        await self.middleware.call('zettarepl.update_tasks')

        return response

    @item_method
    @accepts(Int("id"))
    async def run(self, id):
        """
        Execute a Periodic Snapshot Task of `id`.
        """
        task = await self._get_instance(id)

        if not task["enabled"]:
            raise CallError("Task is not enabled")

        await self.middleware.call("zettarepl.run_periodic_snapshot_task",
                                   task["id"])

    async def _validate(self, data):
        verrors = ValidationErrors()

        if data['dataset'] not in (
                await self.middleware.call('pool.filesystem_choices')):
            verrors.add('dataset', 'Invalid ZFS dataset')

        if not data['recursive'] and data['exclude']:
            verrors.add(
                'exclude',
                'Excluding datasets makes no sense for non-recursive periodic snapshot tasks'
            )

        for i, v in enumerate(data['exclude']):
            if not v.startswith(f'{data["dataset"]}/'):
                verrors.add(
                    f'exclude.{i}',
                    'Excluded dataset should be a child of selected dataset')

        return verrors

    def _validate_legacy(self, data):
        verrors = ValidationErrors()

        if data['exclude']:
            verrors.add(
                'exclude',
                ('Excluding child datasets is not available because this snapshot task is being used in '
                 'legacy replication task. Please upgrade your replication tasks to edit this field.'
                 ),
            )

        if not data['allow_empty']:
            verrors.add(
                'allow_empty',
                ('Disallowing empty snapshots is not available because this snapshot task is being used in '
                 'legacy replication task. Please upgrade your replication tasks to edit this field.'
                 ),
            )

        return verrors

    def _is_legacy(self, data, legacy_replication_tasks):
        if data['naming_schema'] == self._legacy_naming_schema(data):
            for replication_task in legacy_replication_tasks:
                if (data['dataset'] == replication_task['source_datasets'][0] or
                        (data['recursive'] and
                         is_child(replication_task['source_datasets'][0],
                                  data['dataset'])) or
                        (replication_task['recursive'] and
                         is_child(data['dataset'],
                                  replication_task['source_datasets'][0]))):
                    return True

        return False

    def _legacy_naming_schema(self, data):
        return f'auto-%Y%m%d.%H%M%S-{data["lifetime_value"]}{data["lifetime_unit"].lower()[0]}'

    async def _legacy_replication_tasks(self):
        return await self.middleware.call('replication.query',
                                          [['transport', '=', 'LEGACY']])
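
Note: a worked example of _legacy_naming_schema above: with lifetime_value 2 and lifetime_unit WEEK, the template resolves to a strftime pattern with a "2w" suffix:

data = {'lifetime_value': 2, 'lifetime_unit': 'WEEK'}
schema = f'auto-%Y%m%d.%H%M%S-{data["lifetime_value"]}{data["lifetime_unit"].lower()[0]}'
assert schema == 'auto-%Y%m%d.%H%M%S-2w'
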
Code Example #21
File: cron.py | Project: xpcom-bsd/freenas
    def cron_extend(self, data):
        Cron.convert_db_format_to_schedule(data)
        return data
Code Example #22
class SMARTTestService(CRUDService):
    class Config:
        datastore = 'tasks.smarttest'
        datastore_extend = 'smart.test.smart_test_extend'
        datastore_prefix = 'smarttest_'
        namespace = 'smart.test'
        cli_namespace = 'task.smart_test'

    @private
    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        smart_tests = await self.query(filters=[('type', '=', data['type'])])
        configured_disks = [d for test in smart_tests for d in test['disks']]
        disks_dict = await self.disk_choices()

        disks = data.get('disks')
        used_disks = []
        invalid_disks = []
        for disk in disks:
            if disk in configured_disks:
                used_disks.append(disks_dict[disk])
            if disk not in disks_dict.keys():
                invalid_disks.append(disk)

        if used_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks already have tests for this type: {", ".join(used_disks)}'
            )

        if invalid_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks are invalid: {", ".join(invalid_disks)}')

        return verrors

    @accepts(Bool('full_disk', default=False))
    async def disk_choices(self, full_disk):
        """
        Returns disk choices for S.M.A.R.T. test.

        `full_disk` will return full disk objects instead of just names.
        """
        return {
            disk['identifier']: disk if full_disk else disk['name']
            for disk in await self.middleware.call('disk.query',
                                                   [['devname', '!^', 'nv']])
        }

    @accepts(
        Dict('smart_task_create',
             Cron('schedule', exclude=['minute']),
             Str('desc'),
             Bool('all_disks', default=False),
             List('disks', items=[Str('disk')]),
             Str('type',
                 enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'],
                 required=True),
             register=True))
    async def do_create(self, data):
        """
        Create a SMART Test Task.

        `disks` is a list of valid disks which should be monitored in this task.

        `type` is specified to represent the type of SMART test to be executed.

        `all_disks` when enabled sets the task to cover all disks in which case `disks` is not required.

        .. examples(websocket)::

          Create a SMART Test Task which executes at the 30th minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.create",
                "params": [{
                    "schedule": {
                        "minute": "30",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "all_disks": true,
                    "type": "OFFLINE",
                    "disks": []
                }]
            }
        """
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if data['all_disks']:
            if data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This test is already enabled for all disks')
        else:
            if not data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('smart_task_create', 'smart_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update SMART Test Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        new = old.copy()
        new.update(data)

        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [
            disk for disk in old['disks'] if disk not in new['disks']
        ]
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [
            disk for disk in chain(new_disks, old['disks'])
            if disk not in deleted_disks
        ]

        if new['all_disks']:
            if new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This test is already enabled for all disks')
        else:
            if not new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete SMART Test Task of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return response

    @accepts(
        List('disks',
             items=[
                 Dict(
                     'disk_run',
                     Str('identifier', required=True),
                     Str('mode',
                         enum=['FOREGROUND', 'BACKGROUND'],
                         default='BACKGROUND'),
                     Str('type',
                         enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'],
                         required=True),
                 )
             ]))
    async def manual_test(self, disks):
        """
        Run manual SMART tests for `disks`.

        `type` indicates what type of SMART test will be run and must be specified.
        """
        verrors = ValidationErrors()
        test_disks_list = []
        if not disks:
            verrors.add('disks', 'Please specify at least one disk.')
        else:
            disks_choices = await self.disk_choices(True)
            devices = await self.middleware.call(
                'device.get_storage_devices_topology')

            for index, disk in enumerate(disks):
                if current_disk := disks_choices.get(disk['identifier']):
                    test_disks_list.append({
                        'disk': current_disk['name'],
                        **disk
                    })
                else:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'{disk["identifier"]} is not valid. Please provide a valid disk identifier.'
                    )
                    continue

                if current_disk['name'] is None:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]} disk. Failed to retrieve name.'
                    )

                device = devices.get(current_disk['name'])
                if not device:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]}. Unable to retrieve disk details.'
                    )

        verrors.check()

        return await asyncio_map(self.__manual_test, test_disks_list, 16)
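
Note: asyncio_map(self.__manual_test, test_disks_list, 16) fans the manual tests out concurrently with a bound of 16 disks at a time. A minimal sketch of such a helper, assuming a semaphore-bounded gather (the real middleware utility may differ):

import asyncio

async def asyncio_map(func, arguments, limit=None):
    semaphore = asyncio.Semaphore(limit) if limit else None

    async def worker(arg):
        if semaphore is None:
            return await func(arg)
        async with semaphore:
            return await func(arg)

    return await asyncio.gather(*(worker(arg) for arg in arguments))
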
Code Example #23
class SMARTTestService(CRUDService):
    class Config:
        datastore = 'tasks.smarttest'
        datastore_extend = 'smart.test.smart_test_extend'
        datastore_prefix = 'smarttest_'
        namespace = 'smart.test'

    @private
    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        smart_tests = await self.query(filters=[('type', '=', data['type'])])
        configured_disks = [d for test in smart_tests for d in test['disks']]
        disks_dict = {
            disk['identifier']: disk['name']
            for disk in (await self.middleware.call('disk.query'))
        }

        disks = data.get('disks')
        used_disks = []
        invalid_disks = []
        for disk in disks:
            if disk in configured_disks:
                used_disks.append(disks_dict[disk])
            if disk not in disks_dict.keys():
                invalid_disks.append(disk)

        if used_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks already have tests for this type: {", ".join(used_disks)}'
            )

        if invalid_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks are invalid: {", ".join(invalid_disks)}')

        return verrors

    @accepts(
        Dict('smart_task_create',
             Cron('schedule', exclude=['minute']),
             Str('desc'),
             Bool('all_disks', default=False),
             List('disks', items=[Str('disk')], default=[]),
             Str('type',
                 enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'],
                 required=True),
             register=True))
    async def do_create(self, data):
        """
        Create a SMART Test Task.

        `disks` is a list of valid disks which should be monitored in this task.

        `type` is specified to represent the type of SMART test to be executed.

        `all_disks` when enabled sets the task to cover all disks in which case `disks` is not required.

        .. examples(websocket)::

          Create a SMART Test Task which executes at the 30th minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.create",
                "params": [{
                    "schedule": {
                        "minute": "30",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "all_disks": true,
                    "type": "OFFLINE",
                    "disks": []
                }]
            }
        """
        # Store only the first letter in the DB ('LONG' -> 'L');
        # smart_test_extend expands it back on read.
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if data['all_disks']:
            if data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This test is already enabled for all disks')
        else:
            if not data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('smart_task_create', 'smart_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update SMART Test Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        new = old.copy()
        new.update(data)

        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [
            disk for disk in old['disks'] if disk not in new['disks']
        ]
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [
            disk for disk in chain(new_disks, old['disks'])
            if disk not in deleted_disks
        ]

        if new['all_disks']:
            if new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This test is already enabled for all disks')
        else:
            if not new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete SMART Test Task of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return response

    @accepts(
        List('disks',
             items=[
                 Dict(
                     'disk_run',
                     Str('identifier', required=True),
                     Str('mode',
                         enum=['FOREGROUND', 'BACKGROUND'],
                         default='BACKGROUND'),
                     Str('type',
                         enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'],
                         required=True),
                 )
             ]))
    async def manual_test(self, disks):
        """
        Run manual SMART tests for `disks`.

        `type` indicates what type of SMART test will be run and must be specified.
        """
        verrors = ValidationErrors()
        test_disks_list = []
        if not disks:
            verrors.add('disks', 'Please specify at least one disk.')
        else:
            disks_data = await self.middleware.call('disk.query')
            devices = await self.middleware.call(
                'device.get_storage_devices_topology')

            for index, disk in enumerate(disks):
                for d in disks_data:
                    if disk['identifier'] == d['identifier']:
                        current_disk = d
                        test_disks_list.append({
                            'disk': current_disk['name'],
                            **disk
                        })
                        break
                else:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'{disk["identifier"]} is not valid. Please provide a valid disk identifier.'
                    )
                    continue

                if current_disk['name'] is None:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]} disk. Failed to retrieve name.'
                    )
                    # Skip further checks; .startswith() on None would raise.
                    continue

                if current_disk['name'].startswith('nvd'):
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]} disk. NVMe devices cannot be mapped yet.'
                    )

                device = devices.get(current_disk['name'])
                if not device:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]}. Unable to retrieve disk details.'
                    )

        verrors.check()

        return await asyncio_map(self.__manual_test, test_disks_list, 16)

    async def __manual_test(self, disk):
        output = {}

        try:
            new_test_num = max(
                test['num'] for test in (await self.middleware.call(
                    'smart.test.results', [['disk', '=', disk['disk']]],
                    {'get': True}))['tests']) + 1
        except (MatchNotFound, ValueError):
            new_test_num = 1

        args = ['-t', disk['type'].lower()]
        if disk['mode'] == 'FOREGROUND':
            args.extend(['-C'])
        try:
            result = await self.middleware.call('disk.smartctl', disk['disk'],
                                                args)
        except CallError as e:
            output['error'] = e.errmsg
        else:
            expected_result_time = None
            time_details = re.findall(RE_TIME_DETAILS, result)
            if time_details:
                try:
                    expected_result_time = datetime.strptime(
                        time_details[0].strip(), '%a %b %d %H:%M:%S %Y')
                except Exception as e:
                    self.logger.error(
                        'Unable to parse expected_result_time: %r', e)
                else:
                    expected_result_time = expected_result_time.astimezone(
                        timezone.utc).replace(tzinfo=None)

            if expected_result_time:
                output['expected_result_time'] = expected_result_time
                output['job'] = (await self.middleware.call(
                    'smart.test.wait', disk, expected_result_time, new_test_num
                )).id
            else:
                output['error'] = result

        return {
            'disk': disk['disk'],
            'identifier': disk['identifier'],
            **output
        }

    @filterable
    async def results(self, filters, options):
        """
        Get disk(s) S.M.A.R.T. test(s) results.

        .. examples(websocket)::

          Get all disks tests results

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.results",
                "params": []
            }

            returns

            :::javascript

            [
              // ATA disk
              {
                "disk": "ada0",
                "tests": [
                  {
                    "num": 1,
                    "description": "Short offline",
                    "status": "SUCCESS",
                    "status_verbose": "Completed without error",
                    "remaining": 0.0,
                    "lifetime": 16590,
                    "lba_of_first_error": null
                  }
                ]
              },
              // SCSI disk
              {
                "disk": "ada1",
                "tests": [
                  {
                    "num": 1,
                    "description": "Background long",
                    "status": "FAILED",
                    "status_verbose": "Completed, segment failed",
                    "segment_number": null,
                    "lifetime": 3943,
                    "lba_of_first_error": null
                  }
                ]
              }
            ]

          Get specific disk test results

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.results",
                "params": [
                  [["disk", "=", "ada0"]],
                  {"get": true}
                ]
            }

            returns

            :::javascript

            {
              "disk": "ada0",
              "tests": [
                {
                  "num": 1,
                  "description": "Short offline",
                  "status": "SUCCESS",
                  "status_verbose": "Completed without error",
                  "remaining": 0.0,
                  "lifetime": 16590,
                  "lba_of_first_error": null
                }
              ]
            }
        """

        get = (options or {}).pop("get", False)

        disks = filter_list(
            [{
                "disk": disk["name"]
            } for disk in await self.middleware.call("disk.query")],
            filters,
            options,
        )

        devices = await self.middleware.call(
            'device.get_storage_devices_topology')
        return filter_list(
            list(
                filter(
                    None, await asyncio_map(
                        functools.partial(annotate_disk_smart_tests,
                                          self.middleware, devices), disks,
                        16))),
            [],
            {"get": get},
        )

    @private
    @job()
    async def wait(self, job, disk, expected_result_time, new_test_num):
        start = datetime.utcnow()
        if expected_result_time < start:
            raise CallError(
                f'Invalid expected_result_time {expected_result_time.isoformat()}'
            )

        start_monotime = time.monotonic()
        end_monotime = start_monotime + (expected_result_time -
                                         start).total_seconds()

        # Check every percent but not more often than every minute
        interval = max((end_monotime - start_monotime) / 100.0, 60)
        while True:
            job.set_progress(
                min((time.monotonic() - start_monotime) /
                    (end_monotime - start_monotime), 0.99) * 100
            )

            try:
                tests = (await self.middleware.call(
                    'smart.test.results', [['disk', '=', disk['disk']]],
                    {'get': True}))['tests']
            except MatchNotFound:
                tests = []

            for test in tests:
                if test['num'] == new_test_num:
                    return test

            await asyncio.sleep(interval)
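
The Cron helpers used throughout these services flatten the nested `schedule` dict into the flat per-field values the datastore stores, and reverse the mapping on read. A rough standalone illustration of that roundtrip (field names taken from the examples above; the real helpers live in middlewared.schema and also handle prefixes and begin/end times):

CRON_FIELDS = ('minute', 'hour', 'dom', 'month', 'dow')


def schedule_to_db(data, key_prefix=''):
    # {'schedule': {'minute': '30', ...}} -> {'minute': '30', ...}
    schedule = data.pop('schedule')
    for field in CRON_FIELDS:
        data[key_prefix + field] = schedule[field]


def db_to_schedule(data, key_prefix=''):
    data['schedule'] = {
        field: data.pop(key_prefix + field) for field in CRON_FIELDS
    }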
コード例 #24
ファイル: rsync.py プロジェクト: mike0615/freenas
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        return data
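
rsync_task_extend splits the stored `extra` string on whitespace when reading a task, and validate_rsync_task joins it back before writing; filter(None, ...) drops the empty strings that re.split produces for leading or trailing whitespace. For example:

import re

stored = '  --partial   --info=progress2 '
extra = list(filter(None, re.split(r'\s+', stored)))
assert extra == ['--partial', '--info=progress2']
assert ' '.join(extra) == '--partial --info=progress2'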
コード例 #25
ファイル: pool.py プロジェクト: razzfazz/freenas
    async def pool_scrub_extend(self, data):
        data['pool'] = data.pop('volume')
        data['pool'] = data['pool']['id']
        Cron.convert_db_format_to_schedule(data)
        return data
コード例 #26
class RsyncTaskService(CRUDService):
    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'

    @private
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = await self.middleware.call('notifier.get_user_object', username)
        if not user:
            verrors.add(f'{schema}.user',
                        f'Provided user "{username}" does not exist')
            raise verrors

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        if data.get('extra'):
            data['extra'] = ' '.join(data['extra'])
        else:
            data['extra'] = ''

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'module' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'ssh':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user.pw_dir, '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user.pw_dir, '.ssh',
                                               'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(
                glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in home dir.')
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 0600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}')

            if (data.get('validate_rpath') and remote_path and remote_host
                    and remote_port):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.split('@')
                else:
                    remote_username = username

                try:
                    with (await asyncio.wait_for(
                            asyncssh.connect(
                                remote_host,
                                port=remote_port,
                                username=remote_username,
                                client_keys=key_files,
                                known_hosts=None,
                            ),
                            timeout=5)) as conn:

                        await conn.run(f'test -d {remote_path}', check=True)

                except asyncio.TimeoutError:
                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )
                except OSError as e:
                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(f'{schema}.remotehost', str(e))
                except asyncssh.DisconnectError as e:
                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect error (code {e.code}) was raised when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )
                except asyncssh.ProcessError as e:
                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The remote path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field.'
                        )
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to the remote host was successful but the remote path '
                            f'could not be verified. {e}'
                        )
                except asyncssh.Error as e:
                    if e.__class__.__name__ in str(e):
                        exception_reason = str(e)
                    else:
                        exception_reason = f'{e.__class__.__name__} {e}'
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data.get('validate_rpath'):
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        return verrors, data

    @accepts(
        Dict(
            'rsync_task_create',
            Str('path'),
            Str('user', required=True),
            Str('remotehost'),
            Int('remoteport'),
            Str('mode'),
            Str('remotemodule'),
            Str('remotepath'),
            Bool('validate_rpath'),
            Str('direction'),
            Str('desc'),
            Cron('schedule'),
            Bool('recursive'),
            Bool('times'),
            Bool('compress'),
            Bool('archive'),
            Bool('delete'),
            Bool('quiet'),
            Bool('preserveperm'),
            Bool('preserveattr'),
            Bool('delayupdates'),
            List('extra', items=[Str('extra')]),
            Bool('enabled'),
            register=True,
        ))
    async def do_create(self, data):
        verrors, data = await self.validate_rsync_task(data,
                                                       'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('rsync_task_create', 'rsync_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new,
                                                       'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        res = await self.middleware.call('datastore.delete',
                                         self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res
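
The SSH branch of validate_rsync_task above accepts any id_[edr]* key whose mode grants nothing to group or others. A condensed, hypothetical standalone form of the same permission test:

import os


def key_permissions_ok(path):
    # A private key must not be readable, writable or executable by
    # group or others, i.e. mode & 0o077 must be zero (chmod 600 passes).
    return os.stat(path).st_mode & 0o077 == 0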
コード例 #27
class SMARTTestService(CRUDService):

    class Config:
        datastore = 'tasks.smarttest'
        datastore_extend = 'smart.test.smart_test_extend'
        datastore_prefix = 'smarttest_'
        namespace = 'smart.test'

    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        smart_tests = await self.query(filters=[('type', '=', data['type'])])
        configured_disks = [d for test in smart_tests for d in test['disks']]
        disks_dict = {disk['identifier']: disk['name'] for disk in (await self.middleware.call('disk.query'))}

        disks = data.get('disks')
        used_disks = []
        invalid_disks = []
        for disk in disks:
            if disk in configured_disks:
                used_disks.append(disks_dict[disk])
            if disk not in disks_dict.keys():
                invalid_disks.append(disk)

        if used_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks already have tests for this type: {", ".join(used_disks)}'
            )

        if invalid_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks are invalid: {", ".join(invalid_disks)}'
            )

        return verrors

    @accepts(
        Dict(
            'smart_task_create',
            Cron('schedule'),
            Str('desc'),
            List('disks', items=[Str('disk')], required=True),
            Str('type', enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'], required=True),
            register=True
        )
    )
    async def do_create(self, data):
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if not data.get('disks'):
            verrors.add(
                'smart_test_create.disks',
                'This field is required'
            )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call(
            'service.restart',
            'smartd',
            {'onetime': False}
        )

        return data

    @accepts(
        Int('id', validators=[Range(min=1)]),
        Patch('smart_task_create', 'smart_task_update', ('attr', {'update': True}))
    )
    async def do_update(self, id, data):
        old = await self.query(filters=[('id', '=', id)], options={'get': True})
        new = old.copy()
        new.update(data)

        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [disk for disk in old['disks'] if disk not in new['disks']]
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [disk for disk in chain(new_disks, old['disks']) if disk not in deleted_disks]

        if not new.get('disks'):
            verrors.add(
                'smart_test_update.disks',
                'This field is required'
            )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call(
            'service.restart',
            'smartd',
            {'onetime': False}
        )

        return await self.query(filters=[('id', '=', id)], options={'get': True})

    @accepts(
        Int('id')
    )
    async def do_delete(self, id):
        response = await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id
        )

        return response
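
The do_update above validates only the disks newly added for the same test type (so disks already attached to this task do not trip the "already have tests" check), then rebuilds the final list from the old disks plus additions, minus removals. A worked example with placeholder identifiers:

from itertools import chain

old_disks = ['a', 'b']
new_input = ['b', 'c']  # the update removed 'a' and added 'c'

new_disks = [d for d in new_input if d not in old_disks]      # ['c']
deleted_disks = [d for d in old_disks if d not in new_input]  # ['a']

# Only ['c'] is checked against other tasks of the same type; the value
# written back is everything old plus the additions, minus the deletions:
final = [d for d in chain(new_disks, old_disks) if d not in deleted_disks]
assert final == ['c', 'b']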
コード例 #28
class PoolScrubService(CRUDService):

    class Config:
        datastore = 'storage.scrub'
        datastore_extend = 'pool.scrub.pool_scrub_extend'
        datastore_prefix = 'scrub_'
        namespace = 'pool.scrub'

    @private
    async def pool_scrub_extend(self, data):
        data['pool'] = data.pop('volume')
        data['pool'] = data['pool']['id']
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        pool_pk = data.get('pool')
        if pool_pk:
            pool_obj = await self.middleware.call(
                'datastore.query',
                'storage.volume',
                [('id', '=', pool_pk)]
            )

            if len(pool_obj) == 0:
                verrors.add(
                    f'{schema}.pool',
                    'The specified volume does not exist'
                )
            elif (
                    'id' not in data.keys() or
                    (
                        'id' in data.keys() and
                        'original_pool_id' in data.keys() and
                        pool_pk != data['original_pool_id']
                    )
            ):
                scrub_obj = await self.query(filters=[('volume_id', '=', pool_pk)])
                if len(scrub_obj) != 0:
                    verrors.add(
                        f'{schema}.pool',
                        'A scrub with this pool already exists'
                    )

        return verrors, data

    @accepts(
        Dict(
            'pool_scrub_create',
            Int('pool', validators=[Range(min=1)], required=True),
            Int('threshold', validators=[Range(min=0)]),
            Str('description'),
            Cron('schedule'),
            Bool('enabled'),
            register=True
        )
    )
    async def do_create(self, data):
        verrors, data = await self.validate_data(data, 'pool_scrub_create')

        if verrors:
            raise verrors

        data['volume'] = data.pop('pool')
        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call(
            'service.restart',
            'cron',
            {'onetime': False}
        )

        return await self.query(filters=[('id', '=', data['id'])], options={'get': True})

    @accepts(
        Int('id', validators=[Range(min=1)]),
        Patch('pool_scrub_create', 'pool_scrub_update', ('attr', {'update': True}))
    )
    async def do_update(self, id, data):
        task_data = await self.query(filters=[('id', '=', id)], options={'get': True})
        original_data = task_data.copy()
        task_data['original_pool_id'] = original_data['pool']
        task_data.update(data)
        verrors, task_data = await self.validate_data(task_data, 'pool_scrub_update')

        if verrors:
            raise verrors

        task_data.pop('original_pool_id')
        Cron.convert_schedule_to_db_format(task_data)
        Cron.convert_schedule_to_db_format(original_data)

        if len(set(task_data.items()) ^ set(original_data.items())) > 0:

            task_data['volume'] = task_data.pop('pool')

            await self.middleware.call(
                'datastore.update',
                self._config.datastore,
                id,
                task_data,
                {'prefix': self._config.datastore_prefix}
            )

            await self.middleware.call(
                'service.restart',
                'cron',
                {'onetime': False}
            )

        return await self.query(filters=[('id', '=', id)], options={'get': True})

    @accepts(
        Int('id')
    )
    async def do_delete(self, id):
        response = await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id
        )

        await self.middleware.call(
            'service.restart',
            'cron',
            {'onetime': False}
        )
        return response
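
PoolScrubService (and CronJobService below) skip the datastore write and the cron restart when nothing actually changed, by taking the symmetric difference of the two dicts' items. This relies on every value involved being hashable; for instance:

old = {'threshold': 35, 'enabled': True}
new = {'threshold': 28, 'enabled': True}

# Items present in exactly one of the two dicts:
assert set(new.items()) ^ set(old.items()) == {('threshold', 28), ('threshold', 35)}

# An empty symmetric difference means the update was a no-op:
assert set(old.items()) ^ set(old.items()) == set()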
コード例 #29
ファイル: rsync.py プロジェクト: mike0615/freenas
    async def do_create(self, data):
        """
        Create a Rsync Task.

        `path` represents the path to pool/dataset.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` represents different operating mechanisms for Rsync, i.e. Rsync Module mode / Rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute should be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system at the 5th minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self._get_instance(data['id'])
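
For completeness, the same call can be issued from Python rather than raw websocket JSON. A sketch using the middleware client (assuming middlewared.client is importable on the host; the username and hostnames below are placeholders):

from middlewared.client import Client

with Client() as c:
    task = c.call('rsynctask.create', {
        'enabled': True,
        'schedule': {'minute': '5', 'hour': '*', 'dom': '*', 'month': '*', 'dow': '*'},
        'desc': 'Test rsync task',
        'user': 'backup',  # hypothetical local user
        'mode': 'MODULE',
        'remotehost': 'rsync.example.com',
        'direction': 'PULL',
        'path': '/mnt/vol1/rsync_dataset',
        'remotemodule': 'remote_module1',
    })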
コード例 #30
ファイル: cron.py プロジェクト: razzfazz/freenas
    def cron_extend(self, data):
        Cron.convert_db_format_to_schedule(data)
        return data
コード例 #31
class CronJobService(CRUDService):
    class Config:
        datastore = 'tasks.cronjob'
        datastore_prefix = 'cron_'
        datastore_extend = 'cronjob.cron_extend'
        namespace = 'cronjob'
        cli_namespace = 'task.cron_job'

    @private
    def cron_extend(self, data):
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def construct_cron_command(self,
                                     schedule,
                                     user,
                                     command,
                                     stdout=True,
                                     stderr=True):
        return list(
            filter(bool, (
                schedule['minute'], schedule['hour'], schedule['dom'],
                schedule['month'], schedule['dow'], user,
                'PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:/root/bin"',
                command.replace('\n', ''), '> /dev/null' if stdout else '',
                '2> /dev/null' if stderr else '')))

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        user = data.get('user')
        if user:
            # Windows users can have spaces in their usernames
            # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808
            if ' ' in user:
                verrors.add(f'{schema}.user', 'Usernames cannot have spaces')

            else:
                user_data = None
                with contextlib.suppress(KeyError):
                    user_data = await self.middleware.call(
                        'dscache.get_uncached_user', user)

                if not user_data:
                    verrors.add(f'{schema}.user',
                                'Specified user does not exist')

        command = data.get('command')
        if not command:
            verrors.add(f'{schema}.command',
                        'Please specify a command for cronjob task.')

        return verrors, data

    @accepts(
        Dict('cron_job_create',
             Bool('enabled'),
             Bool('stderr', default=False),
             Bool('stdout', default=True),
             Cron('schedule', defaults={'minute': '00'}),
             Str('command', required=True),
             Str('description'),
             Str('user', required=True),
             register=True))
    async def do_create(self, data):
        """
        Create a new cron job.

        `stderr` and `stdout` are boolean values which if `true`, represent that we would like to suppress
        standard error / standard output respectively.

        .. examples(websocket)::

          Create a cron job which executes `touch /tmp/testfile` at the 5th minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "cronjob.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "command": "touch /tmp/testfile",
                    "description": "Test command",
                    "user": "******",
                    "stderr": true,
                    "stdout": true
                }]
            }
        """
        verrors, data = await self.validate_data(data, 'cron_job_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron')

        return await self._get_instance(data['id'])

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('cron_job_create', 'cron_job_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update cronjob of `id`.
        """
        task_data = await self.query(filters=[('id', '=', id)],
                                     options={'get': True})
        original_data = task_data.copy()
        task_data.update(data)
        verrors, task_data = await self.validate_data(task_data,
                                                      'cron_job_update')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(task_data)
        Cron.convert_schedule_to_db_format(original_data)

        if len(set(task_data.items()) ^ set(original_data.items())) > 0:

            await self.middleware.call(
                'datastore.update', self._config.datastore, id, task_data,
                {'prefix': self._config.datastore_prefix})

            await self.middleware.call('service.restart', 'cron')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete cronjob of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('service.restart', 'cron')

        return response

    @accepts(
        Int('id'),
        Bool('skip_disabled', default=False),
    )
    @job(lock=lambda args: f'cron_job_run_{args[0]}', logs=True)
    def run(self, job, id, skip_disabled):
        """
        Job to run cronjob task of `id`.
        """
        def __cron_log(line):
            job.logs_fd.write(line)
            syslog.syslog(syslog.LOG_INFO, line.decode())

        cron_task = self.middleware.call_sync('cronjob.get_instance', id)
        if skip_disabled and not cron_task['enabled']:
            raise CallError('Cron job is disabled', errno.EINVAL)

        # construct_cron_command returns [minute, hour, dom, month, dow, user,
        # PATH, command, redirects]; [7:] keeps only the command and its redirects.
        cron_cmd = ' '.join(
            self.middleware.call_sync(
                'cronjob.construct_cron_command', cron_task['schedule'],
                cron_task['user'], cron_task['command'],
                cron_task['stdout'], cron_task['stderr']
            )[7:])

        job.set_progress(10, 'Executing Cron Task')

        syslog.openlog('cron', facility=syslog.LOG_CRON)

        syslog.syslog(syslog.LOG_INFO,
                      f'({cron_task["user"]}) CMD ({cron_cmd})')

        cp = run_command_with_user_context(cron_cmd, cron_task['user'],
                                           __cron_log)

        syslog.closelog()

        job.set_progress(85, 'Executed Cron Task')

        if cp.stdout:
            email = (self.middleware.call_sync(
                'user.query', [['username', '=', cron_task['user']]],
                {'get': True}))['email']
            stdout = cp.stdout.decode()
            if email:
                mail_job = self.middleware.call_sync('mail.send', {
                    'subject': 'CronTask Run',
                    'text': stdout,
                    'to': [email]
                })

                job.set_progress(95, 'Sending mail for Cron Task output')

                mail_job.wait_sync()
                if mail_job.error:
                    job.logs_fd.write(
                        f'Failed to send email for CronTask run: {mail_job.error}'
                        .encode())
            else:
                job.set_progress(
                    95,
                    'Email for root user not configured. Skipping sending mail.'
                )

            job.logs_fd.write(
                f'Executed CronTask - {cron_cmd}: {stdout}'.encode())

        if cp.returncode:
            raise CallError(
                f'CronTask "{cron_cmd}" exited with {cp.returncode} (non-zero) exit status.'
            )

        job.set_progress(100, 'Execution of Cron Task complete.')
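
construct_cron_command assembles a full crontab line: five schedule fields, the user, a PATH assignment, the command, and the optional output redirects (falsy redirects are filtered out, but the first seven elements are always present). run() therefore slices off the first seven elements so that only the command and its redirects are executed. Roughly:

parts = [
    '5', '*', '*', '*', '*',                 # schedule
    'root',                                  # user
    'PATH="/bin:/sbin:/usr/bin:/usr/sbin"',  # environment (abridged)
    'touch /tmp/testfile',                   # command
    '> /dev/null',                           # suppress stdout
    '2> /dev/null',                          # suppress stderr
]
assert ' '.join(parts[7:]) == 'touch /tmp/testfile > /dev/null 2> /dev/null'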
コード例 #32
ファイル: test_schema.py プロジェクト: freenas/freenas
def test__cron__convert_db_format_to_schedule(data_dict, begin_end, result):
    Cron.convert_db_format_to_schedule(data_dict, "schedule", "cron_", begin_end)
    assert data_dict == result
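
The parametrized data for this test is not shown on this page. A plausible case, assuming flat DB column names following the cron_ prefix used by CronJobService (the column names below are illustrative, not taken from the actual test file):

import pytest
from middlewared.schema import Cron


@pytest.mark.parametrize('data_dict,begin_end,result', [
    (
        # hypothetical flat DB columns -> nested schedule
        {'cron_minute': '5', 'cron_hour': '*', 'cron_daymonth': '*',
         'cron_month': '*', 'cron_dayweek': '*'},
        False,
        {'schedule': {'minute': '5', 'hour': '*', 'dom': '*',
                      'month': '*', 'dow': '*'}},
    ),
])
def test__cron__convert_db_format_to_schedule(data_dict, begin_end, result):
    Cron.convert_db_format_to_schedule(data_dict, 'schedule', 'cron_', begin_end)
    assert data_dict == result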
コード例 #33
    async def extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync
コード例 #35
class CloudSyncService(CRUDService):

    local_fs_lock_manager = FsLockManager()
    remote_fs_lock_manager = FsLockManager()

    class Config:
        datastore = "tasks.cloudsync"
        datastore_extend = "cloudsync._extend"

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query all Cloud Sync Tasks with `query-filters` and `query-options`.
        """
        tasks_or_task = await super().query(filters, options)

        jobs = {}
        for j in await self.middleware.call(
            "core.get_jobs", [("method", "=", "cloudsync.sync")],
            {"order_by": ["id"]},
        ):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        if isinstance(tasks_or_task, list):
            for task in tasks_or_task:
                task["job"] = jobs.get(task["id"])
        else:
            tasks_or_task["job"] = jobs.get(tasks_or_task["id"])

        return tasks_or_task

    @private
    async def _extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_password"])
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_salt"])

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync

    @private
    async def _compress(self, cloud_sync):
        cloud_sync["credential"] = cloud_sync.pop("credentials")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_password"])
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_salt"])

        Cron.convert_schedule_to_db_format(cloud_sync)

        cloud_sync.pop('job', None)

        return cloud_sync

    @private
    async def _get_credentials(self, credentials_id):
        try:
            return await self.middleware.call("datastore.query",
                                              "system.cloudcredentials",
                                              [("id", "=", credentials_id)],
                                              {"get": True})
        except IndexError:
            return None

    @private
    async def _basic_validate(self, verrors, name, data):
        if data["encryption"]:
            if not data["encryption_password"]:
                verrors.add(
                    f"{name}.encryption_password",
                    "This field is required when encryption is enabled")

        credentials = await self._get_credentials(data["credentials"])
        if not credentials:
            verrors.add(f"{name}.credentials", "Invalid credentials")

        try:
            shlex.split(data["args"])
        except ValueError as e:
            verrors.add(f"{name}.args", f"Parse error: {e.args[0]}")

        if verrors:
            raise verrors

        provider = REMOTES[credentials["provider"]]

        schema = []

        if provider.buckets:
            schema.append(Str("bucket", required=True, empty=False))

        schema.append(Str("folder", required=True))

        schema.extend(provider.task_schema)

        schema.extend(self.common_task_schema(provider))

        attributes_verrors = validate_attributes(schema,
                                                 data,
                                                 additional_attrs=True)

        if not attributes_verrors:
            await provider.pre_save_task(data, credentials, verrors)

        verrors.add_child(f"{name}.attributes", attributes_verrors)

    @private
    async def _validate(self, verrors, name, data):
        await self._basic_validate(verrors, name, data)

        for i, (limit1, limit2) in enumerate(zip(data["bwlimit"],
                                                 data["bwlimit"][1:])):
            if limit1["time"] >= limit2["time"]:
                verrors.add(
                    f"{name}.bwlimit.{i + 1}.time",
                    f"Invalid time order: {limit1['time']}, {limit2['time']}")

        if data["snapshot"]:
            if data["direction"] != "PUSH":
                verrors.add(f"{name}.snapshot",
                            "This option can only be enabled for PUSH tasks")

    @private
    async def _validate_folder(self, verrors, name, data):
        if data["direction"] == "PULL":
            folder = data["attributes"]["folder"].rstrip("/")
            if folder:
                folder_parent = os.path.normpath(os.path.join(folder, ".."))
                if folder_parent == ".":
                    folder_parent = ""
                folder_basename = os.path.basename(folder)
                ls = await self.list_directory(
                    dict(
                        credentials=data["credentials"],
                        encryption=data["encryption"],
                        filename_encryption=data["filename_encryption"],
                        encryption_password=data["encryption_password"],
                        encryption_salt=data["encryption_salt"],
                        attributes=dict(data["attributes"],
                                        folder=folder_parent),
                        args=data["args"],
                    ))
                for item in ls:
                    if item["Name"] == folder_basename:
                        if not item["IsDir"]:
                            verrors.add(f"{name}.attributes.folder",
                                        "This is not a directory")
                        break
                else:
                    verrors.add(f"{name}.attributes.folder",
                                "Directory does not exist")

        if data["direction"] == "PUSH":
            credentials = await self._get_credentials(data["credentials"])

            provider = REMOTES[credentials["provider"]]

            if provider.readonly:
                verrors.add(f"{name}.direction", "This remote is read-only")

    @accepts(
        Dict(
            "cloud_sync_create",
            Str("description", default=""),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transfer_mode", enum=["SYNC", "COPY", "MOVE"], required=True),
            Str("path", required=True),
            Int("credentials", required=True),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password", default=""),
            Str("encryption_salt", default=""),
            Cron("schedule", required=True),
            Bool("follow_symlinks", default=False),
            Int("transfers",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            List("bwlimit",
                 default=[],
                 items=[
                     Dict(
                         "cloud_sync_bwlimit", Str("time",
                                                   validators=[Time()]),
                         Int("bandwidth", validators=[Range(min=1)],
                             null=True))
                 ]),
            List("exclude", default=[], items=[Str("path", empty=False)]),
            Dict("attributes", additional_attrs=True, required=True),
            Bool("snapshot", default=False),
            Str("pre_script", default=""),
            Str("post_script", default=""),
            Str("args", default=""),
            Bool("enabled", default=True),
            register=True,
        ))
    async def do_create(self, cloud_sync):
        """
        Creates a new cloud_sync entry.

        .. examples(websocket)::

          Create a new cloud_sync using amazon s3 attributes, which is supposed to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "cloudsync.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credentials": 1,
                "schedule": {
                  "minute": "00",
                  "hour": "*",
                  "dom": "*",
                  "month": "*",
                  "dow": "*"
                },
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        cloud_sync["id"] = await self.middleware.call("datastore.insert",
                                                      "tasks.cloudsync",
                                                      cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"),
             Patch("cloud_sync_create", "cloud_sync_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Updates the cloud_sync entry `id` with `data`.
        """
        cloud_sync = await self._get_instance(id)

        # credentials is a foreign key for now
        if cloud_sync["credentials"]:
            cloud_sync["credentials"] = cloud_sync["credentials"]["id"]

        cloud_sync.update(data)

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        await self.middleware.call("datastore.update", "tasks.cloudsync", id,
                                   cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Deletes cloud_sync entry `id`.
        """
        await self.middleware.call("datastore.delete", "tasks.cloudsync", id)
        await self.middleware.call("service.restart", "cron")

    @accepts(Int("credentials_id"))
    async def list_buckets(self, credentials_id):
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")

        provider = REMOTES[credentials["provider"]]

        if not provider.buckets:
            raise CallError("This provider does not use buckets")

        return await self.ls({"credentials": credentials}, "")

    @accepts(
        Dict(
            "cloud_sync_ls",
            Int("credentials", required=True),
            Bool("encryption", default=False),
            Bool("filename_encryption", default=False),
            Str("encryption_password", default=""),
            Str("encryption_salt", default=""),
            Dict("attributes", required=True, additional_attrs=True),
            Str("args", default=""),
        ))
    async def list_directory(self, cloud_sync):
        """
        List contents of a remote bucket / directory.

        If the remote supports buckets, the path is constructed from the "bucket" and "folder" keys in `attributes`.
        If the remote does not support buckets, the path is constructed from the "folder" key only.
        "folder" is the directory name and "bucket" is the bucket name on the remote.

        Path examples:

        S3 Service
        `bucketname/directory/name`

        Dropbox Service
        `directory/name`


        `credentials` is a valid id of a Cloud Sync Credential which will be used to connect to the provider.
        """
        verrors = ValidationErrors()

        await self._basic_validate(verrors, "cloud_sync", dict(cloud_sync))

        if verrors:
            raise verrors

        credentials = await self._get_credentials(cloud_sync["credentials"])

        path = get_remote_path(REMOTES[credentials["provider"]],
                               cloud_sync["attributes"])

        return await self.ls(dict(cloud_sync, credentials=credentials), path)

    @private
    async def ls(self, config, path):
        with RcloneConfig(config) as config:
            proc = await run([
                "rclone", "--config", config.config_path, "lsjson",
                "remote:" + path
            ],
                             check=False,
                             encoding="utf8")
            if proc.returncode == 0:
                return json.loads(proc.stdout)
            else:
                raise CallError(proc.stderr)

    @item_method
    @accepts(Int("id"))
    @job(lock=lambda args: "cloud_sync:{}".format(args[-1]),
         lock_queue_size=1,
         logs=True)
    async def sync(self, job, id):
        """
        Run the cloud_sync job `id`, syncing the local data to remote.
        """

        cloud_sync = await self._get_instance(id)

        credentials = cloud_sync["credentials"]

        local_path = cloud_sync["path"]
        local_direction = (FsLockDirection.READ if cloud_sync["direction"] == "PUSH"
                           else FsLockDirection.WRITE)

        remote_path = get_remote_path(REMOTES[credentials["provider"]],
                                      cloud_sync["attributes"])
        remote_direction = (FsLockDirection.READ if cloud_sync["direction"] == "PULL"
                            else FsLockDirection.WRITE)

        directions = {
            FsLockDirection.READ: "reading",
            FsLockDirection.WRITE: "writing",
        }

        job.set_progress(
            0,
            f"Locking local path {local_path!r} for {directions[local_direction]}"
        )
        async with self.local_fs_lock_manager.lock(local_path,
                                                   local_direction):
            job.set_progress(
                0,
                f"Locking remote path {remote_path!r} for {directions[remote_direction]}"
            )
            async with self.remote_fs_lock_manager.lock(
                    f"{credentials['id']}/{remote_path}", remote_direction):
                job.set_progress(0, "Starting")
                return await rclone(self.middleware, job, cloud_sync)

    @accepts()
    async def providers(self):
        """
        Returns a list of dictionaries of supported providers for Cloud Sync Tasks.

        `credentials_schema` is the JSON schema for credentials attributes.

        `task_schema` is the JSON schema for task attributes.

        `buckets` is a boolean which is `true` if the provider supports buckets.

        Example of a single provider:

        [
            {
                "name": "AMAZON_CLOUD_DRIVE",
                "title": "Amazon Cloud Drive",
                "credentials_schema": [
                    {
                        "property": "client_id",
                        "schema": {
                            "title": "Amazon Application Client ID",
                            "_required_": true,
                            "type": "string"
                        }
                    },
                    {
                        "property": "client_secret",
                        "schema": {
                            "title": "Application Key",
                            "_required_": true,
                            "type": "string"
                        }
                    }
                ],
                "credentials_oauth": null,
                "buckets": false,
                "bucket_title": "Bucket",
                "task_schema": []
            }
        ]
        """
        return sorted(
            [
                {
                    "name": provider.name,
                    "title": provider.title,
                    "credentials_schema": [
                        {"property": field.name, "schema": field.to_json_schema()}
                        for field in provider.credentials_schema
                    ],
                    "credentials_oauth": (
                        f"{OAUTH_URL}/{provider.name.lower()}"
                        if provider.credentials_oauth else None
                    ),
                    "buckets": provider.buckets,
                    "bucket_title": provider.bucket_title,
                    "task_schema": [
                        {"property": field.name, "schema": field.to_json_schema()}
                        for field in provider.task_schema + self.common_task_schema(provider)
                    ],
                }
                for provider in REMOTES.values()
            ],
            key=lambda provider: provider["title"].lower(),
        )

    def common_task_schema(self, provider):
        schema = []

        if provider.fast_list:
            schema.append(
                Bool("fast_list",
                     default=False,
                     title="Use --fast-list",
                     description=textwrap.dedent("""\
                Use fewer transactions in exchange for more RAM. This may also speed up or slow down your
                transfer. See [rclone documentation](https://rclone.org/docs/#fast-list) for more details.
            """).rstrip()))

        return schema
Code example #36
0
File: rsync.py Project: razzfazz/freenas
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        Cron.convert_db_format_to_schedule(data)
        return data
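
A quick illustration of the splitting above (hypothetical interpreter session, not part
of the source): runs of whitespace delimit the arguments and empty strings are dropped.

    >>> import re
    >>> list(filter(None, re.split(r"\s+", "  --delete  -v ")))
    ['--delete', '-v']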
Code example #37
0
File: cron.py Project: xpcom-bsd/freenas
class CronJobService(CRUDService):
    class Config:
        datastore = 'tasks.cronjob'
        datastore_prefix = 'cron_'
        datastore_extend = 'cronjob.cron_extend'
        namespace = 'cronjob'

    @private
    def cron_extend(self, data):
        Cron.convert_db_format_to_schedule(data)
        return data

    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        user = data.get('user')
        if user:
            # Windows users can have spaces in their usernames
            # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808
            if ' ' in user:
                verrors.add(f'{schema}.user', 'Usernames cannot have spaces')

            elif not (await self.middleware.call('notifier.get_user_object',
                                                 user)):
                verrors.add(f'{schema}.user', 'Specified user does not exist')

        return verrors, data

    @accepts(
        Dict('cron_job_create',
             Bool('enabled'),
             Bool('stderr'),
             Bool('stdout'),
             Cron('schedule'),
             Str('command', required=True),
             Str('description'),
             Str('user', required=True),
             register=True))
    async def do_create(self, data):
        verrors, data = await self.validate_data(data, 'cron_job_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron',
                                   {'onetime': False})

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('cron_job_create', 'cron_job_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        task_data = await self.query(filters=[('id', '=', id)],
                                     options={'get': True})
        original_data = task_data.copy()
        task_data.update(data)
        verrors, task_data = await self.validate_data(task_data,
                                                      'cron_job_update')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(task_data)
        Cron.convert_schedule_to_db_format(original_data)

        if len(set(task_data.items()) ^ set(original_data.items())) > 0:

            await self.middleware.call(
                'datastore.update', self._config.datastore, id, task_data,
                {'prefix': self._config.datastore_prefix})

            await self.middleware.call('service.restart', 'cron',
                                       {'onetime': False})

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        return response
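
The Cron.convert_schedule_to_db_format / convert_db_format_to_schedule pair used
throughout these services translates between the nested `schedule` dict of the API
layer and the flat per-field columns of the datastore. A rough sketch of the round
trip (the flattened field names shown are an assumption, not taken from the source):

    task = {'command': 'echo hi', 'schedule': {'minute': '0', 'hour': '*/2'}}

    Cron.convert_schedule_to_db_format(task)
    # The nested dict is flattened in place, roughly:
    #   {'command': 'echo hi', 'minute': '0', 'hour': '*/2', ...}
    # (the datastore layer then applies the configured prefix, e.g. 'cron_')

    Cron.convert_db_format_to_schedule(task)
    # ...and re-nested when rows are read back:
    #   {'command': 'echo hi', 'schedule': {'minute': '0', 'hour': '*/2', ...}}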
Code example #38
0
File: smart.py Project: linfengzhong/freenas
class SMARTTestService(CRUDService):
    class Config:
        datastore = 'tasks.smarttest'
        datastore_extend = 'smart.test.smart_test_extend'
        datastore_prefix = 'smarttest_'
        namespace = 'smart.test'

    @private
    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        smart_tests = await self.query(filters=[('type', '=', data['type'])])
        configured_disks = [d for test in smart_tests for d in test['disks']]
        disks_dict = {
            disk['identifier']: disk['name']
            for disk in (await self.middleware.call('disk.query'))
        }

        disks = data.get('disks')
        used_disks = []
        invalid_disks = []
        for disk in disks:
            if disk in configured_disks:
                used_disks.append(disks_dict[disk])
            if disk not in disks_dict.keys():
                invalid_disks.append(disk)

        if used_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks already have tests for this type: {", ".join(used_disks)}'
            )

        if invalid_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks are invalid: {", ".join(invalid_disks)}')

        return verrors

    @accepts(
        Dict('smart_task_create',
             Cron('schedule', exclude=['minute']),
             Str('desc'),
             Bool('all_disks', default=False),
             List('disks', items=[Str('disk')], default=[]),
             Str('type',
                 enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'],
                 required=True),
             register=True))
    async def do_create(self, data):
        """
        Create a SMART Test Task.

        `disks` is a list of valid disks which should be monitored in this task.

        `type` specifies the type of SMART test to be executed.

        `all_disks`, when enabled, makes the task cover all disks, in which case `disks` is not required.

        .. examples(websocket)::

          Create a SMART Test Task which executes at the 30th minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.create",
                "params": [{
                    "schedule": {
                        "minute": "30",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "all_disks": true,
                    "type": "OFFLINE",
                    "disks": []
                }]
            }
        """
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if data['all_disks']:
            if data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This test is already enabled for all disks')
        else:
            if not data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('smart_task_create', 'smart_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update SMART Test Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        new = old.copy()
        new.update(data)

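        # As in do_create, only the first letter of the type is stored in the database.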
        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [
            disk for disk in old['disks'] if disk not in new['disks']
        ]
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [
            disk for disk in chain(new_disks, old['disks'])
            if disk not in deleted_disks
        ]

        if new['all_disks']:
            if new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This test is already enabled for all disks')
        else:
            if not new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete SMART Test Task of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return response

    @filterable
    async def results(self, filters, options):
        """
        Get disk(s) S.M.A.R.T. test(s) results.

        .. examples(websocket)::

          Get all disks tests results

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.results",
                "params": []
            }

            returns

            :::javascript

            [
              // ATA disk
              {
                "disk": "ada0",
                "tests": [
                  {
                    "num": 1,
                    "description": "Short offline",
                    "status": "SUCCESS",
                    "status_verbose": "Completed without error",
                    "remaining": 0.0,
                    "lifetime": 16590,
                    "lba_of_first_error": null
                  }
                ]
              },
              // SCSI disk
              {
                "disk": "ada1",
                "tests": [
                  {
                    "num": 1,
                    "description": "Background long",
                    "status": "FAILED",
                    "status_verbose": "Completed, segment failed",
                    "segment_number": null,
                    "lifetime": 3943,
                    "lba_of_first_error": null
                  }
                ]
              }
            ]

          Get specific disk test results

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.results",
                "params": [
                  [["disk", "=", "ada0"]],
                  {"get": true}
                ]
            }

            returns

            :::javascript

            {
              "disk": "ada0",
              "tests": [
                {
                  "num": 1,
                  "description": "Short offline",
                  "status": "SUCCESS",
                  "status_verbose": "Completed without error",
                  "remaining": 0.0,
                  "lifetime": 16590,
                  "lba_of_first_error": None,
                }
              ]
            }
        """

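        # Defer the "get" option: the first filter_list must return a list so every
        # matching disk can be annotated; "get" is then applied in the final filter_list.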
        get = (options or {}).pop("get", False)

        disks = filter_list(
            [{
                "disk": disk["name"]
            } for disk in await self.middleware.call("disk.query")],
            filters,
            options,
        )

        devices = await camcontrol_list()
        results = await asyncio_map(
            functools.partial(annotate_disk_smart_tests, self.middleware, devices),
            disks,
            16,
        )
        return filter_list(list(filter(None, results)), [], {"get": get})
Code example #39
0
File: snapshot.py Project: PersonalBSD/freenas-core
class PeriodicSnapshotTaskService(CRUDService):
    class Config:
        datastore = 'storage.task'
        datastore_prefix = 'task_'
        datastore_extend = 'pool.snapshottask.extend'
        datastore_extend_context = 'pool.snapshottask.extend_context'
        namespace = 'pool.snapshottask'

    @private
    async def extend_context(self, extra):
        return {
            'state': await self.middleware.call('zettarepl.get_state'),
            'vmware': await self.middleware.call('vmware.query'),
        }

    @private
    async def extend(self, data, context):
        Cron.convert_db_format_to_schedule(data, begin_end=True)

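        # Flag the task if a VMware snapshot integration covers this dataset, either
        # directly or (for recursive tasks) via a child dataset.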
        data['vmware_sync'] = any(
            vmware['filesystem'] == data['dataset'] or
            (data['recursive'] and is_child(vmware['filesystem'], data['dataset']))
            for vmware in context['vmware']
        )

        if 'error' in context['state']:
            data['state'] = context['state']['error']
        else:
            data['state'] = context['state']['tasks'].get(
                f'periodic_snapshot_task_{data["id"]}', {
                    'state': 'PENDING',
                })

        return data

    @accepts(
        Dict('periodic_snapshot_create',
             Path('dataset', required=True),
             Bool('recursive', required=True),
             List('exclude', items=[Path('item', empty=False)], default=[]),
             Int('lifetime_value', required=True),
             Str('lifetime_unit',
                 enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR'],
                 required=True),
             Str('naming_schema',
                 required=True,
                 validators=[ReplicationSnapshotNamingSchema()]),
             Cron('schedule',
                  defaults={
                      'minute': '00',
                      'begin': '00:00',
                      'end': '23:59',
                  },
                  required=True,
                  begin_end=True),
             Bool('allow_empty', default=True),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create a Periodic Snapshot Task

        Create a Periodic Snapshot Task that will take snapshots of specified `dataset` at specified `schedule`.
        Recursive snapshots can be created if `recursive` flag is enabled. You can `exclude` specific child datasets
        or zvols from the snapshot.
        Snapshots will be automatically destroyed after a certain amount of time, specified by
        `lifetime_value` and `lifetime_unit`.
        If multiple periodic tasks create snapshots at the same time (for example hourly and daily at 00:00) the snapshot
        will be kept until the last of these tasks reaches its expiry time.
        Snapshots will be named according to `naming_schema` which is a `strftime`-like template for snapshot name
        and must contain `%Y`, `%m`, `%d`, `%H` and `%M`.

        .. examples(websocket)::

          Create a recursive Periodic Snapshot Task for dataset `data/work` excluding `data/work/temp`. Snapshots
          will be created on weekdays every hour from 09:00 to 18:00 and will be stored for two weeks.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.create",
                "params": [{
                    "dataset": "data/work",
                    "recursive": true,
                    "exclude": ["data/work/temp"],
                    "lifetime_value": 2,
                    "lifetime_unit": "WEEK",
                    "naming_schema": "auto_%Y-%m-%d_%H-%M",
                    "schedule": {
                        "minute": "0",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    }
                }]
            }
        """

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_create', await self._validate(data))

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data, begin_end=True)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch('periodic_snapshot_create', 'periodic_snapshot_update',
                   ('attr', {
                       'update': True
                   })))
    async def do_update(self, id, data):
        """
        Update a Periodic Snapshot Task with specific `id`

        See the documentation for the `create` method for information on payload contents.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.update",
                "params": [
                    1,
                    {
                        "dataset": "data/work",
                        "recursive": true,
                        "exclude": ["data/work/temp"],
                        "lifetime_value": 2,
                        "lifetime_unit": "WEEK",
                        "naming_schema": "auto_%Y-%m-%d_%H-%M",
                        "schedule": {
                            "minute": "0",
                            "hour": "*",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        }
                    }
                ]
            }
        """

        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_update', await self._validate(new))

        if not new['enabled']:
            for replication_task in await self.middleware.call(
                    'replication.query', [['enabled', '=', True]]):
                if any(periodic_snapshot_task['id'] == id
                       for periodic_snapshot_task in
                       replication_task['periodic_snapshot_tasks']):
                    verrors.add('periodic_snapshot_update.enabled', (
                        f'You can\'t disable this periodic snapshot task because it is bound to enabled replication '
                        f'task {replication_task["id"]!r}'))
                    break

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new, begin_end=True)

        for key in ('vmware_sync', 'state'):
            new.pop(key, None)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a Periodic Snapshot Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.delete",
                "params": [
                    1
                ]
            }
        """

        for replication_task in await self.middleware.call(
                'replication.query', [
                    ['direction', '=', 'PUSH'],
                    ['also_include_naming_schema', '=', []],
                    ['enabled', '=', True],
                ]):
            if len(replication_task['periodic_snapshot_tasks']) == 1:
                if replication_task['periodic_snapshot_tasks'][0]['id'] == id:
                    raise CallError(
                        f'You are deleting the last periodic snapshot task bound to the enabled replication task '
                        f'{replication_task["name"]!r}, which will break it. Please disable that replication task '
                        f'first.'
                    )

        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('zettarepl.update_tasks')

        return response

    @item_method
    @accepts(Int("id"))
    async def run(self, id):
        """
        Execute a Periodic Snapshot Task of `id`.
        """
        task = await self._get_instance(id)

        if not task["enabled"]:
            raise CallError("Task is not enabled")

        await self.middleware.call("zettarepl.run_periodic_snapshot_task",
                                   task["id"])

    async def _validate(self, data):
        verrors = ValidationErrors()

        if data['dataset'] not in (
                await self.middleware.call('pool.filesystem_choices')):
            verrors.add('dataset', 'ZFS dataset or zvol not found')

        if not data['recursive'] and data['exclude']:
            verrors.add(
                'exclude',
                'Excluding datasets or zvols is not necessary for non-recursive periodic snapshot tasks'
            )

        for i, v in enumerate(data['exclude']):
            if not v.startswith(f'{data["dataset"]}/'):
                verrors.add(
                    f'exclude.{i}',
                    'Excluded dataset or zvol should be a child or other descendant of selected dataset'
                )

        return verrors
Code example #40
0
class RsyncTaskService(TaskPathService):

    share_task_type = 'Rsync'

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'
        datastore_extend_context = 'rsynctask.rsync_task_extend_context'
        cli_namespace = 'task.rsync'

    ENTRY = Patch(
        'rsync_task_create',
        'rsync_task_entry',
        ('rm', {
            'name': 'validate_rpath'
        }),
        ('add', Int('id')),
        ('add', Bool('locked')),
        ('add', Dict('job', null=True, additional_attrs=True)),
    )

    @private
    async def rsync_task_extend(self, data, context):
        try:
            data['extra'] = shlex.split(
                data['extra'].replace('"', r'"\"').replace("'", r'"\"'))
        except ValueError:
            # Handle the case where the extra value was misconfigured by an older version;
            # going forward, validation ensures the value splits successfully with shlex.
            data['extra'] = data['extra'].split()

        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        data['job'] = context['jobs'].get(data['id'])
        return data

    @private
    async def rsync_task_extend_context(self, rows, extra):
        jobs = {}
        for j in await self.middleware.call("core.get_jobs",
                                            [("method", "=", "rsynctask.run")],
                                            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        return {
            "jobs": jobs,
        }

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = None
        with contextlib.suppress(KeyError):
            user = await self.middleware.call('dscache.get_uncached_user',
                                              username)

        if not user:
            verrors.add(f'{schema}.user',
                        f'Provided user "{username}" does not exist')
            raise verrors

        await self.validate_path_field(data, schema, verrors)

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        data['extra'] = ' '.join(data['extra'])
        try:
            shlex.split(data['extra'].replace('"', r'"\"').replace("'", r'"\"'))
        except ValueError as e:
            verrors.add(f'{schema}.extra', f'Please specify valid value: {e}')

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user['pw_dir'], '.ssh',
                                               'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(
                glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in home dir.')
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}')

            if (data['enabled'] and data['validate_rpath'] and remote_path
                    and remote_host and remote_port):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    async with await asyncio.wait_for(
                            asyncssh.connect(remote_host,
                                             port=remote_port,
                                             username=remote_username,
                                             client_keys=key_files,
                                             known_hosts=None),
                            timeout=5,
                    ) as conn:
                        await conn.run(f'test -d {shlex.quote(remote_path)}',
                                       check=True)
                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(f'{schema}.remotehost', e.__str__())

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error[ error code {e.code} ] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field.')
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}')

                except asyncssh.Error as e:

                    if e.__class__.__name__ in str(e):
                        exception_reason = str(e)
                    else:
                        exception_reason = f'{e.__class__.__name__} {e}'
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data['enabled'] and data['validate_rpath']:
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(
        Dict(
            'rsync_task_create',
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('user', required=True),
            Str('remotehost'),
            Int('remoteport'),
            Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
            Str('remotemodule'),
            Str('remotepath'),
            Bool('validate_rpath', default=True),
            Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
            Str('desc'),
            Cron(
                'schedule',
                defaults={'minute': '00'},
            ),
            Bool('recursive'),
            Bool('times'),
            Bool('compress'),
            Bool('archive'),
            Bool('delete'),
            Bool('quiet'),
            Bool('preserveperm'),
            Bool('preserveattr'),
            Bool('delayupdates'),
            List('extra', items=[Str('extra')]),
            Bool('enabled', default=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        See the comment in Rsyncmod about `path` length limits.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` represents the different operating mechanisms for Rsync, i.e. Rsync Module mode / Rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute should be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system at the fifth minute of every hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data,
                                                       'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(data['id'])

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('rsync_task_create', 'rsync_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        old.pop('job')

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new,
                                                       'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)
        new.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete',
                                         self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command avoiding code duplication.
        """
        rsync = await self.get_instance(id)
        path = shlex.quote(rsync['path'])

        line = ['rsync']
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-zz'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [
                path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"'
            ]
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)
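
    # For illustration (derived from the flag and argument logic above, not captured
    # from a real system), the MODULE-mode task from the `create` example earlier
    # would produce roughly:
    #
    #   rsync -a -zz [email protected]::"remote_module1" /mnt/vol1/rsync_dataset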

    @item_method
    @accepts(Int('id'))
    @returns()
    @job(lock=lambda args: args[-1], lock_queue_size=1, logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt (not syslog).
        """
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'rsync')

        rsync = self.middleware.call_sync('rsynctask.get_instance', id)
        if rsync['locked']:
            self.middleware.call_sync('rsynctask.generate_locked_alert', id)
            return

        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(commandline, rsync['user'],
                                           lambda v: job.logs_fd.write(v))

        for klass in (('RsyncSuccess', 'RsyncFailed') if not rsync['quiet'] else ()):
            self.middleware.call_sync('alert.oneshot_delete', klass, rsync['id'])

        if cp.returncode not in RsyncReturnCode.nonfatals():
            err = None
            if cp.returncode == RsyncReturnCode.STREAMIO and rsync['compress']:
                err = (
                    "rsync command with compression enabled failed with STREAMIO error. "
                    "This may indicate that remote server lacks support for the new-style "
                    "compression used by TrueNAS.")

            if not rsync['quiet']:
                self.middleware.call_sync(
                    'alert.oneshot_create', 'RsyncFailed', {
                        'id': rsync['id'],
                        'direction': rsync['direction'],
                        'path': rsync['path'],
                    })

            if err:
                msg = f'{err} Check logs for further information.'
            else:
                try:
                    rc_name = RsyncReturnCode(cp.returncode).name
                except ValueError:
                    rc_name = 'UNKNOWN'

                msg = (f'rsync command returned {cp.returncode} - {rc_name}. '
                       'Check logs for further information.')
            raise CallError(msg)

        elif not rsync['quiet']:
            self.middleware.call_sync(
                'alert.oneshot_create', 'RsyncSuccess', {
                    'id': rsync['id'],
                    'direction': rsync['direction'],
                    'path': rsync['path'],
                })
Code example #41
0
File: replication.py Project: AlexEvy/freenas
class ReplicationService(CRUDService):
    class Config:
        datastore = "storage.replication"
        datastore_prefix = "repl_"
        datastore_extend = "replication.extend"
        datastore_extend_context = "replication.extend_context"

    @private
    async def extend_context(self):
        legacy_result, legacy_result_datetime = await self.middleware.run_in_thread(
            self._legacy_extend_context)

        return {
            "state": await self.middleware.call("zettarepl.get_state"),
            "legacy_result": legacy_result,
            "legacy_result_datetime": legacy_result_datetime,
        }

    def _legacy_extend_context(self):
        try:
            with open("/tmp/.repl-result", "rb") as f:
                data = f.read()
                legacy_result = pickle.loads(data)
                legacy_result_datetime = datetime.fromtimestamp(
                    os.stat("/tmp/.repl-result").st_mtime)
        except Exception:
            legacy_result = defaultdict(dict)
            legacy_result_datetime = None

        return legacy_result, legacy_result_datetime

    @private
    async def extend(self, data, context):
        data["periodic_snapshot_tasks"] = [{
            k.replace("task_", ""): v
            for k, v in task.items()
        } for task in data["periodic_snapshot_tasks"]]

        for task in data["periodic_snapshot_tasks"]:
            Cron.convert_db_format_to_schedule(task, begin_end=True)

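        # For PUSH tasks the database's naming_schema column actually holds the
        # "also include" schemas; compress() below performs the reverse swap on write.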
        if data["direction"] == "PUSH":
            data["also_include_naming_schema"] = data["naming_schema"]
            data["naming_schema"] = []
        if data["direction"] == "PULL":
            data["also_include_naming_schema"] = []

        Cron.convert_db_format_to_schedule(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_db_format_to_schedule(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        if data["transport"] == "LEGACY":
            if data["id"] in context["legacy_result"]:
                legacy_result = context["legacy_result"][data["id"]]

                msg = legacy_result.get("msg")
                if msg == "Running":
                    state = "RUNNING"
                elif msg in ["Succeeded", "Up to date"]:
                    state = "FINISHED"
                else:
                    state = "ERROR"

                data["state"] = {
                    "datetime": context["legacy_result_datetime"],
                    "state": state,
                    "last_snapshot": legacy_result.get("last_snapshot"),
                }

                if state == "ERROR":
                    data["state"]["error"] = msg
            else:
                data["state"] = {
                    "state": "PENDING",
                }
        else:
            data["state"] = context["state"].get(
                f"replication_task_{data['id']}", {
                    "state": "PENDING",
                })

        data["job"] = data["state"].pop("job", None)

        return data

    @private
    async def compress(self, data):
        if data["direction"] == "PUSH":
            data["naming_schema"] = data["also_include_naming_schema"]
        del data["also_include_naming_schema"]

        Cron.convert_schedule_to_db_format(data,
                                           "schedule",
                                           key_prefix="schedule_",
                                           begin_end=True)
        Cron.convert_schedule_to_db_format(data,
                                           "restrict_schedule",
                                           key_prefix="restrict_schedule_",
                                           begin_end=True)

        del data["periodic_snapshot_tasks"]

        return data

    @accepts(
        Dict(
            "replication_create",
            Str("name", required=True),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transport",
                enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
                required=True),
            Int("ssh_credentials", null=True, default=None),
            Str("netcat_active_side",
                enum=["LOCAL", "REMOTE"],
                null=True,
                default=None),
            Str("netcat_active_side_listen_address", null=True, default=None),
            Int("netcat_active_side_port_min",
                null=True,
                default=None,
                validators=[Port()]),
            Int("netcat_active_side_port_max",
                null=True,
                default=None,
                validators=[Port()]),
            Str("netcat_passive_side_connect_address", null=True,
                default=None),
            List("source_datasets",
                 items=[Path("dataset", empty=False)],
                 required=True,
                 empty=False),
            Path("target_dataset", required=True, empty=False),
            Bool("recursive", required=True),
            List("exclude", items=[Path("dataset", empty=False)], default=[]),
            Bool('properties', default=True),
            List("periodic_snapshot_tasks",
                 items=[Int("periodic_snapshot_task")],
                 default=[],
                 validators=[Unique()]),
            List("naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ],
                 default=[]),
            List("also_include_naming_schema",
                 items=[
                     Str("naming_schema",
                         validators=[ReplicationSnapshotNamingSchema()])
                 ],
                 default=[]),
            Bool("auto", required=True),
            Cron("schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Cron("restrict_schedule",
                 defaults={"minute": "00"},
                 begin_end=True,
                 null=True,
                 default=None),
            Bool("only_matching_schedule", default=False),
            Bool("allow_from_scratch", default=False),
            Bool("hold_pending_snapshots", default=False),
            Str("retention_policy",
                enum=["SOURCE", "CUSTOM", "NONE"],
                required=True),
            Int("lifetime_value",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Str("lifetime_unit",
                null=True,
                default=None,
                enum=["HOUR", "DAY", "WEEK", "MONTH", "YEAR"]),
            Str("compression",
                enum=["LZ4", "PIGZ", "PLZIP"],
                null=True,
                default=None),
            Int("speed_limit",
                null=True,
                default=None,
                validators=[Range(min=1)]),
            Bool("dedup", default=False),
            Bool("large_block", default=True),
            Bool("embed", default=False),
            Bool("compressed", default=True),
            Int("retries", default=5, validators=[Range(min=1)]),
            Str("logging_level",
                enum=["DEBUG", "INFO", "WARNING", "ERROR"],
                null=True,
                default=None),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a Replication Task

        Create a Replication Task that will push or pull ZFS snapshots to or from a remote host.

        * `name` specifies a name for replication task
        * `direction` specifies whether task will `PUSH` or `PULL` snapshots
        * `transport` is a method of snapshots transfer:
          * `SSH` transfers snapshots via SSH connection. This method is supported everywhere but does not achieve
            great performance
            `ssh_credentials` is a required field for this transport (Keychain Credential ID of type `SSH_CREDENTIALS`)
          * `SSH+NETCAT` uses unencrypted connection for data transfer. This can only be used in trusted networks
            and requires a port (specified by range from `netcat_active_side_port_min` to `netcat_active_side_port_max`)
            to be open on `netcat_active_side`
            `ssh_credentials` is also required for control connection
          * `LOCAL` replicates to or from localhost
          * `LEGACY` uses legacy replication engine prior to FreeNAS 11.3
        * `source_datasets` is a non-empty list of datasets to replicate snapshots from
        * `target_dataset` is a dataset to put snapshots into. It must exist on target side
        * `recursive` and `exclude` have the same meaning as for Periodic Snapshot Task
        * `properties` control whether we should send dataset properties along with snapshots
        * `periodic_snapshot_tasks` is a list of periodic snapshot task IDs that are sources of snapshots for this
          replication task. Only push replication tasks can be bound to periodic snapshot tasks.
        * `naming_schema` is a list of naming schemas for pull replication
        * `also_include_naming_schema` is a list of naming schemas for push replication
        * `auto` allows replication to run automatically on schedule or after bound periodic snapshot task
        * `schedule` is a schedule to run replication task. Only `auto` replication tasks without bound periodic
          snapshot tasks can have a schedule
        * `restrict_schedule` restricts when replication task with bound periodic snapshot tasks runs. For example,
          you can have periodic snapshot tasks that run every 15 minutes, but only run replication task every hour.
        * Enabling `only_matching_schedule` will only replicate snapshots that match `schedule` or
          `restrict_schedule`
        * `allow_from_scratch` will destroy all snapshots on target side and replicate everything from scratch if none
          of the snapshots on target side matches source snapshots
        * `hold_pending_snapshots` will prevent source snapshots from being deleted by retention if replication fails
          for some reason
        * `retention_policy` specifies how to delete old snapshots on target side:
          * `SOURCE` deletes snapshots that are absent on source side
          * `CUSTOM` deletes snapshots that are older than `lifetime_value` and `lifetime_unit`
          * `NONE` does not delete any snapshots
        * `compression` compresses SSH stream. Available only for SSH transport
        * `speed_limit` limits speed of SSH stream. Available only for SSH transport
        * `dedup`, `large_block`, `embed` and `compressed` are various ZFS stream flags documented in `man zfs send`
        * `retries` specifies number of retries before considering replication failed

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create",
                "params": [{
                    "name": "Work Backup",
                    "direction": "PUSH",
                    "transport": "SSH",
                    "ssh_credentials": [12],
                    "source_datasets", ["data/work"],
                    "target_dataset": "repl/work",
                    "recursive": true,
                    "periodic_snapshot_tasks": [5],
                    "auto": true,
                    "restrict_schedule": {
                        "minute": "0",
                        "hour": "*/2",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    },
                    "only_matching_schedule": true,
                    "retention_policy": "CUSTOM",
                    "lifetime_value": 1,
                    "lifetime_unit": "WEEK",
                }]
            }
        """

        verrors = ValidationErrors()
        verrors.add_child("replication_create", await self._validate(data))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = data["periodic_snapshot_tasks"]
        await self.compress(data)

        id = await self.middleware.call(
            "datastore.insert", self._config.datastore, data,
            {"prefix": self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"),
             Patch(
                 "replication_create",
                 "replication_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update a Replication Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.update",
                "params": [
                    7,
                    {
                        "name": "Work Backup",
                        "direction": "PUSH",
                        "transport": "SSH",
                        "ssh_credentials": [12],
                        "source_datasets", ["data/work"],
                        "target_dataset": "repl/work",
                        "recursive": true,
                        "periodic_snapshot_tasks": [5],
                        "auto": true,
                        "restrict_schedule": {
                            "minute": "0",
                            "hour": "*/2",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        },
                        "only_matching_schedule": true,
                        "retention_policy": "CUSTOM",
                        "lifetime_value": 1,
                        "lifetime_unit": "WEEK",
                    }
                ]
            }
        """

        old = await self._get_instance(id)

        new = old.copy()
        if new["ssh_credentials"]:
            new["ssh_credentials"] = new["ssh_credentials"]["id"]
        new["periodic_snapshot_tasks"] = [
            task["id"] for task in new["periodic_snapshot_tasks"]
        ]
        new.update(data)

        verrors = ValidationErrors()
        verrors.add_child("replication_update", await self._validate(new, id))

        if verrors:
            raise verrors

        periodic_snapshot_tasks = new["periodic_snapshot_tasks"]
        await self.compress(new)

        new.pop("state", None)
        new.pop("job", None)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self._set_periodic_snapshot_tasks(id, periodic_snapshot_tasks)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return await self._get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete a Replication Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.delete",
                "params": [
                    1
                ]
            }
        """

        response = await self.middleware.call("datastore.delete",
                                              self._config.datastore, id)

        await self.middleware.call("service.restart", "cron")
        await self.middleware.call("zettarepl.update_tasks")

        return response

    @item_method
    @accepts(Int("id"), Bool("really_run", default=True, hidden=True))
    @job(logs=True)
    async def run(self, job, id, really_run):
        """
        Run Replication Task of `id`.
        """
        if really_run:
            task = await self._get_instance(id)

            if not task["enabled"]:
                raise CallError("Task is not enabled")

            if task["transport"] == "LEGACY":
                raise CallError("You can't run legacy replication manually")

        await self.middleware.call("zettarepl.run_replication_task", id,
                                   really_run, job)

    async def _validate(self, data, id=None):
        verrors = ValidationErrors()

        await self._ensure_unique(verrors, "", "name", data["name"], id)

        # Direction

        snapshot_tasks = []

        if data["direction"] == "PUSH":
            e, snapshot_tasks = await self._query_periodic_snapshot_tasks(
                data["periodic_snapshot_tasks"])
            verrors.add_child("periodic_snapshot_tasks", e)

            if data["naming_schema"]:
                verrors.add("naming_schema",
                            "This field has no sense for push replication")

            if data["transport"] != "LEGACY" and not snapshot_tasks and not data[
                    "also_include_naming_schema"]:
                verrors.add(
                    "periodic_snapshot_tasks",
                    "You must at least either bind a periodic snapshot task or provide "
                    "\"Also Include Naming Schema\" for push replication task")

            if data["schedule"]:
                if data["periodic_snapshot_tasks"]:
                    verrors.add(
                        "schedule",
                        "Push replication can't be bound to periodic snapshot task and have "
                        "schedule at the same time")
            else:
                if data["auto"] and not data[
                        "periodic_snapshot_tasks"] and data[
                            "transport"] != "LEGACY":
                    verrors.add(
                        "auto",
                        "Push replication that runs automatically must be either "
                        "bound to periodic snapshot task or have schedule")

        if data["direction"] == "PULL":
            if data["schedule"]:
                pass
            else:
                if data["auto"]:
                    verrors.add(
                        "auto",
                        "Pull replication that runs automatically must have schedule"
                    )

            if data["periodic_snapshot_tasks"]:
                verrors.add(
                    "periodic_snapshot_tasks",
                    "Pull replication can't be bound to periodic snapshot task"
                )

            if not data["naming_schema"]:
                verrors.add("naming_schema",
                            "Naming schema is required for pull replication")

            if data["also_include_naming_schema"]:
                verrors.add("also_include_naming_schema",
                            "This field has no sense for pull replication")

            if data["hold_pending_snapshots"]:
                verrors.add(
                    "hold_pending_snapshots",
                    "Pull replication tasks can't hold pending snapshots because "
                    "they don't do source retention")

        # Transport

        if data["transport"] == "SSH+NETCAT":
            if data["netcat_active_side"] is None:
                verrors.add(
                    "netcat_active_side",
                    "You must choose active side for SSH+netcat replication")

            if data["netcat_active_side_port_min"] is not None and data[
                    "netcat_active_side_port_max"] is not None:
                if data["netcat_active_side_port_min"] > data[
                        "netcat_active_side_port_max"]:
                    verrors.add(
                        "netcat_active_side_port_max",
                        "Please specify value greater or equal than netcat_active_side_port_min"
                    )

            if data["compression"] is not None:
                verrors.add(
                    "compression",
                    "Compression is not supported for SSH+netcat replication")

            if data["speed_limit"] is not None:
                verrors.add(
                    "speed_limit",
                    "Speed limit is not supported for SSH+netcat replication")
        else:
            if data["netcat_active_side"] is not None:
                verrors.add(
                    "netcat_active_side",
                    "This field only has sense for SSH+netcat replication")

            for k in [
                    "netcat_active_side_listen_address",
                    "netcat_active_side_port_min",
                    "netcat_active_side_port_max",
                    "netcat_passive_side_connect_address"
            ]:
                if data[k] is not None:
                    verrors.add(
                        k,
                        "This field only has sense for SSH+netcat replication")

        if data["transport"] == "LOCAL":
            if data["ssh_credentials"] is not None:
                verrors.add(
                    "ssh_credentials",
                    "Remote credentials have no sense for local replication")

            if data["compression"] is not None:
                verrors.add("compression",
                            "Compression has no sense for local replication")

            if data["speed_limit"] is not None:
                verrors.add("speed_limit",
                            "Speed limit has no sense for local replication")
        else:
            if data["ssh_credentials"] is None:
                verrors.add(
                    "ssh_credentials",
                    "SSH Credentials are required for non-local replication")
            else:
                try:
                    await self.middleware.call(
                        "keychaincredential.get_of_type",
                        data["ssh_credentials"], "SSH_CREDENTIALS")
                except CallError as e:
                    verrors.add("ssh_credentials", str(e))

        if data["transport"] == "LEGACY":
            for should_be_true in ["auto", "allow_from_scratch"]:
                if not data[should_be_true]:
                    verrors.add(
                        should_be_true,
                        "Legacy replication does not support disabling this option"
                    )

            for should_be_false in [
                    "exclude", "periodic_snapshot_tasks", "naming_schema",
                    "also_include_naming_schema", "only_matching_schedule",
                    "dedup", "large_block", "embed", "compressed"
            ]:
                if data[should_be_false]:
                    verrors.add(
                        should_be_false,
                        "Legacy replication does not support this option")

            if data["direction"] != "PUSH":
                verrors.add(
                    "direction",
                    "Only push application is allowed for Legacy transport")

            if len(data["source_datasets"]) != 1:
                verrors.add(
                    "source_datasets",
                    "You can only have one source dataset for legacy replication"
                )

            if data["retention_policy"] not in ["SOURCE", "NONE"]:
                verrors.add(
                    "retention_policy",
                    "Only \"source\" and \"none\" retention policies are supported by "
                    "legacy replication")

            if data["retries"] != 1:
                verrors.add("retries",
                            "This value should be 1 for legacy replication")

        # Common for all directions and transports

        for i, source_dataset in enumerate(data["source_datasets"]):
            for snapshot_task in snapshot_tasks:
                if is_child(source_dataset, snapshot_task["dataset"]):
                    if data["recursive"]:
                        for exclude in snapshot_task["exclude"]:
                            if exclude not in data["exclude"]:
                                verrors.add(
                                    "exclude",
                                    f"You should exclude {exclude!r} as bound periodic snapshot "
                                    f"task dataset {snapshot_task['dataset']!r} does"
                                )
                    else:
                        if source_dataset in snapshot_task["exclude"]:
                            verrors.add(
                                f"source_datasets.{i}",
                                f"Dataset {source_dataset!r} is excluded by bound "
                                f"periodic snapshot task for dataset "
                                f"{snapshot_task['dataset']!r}")

        if not data["recursive"] and data["exclude"]:
            verrors.add(
                "exclude",
                "Excluding child datasets is only supported for recursive replication"
            )

        for i, v in enumerate(data["exclude"]):
            if not any(
                    v.startswith(ds + "/") for ds in data["source_datasets"]):
                verrors.add(
                    f"exclude.{i}",
                    "This dataset is not a child of any of source datasets")

        if data["schedule"]:
            if not data["auto"]:
                verrors.add(
                    "schedule",
                    "You can't have schedule for replication that does not run automatically"
                )
        else:
            if data["only_matching_schedule"]:
                verrors.add(
                    "only_matching_schedule",
                    "You can't have only-matching-schedule without schedule")

        if data["retention_policy"] == "CUSTOM":
            if data["lifetime_value"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
            if data["lifetime_unit"] is None:
                verrors.add(
                    "lifetime_value",
                    "This field is required for custom retention policy")
        else:
            if data["lifetime_value"] is not None:
                verrors.add(
                    "lifetime_value",
                    "This field has no sense for specified retention policy")
            if data["lifetime_unit"] is not None:
                verrors.add(
                    "lifetime_unit",
                    "This field has no sense for specified retention policy")

        if data["enabled"]:
            for i, snapshot_task in enumerate(snapshot_tasks):
                if not snapshot_task["enabled"]:
                    verrors.add(
                        f"periodic_snapshot_tasks.{i}",
                        "You can't bind disabled periodic snapshot task to enabled replication task"
                    )

        return verrors

    async def _set_periodic_snapshot_tasks(self, replication_task_id,
                                           periodic_snapshot_tasks_ids):
        await self.middleware.call(
            "datastore.delete",
            "storage.replication_repl_periodic_snapshot_tasks",
            [["replication_id", "=", replication_task_id]])
        for periodic_snapshot_task_id in periodic_snapshot_tasks_ids:
            await self.middleware.call(
                "datastore.insert",
                "storage.replication_repl_periodic_snapshot_tasks",
                {
                    "replication_id": replication_task_id,
                    "task_id": periodic_snapshot_task_id,
                },
            )

    async def _query_periodic_snapshot_tasks(self, ids):
        verrors = ValidationErrors()

        query_result = await self.middleware.call("pool.snapshottask.query",
                                                  [["id", "in", ids]])

        snapshot_tasks = []
        for i, task_id in enumerate(ids):
            for task in query_result:
                if task["id"] == task_id:
                    snapshot_tasks.append(task)
                    break
            else:
                verrors.add(str(i), "This snapshot task does not exist")

        return verrors, snapshot_tasks

    @accepts(
        Str("transport",
            enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
            required=True), Int("ssh_credentials", null=True, default=None))
    async def list_datasets(self, transport, ssh_credentials=None):
        """
        List datasets on remote side

        Accepts `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.list_datasets",
                "params": [
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.list_datasets", transport,
                                          ssh_credentials)

    @accepts(Str("dataset", required=True),
             Str("transport",
                 enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
                 required=True), Int("ssh_credentials",
                                     null=True,
                                     default=None))
    async def create_dataset(self, dataset, transport, ssh_credentials=None):
        """
        Creates dataset on remote side

        Accepts `dataset` name, `transport` and SSH credentials ID (for non-local transport)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.create_dataset",
                "params": [
                    "repl/work",
                    "SSH",
                    7
                ]
            }
        """

        return await self.middleware.call("zettarepl.create_dataset", dataset,
                                          transport, ssh_credentials)

    @accepts()
    async def list_naming_schemas(self):
        """
        List all naming schemas used in periodic snapshot and replication tasks.
        """
        naming_schemas = []
        for snapshottask in await self.middleware.call(
                "pool.snapshottask.query"):
            naming_schemas.append(snapshottask["naming_schema"])
        for replication in await self.middleware.call("replication.query"):
            naming_schemas.extend(replication["naming_schema"])
            naming_schemas.extend(replication["also_include_naming_schema"])
        return sorted(set(naming_schemas))

    @accepts(
        List("datasets", empty=False, items=[
            Path("dataset", empty=False),
        ]),
        List("naming_schema",
             empty=False,
             items=[
                 Str("naming_schema",
                     validators=[ReplicationSnapshotNamingSchema()])
             ]),
        Str("transport",
            enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
            required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def count_eligible_manual_snapshots(self, datasets, naming_schema,
                                              transport, ssh_credentials):
        """
        Count how many existing snapshots of `datasets` match `naming_schema`.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.count_eligible_manual_snapshots",
                "params": [
                    "repl/work",
                    ["auto-%Y-%m-%d_%H-%M"],
                    "SSH",
                    4
                ]
            }
        """
        return await self.middleware.call(
            "zettarepl.count_eligible_manual_snapshots", datasets,
            naming_schema, transport, ssh_credentials)

    @accepts(
        Str("direction", enum=["PUSH", "PULL"], required=True),
        List("source_datasets",
             items=[Path("dataset", empty=False)],
             required=True,
             empty=False),
        Path("target_dataset", required=True, empty=False),
        Str("transport",
            enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"],
            required=True),
        Int("ssh_credentials", null=True, default=None),
    )
    async def target_unmatched_snapshots(self, direction, source_datasets,
                                         target_dataset, transport,
                                         ssh_credentials):
        """
        Check if target has any snapshots that do not exist on source.

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "replication.target_unmatched_snapshots",
                "params": [
                    "PUSH",
                    ["repl/work", "repl/games"],
                    "backup",
                    "SSH",
                    4
                ]
            }

        Returns

            {
                "backup/work": ["auto-2019-10-15_13-00", "auto-2019-10-15_09-00"],
                "backup/games": ["auto-2019-10-15_13-00"],
            }
        """
        return await self.middleware.call(
            "zettarepl.target_unmatched_snapshots", direction, source_datasets,
            target_dataset, transport, ssh_credentials)

    # Legacy pair support
    @private
    @accepts(
        Dict(
            "replication-pair-data",
            Str("hostname", required=True),
            Str("public-key", required=True),
            Str("user", null=True),
        ))
    async def pair(self, data):
        result = await self.middleware.call(
            "keychaincredential.ssh_pair", {
                "remote_hostname": data["hostname"],
                "username": data["user"] or "root",
                "public_key": data["public-key"],
            })
        return {
            "ssh_port": result["port"],
            "ssh_hostkey": result["host_key"],
        }
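
The replication methods above funnel every schedule through `Cron.convert_schedule_to_db_format` before the row reaches the datastore. As a rough sketch of the observable behavior (illustrative only, not middlewared's actual helper), the call flattens the nested schedule dict into prefixed flat columns:

# Illustrative sketch only: mimics what the Cron.convert_schedule_to_db_format
# calls above appear to do (flatten a nested schedule into prefixed DB columns).
# The real helper lives in middlewared and may differ in detail.
def convert_schedule_to_db_format(data, name='schedule', key_prefix='', begin_end=False):
    schedule = data.pop(name, None)
    if schedule is None:
        return
    fields = ['minute', 'hour', 'dom', 'month', 'dow']
    if begin_end:
        fields += ['begin', 'end']
    for field in fields:
        data[key_prefix + field] = schedule[field]

task = {
    'name': 'Work Backup',
    'schedule': {'minute': '0', 'hour': '*/2', 'dom': '*', 'month': '*',
                 'dow': '1,2,3,4,5', 'begin': '09:00', 'end': '18:00'},
}
convert_schedule_to_db_format(task, key_prefix='schedule_', begin_end=True)
# task now carries flat keys: schedule_minute, schedule_hour, ..., schedule_end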
コード例 #42
0
ファイル: cron.py プロジェクト: vaibhav-rbs/freenas
class CronJobService(CRUDService):
    class Config:
        datastore = 'tasks.cronjob'
        datastore_prefix = 'cron_'
        datastore_extend = 'cronjob.cron_extend'
        namespace = 'cronjob'

    @private
    def cron_extend(self, data):
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        user = data.get('user')
        if user:
            # Windows users can have spaces in their usernames
            # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808
            if ' ' in user:
                verrors.add(f'{schema}.user', 'Usernames cannot have spaces')

            elif not (await self.middleware.call('notifier.get_user_object',
                                                 user)):
                verrors.add(f'{schema}.user', 'Specified user does not exist')

        return verrors, data

    @accepts(
        Dict('cron_job_create',
             Bool('enabled'),
             Bool('stderr'),
             Bool('stdout'),
             Cron('schedule'),
             Str('command', required=True),
             Str('description'),
             Str('user', required=True),
             register=True))
    async def do_create(self, data):
        """
        Create a new cron job.

        `stderr` and `stdout` are boolean values which, if `true`, suppress standard error and standard output
        respectively.

        .. examples(websocket)::

          Create a cron job which executes `touch /tmp/testfile` every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "cronjob.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "command": "touch /tmp/testfile",
                    "description": "Test command",
                    "user": "******",
                    "stderr": true,
                    "stdout": true
                }]
            }
        """
        verrors, data = await self.validate_data(data, 'cron_job_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron')

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('cron_job_create', 'cron_job_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update cronjob of `id`.
        """
        task_data = await self.query(filters=[('id', '=', id)],
                                     options={'get': True})
        original_data = task_data.copy()
        task_data.update(data)
        verrors, task_data = await self.validate_data(task_data,
                                                      'cron_job_update')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(task_data)
        Cron.convert_schedule_to_db_format(original_data)

        if len(set(task_data.items()) ^ set(original_data.items())) > 0:

            await self.middleware.call(
                'datastore.update', self._config.datastore, id, task_data,
                {'prefix': self._config.datastore_prefix})

            await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete cronjob of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('service.restart', 'cron')

        return response
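
For completeness, the same cron job can be created from Python rather than raw websocket JSON. A minimal sketch, assuming middlewared's bundled client is importable on the host; the username is a placeholder, not part of the original example:

# Minimal client-side sketch (assumes middlewared's bundled Python client;
# 'root' is a placeholder username).
from middlewared.client import Client

with Client() as c:
    job = c.call('cronjob.create', {
        'enabled': True,
        'schedule': {'minute': '*/5', 'hour': '*', 'dom': '*',
                     'month': '*', 'dow': '*'},
        'command': 'touch /tmp/testfile',
        'description': 'Test command',
        'user': 'root',
        'stderr': True,
        'stdout': True,
    })
    print(job['id'])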
コード例 #43
0
ファイル: snapshot.py プロジェクト: freenas/freenas
    async def do_update(self, id, data):
        """
        Update a Periodic Snapshot Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.update",
                "params": [
                    1,
                    {
                        "dataset": "data/work",
                        "recursive": true,
                        "exclude": ["data/work/temp"],
                        "lifetime_value": 2,
                        "lifetime_unit": "WEEK",
                        "naming_schema": "auto_%Y-%m-%d_%H-%M",
                        "schedule": {
                            "minute": "0",
                            "hour": "*",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        }
                    }
                ]
            }
        """

        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_update', await self._validate(new))

        if not new['enabled']:
            for replication_task in await self.middleware.call('replication.query', [['enabled', '=', True]]):
                if any(periodic_snapshot_task['id'] == id
                       for periodic_snapshot_task in replication_task['periodic_snapshot_tasks']):
                    verrors.add(
                        'periodic_snapshot_update.enabled',
                        (f'You can\'t disable this periodic snapshot task because it is bound to enabled replication '
                         f'task {replication_task["id"]!r}')
                    )
                    break

        if verrors:
            raise verrors

        legacy_replication_tasks = await self._legacy_replication_tasks()
        if self._is_legacy(new, legacy_replication_tasks):
            verrors.add_child('periodic_snapshot_update', self._validate_legacy(new))
        else:
            if self._is_legacy(old, legacy_replication_tasks):
                verrors.add(
                    'periodic_snapshot_update.naming_schema',
                    ('This snapshot task is being used in legacy replication task. You must use naming schema '
                     f'{self._legacy_naming_schema(new)!r}. Please upgrade your replication tasks to edit this field.')
                )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new, begin_end=True)

        for key in ('legacy', 'vmware_sync', 'state'):
            new.pop(key, None)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call('service.restart', 'cron')
        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(id)
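
The update methods throughout these examples share one merge pattern: copy the stored row, overlay the partial payload, then validate and persist the merged dict. A self-contained sketch:

# Sketch of the old.copy()/update() merge used by the do_update methods above:
# validation always sees the full merged object, never just the patch.
old = {'dataset': 'data/work', 'recursive': True,
       'lifetime_value': 2, 'lifetime_unit': 'WEEK'}
patch = {'lifetime_value': 4}    # partial update sent by the caller

new = old.copy()
new.update(patch)
assert new == {'dataset': 'data/work', 'recursive': True,
               'lifetime_value': 4, 'lifetime_unit': 'WEEK'}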
コード例 #44
0
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        Cron.convert_db_format_to_schedule(data)
        return data
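
This extend hook performs the inverse of the flattening sketched earlier. Again as a rough illustration only, not middlewared's actual `Cron` helper:

# Illustrative inverse: gather flat cron columns back into a nested
# 'schedule' dict, as convert_db_format_to_schedule appears to do.
def convert_db_format_to_schedule(data, name='schedule', key_prefix=''):
    data[name] = {}
    for field in ('minute', 'hour', 'dom', 'month', 'dow'):
        data[name][field] = data.pop(key_prefix + field)

row = {'minute': '0', 'hour': '*', 'dom': '*', 'month': '*', 'dow': '*'}
convert_db_format_to_schedule(row)
assert row == {'schedule': {'minute': '0', 'hour': '*', 'dom': '*',
                            'month': '*', 'dow': '*'}}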
コード例 #45
0
ファイル: snapshot.py プロジェクト: freenas/freenas
    async def do_create(self, data):
        """
        Create a Periodic Snapshot Task

        Create a Periodic Snapshot Task that will take snapshots of specified `dataset` at specified `schedule`.
        Recursive snapshots can be created if `recursive` flag is enabled. You can `exclude` specific child datasets
        from snapshot.
        Snapshots will be automatically destroyed after a certain amount of time, specified by
        `lifetime_value` and `lifetime_unit`.
        Snapshots will be named according to `naming_schema` which is a `strftime`-like template for snapshot name
        and must contain `%Y`, `%m`, `%d`, `%H` and `%M`.

        .. examples(websocket)::

          Create a recursive Periodic Snapshot Task for dataset `data/work` excluding `data/work/temp`. Snapshots
          will be created on weekdays every hour from 09:00 to 18:00 and will be stored for two weeks.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.create",
                "params": [{
                    "dataset": "data/work",
                    "recursive": true,
                    "exclude": ["data/work/temp"],
                    "lifetime_value": 2,
                    "lifetime_unit": "WEEK",
                    "naming_schema": "auto_%Y-%m-%d_%H-%M",
                    "schedule": {
                        "minute": "0",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    }
                }]
            }
        """

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_create', await self._validate(data))

        if verrors:
            raise verrors

        if self._is_legacy(data, await self._legacy_replication_tasks()):
            verrors.add_child('periodic_snapshot_create', self._validate_legacy(data))

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data, begin_end=True)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call('service.restart', 'cron')
        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(data['id'])
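
The `naming_schema` is an ordinary `strftime` template, which is why it must contain `%Y`, `%m`, `%d`, `%H` and `%M`: those fields make each snapshot name unique per minute. For example:

# The naming schema from the example above is a plain strftime template.
from datetime import datetime

schema = 'auto_%Y-%m-%d_%H-%M'
print(datetime(2019, 10, 15, 13, 0).strftime(schema))  # auto_2019-10-15_13-00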
コード例 #46
0
ファイル: rsync.py プロジェクト: mike0615/freenas
class RsyncTaskService(CRUDService):

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'

    @private
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = await self.middleware.call(
            'notifier.get_user_object',
            username
        )
        if not user:
            verrors.add(f'{schema}.user', f'Provided user "{username}" does not exist')
            raise verrors

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        if data.get('extra'):
            data['extra'] = ' '.join(data['extra'])
        else:
            data['extra'] = ''

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in home dir.'
                )
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}'
                            )

            if(
                data.get('validate_rpath') and
                remote_path and
                remote_host and
                remote_port
            ):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    with (await asyncio.wait_for(asyncssh.connect(
                            remote_host,
                            port=remote_port,
                            username=remote_username,
                            client_keys=key_files,
                            known_hosts=None
                    ), timeout=5)) as conn:

                        await conn.run(f'test -d {shlex.quote(remote_path)}', check=True)

                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(
                            f'{schema}.remotehost',
                            e.__str__()
                        )

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error[ error code {e.code} ] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field'
                        )
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}'
                        )

                except asyncssh.Error as e:

                    if e.__class__.__name__ in e.__str__():
                        exception_reason = e.__str__()
                    else:
                        exception_reason = e.__class__.__name__ + ' ' + e.__str__()
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data.get('validate_rpath'):
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(Dict(
        'rsync_task_create',
        Str('path', required=True),
        Str('user', required=True),
        Str('remotehost'),
        Int('remoteport'),
        Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
        Str('remotemodule'),
        Str('remotepath'),
        Bool('validate_rpath'),
        Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
        Str('desc'),
        Cron(
            'schedule',
            defaults={'minute': '00'},
        ),
        Bool('recursive'),
        Bool('times'),
        Bool('compress'),
        Bool('archive'),
        Bool('delete'),
        Bool('quiet'),
        Bool('preserveperm'),
        Bool('preserveattr'),
        Bool('delayupdates'),
        List('extra', items=[Str('extra')]),
        Bool('enabled'),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        `path` represents the path to pool/dataset.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` represents the operating mechanism for rsync, i.e. rsync module mode or rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute must be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self._get_instance(data['id'])

    @accepts(
        Int('id', validators=[Range(min=1)]),
        Patch('rsync_task_create', 'rsync_task_update', ('attr', {'update': True}))
    )
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)], options={'get': True})

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new, 'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)], options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete', self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command avoiding code duplication.
        """
        rsync = await self._get_instance(id)
        path = shlex.quote(rsync['path'])

        line = [
            '/usr/bin/lockf', '-s', '-t', '0', '-k', path, '/usr/local/bin/rsync'
        ]
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-z'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"']
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)

    @item_method
    @accepts(Int('id'))
    @job(lock=lambda args: args[-1], logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt as well as syslog.
        """
        rsync = self.middleware.call_sync('rsynctask._get_instance', id)
        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(
            commandline, rsync['user'], lambda v: job.logs_fd.write(v)
        )

        if cp.returncode != 0:
            raise CallError(
                f'rsync command returned {cp.returncode}. Check logs for further information.'
            )
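
To see what `commandline()` above actually assembles, here is the MODULE-mode path traced by hand with made-up task values (this mirrors the method's logic rather than calling it; the `-a -z` flags stand in for the boolean-driven flag loop):

# Hand-traced sketch of commandline() for a MODULE-mode pull task.
import shlex

rsync = {'path': '/mnt/vol1/rsync_dataset', 'user': 'backup',
         'remotehost': 'remote.example.com', 'remotemodule': 'remote_module1',
         'mode': 'MODULE', 'direction': 'PULL'}

path = shlex.quote(rsync['path'])
remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'
module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
if rsync['direction'] != 'PUSH':
    module_args.reverse()          # pull: remote module first, local path last
print(' '.join(['/usr/bin/lockf', '-s', '-t', '0', '-k', path,
                '/usr/local/bin/rsync', '-a', '-z'] + module_args))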
コード例 #47
0
ファイル: smart.py プロジェクト: bmhughes/freenas
    async def do_create(self, data):
        """
        Create a SMART Test Task.

        `disks` is a list of valid disks which should be monitored in this task.

        `type` is specified to represent the type of SMART test to be executed.

        `all_disks` when enabled sets the task to cover all disks in which case `disks` is not required.

        .. examples(websocket)::

          Create a SMART Test Task which executes every 30 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.create",
                "params": [{
                    "schedule": {
                        "minute": "30",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "all_disks": true,
                    "type": "OFFLINE",
                    "disks": []
                }]
            }
        """
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if data['all_disks']:
            if data.get('disks'):
                verrors.add(
                    'smart_test_create.disks',
                    'This test is already enabled for all disks'
                )
        else:
            if not data.get('disks'):
                verrors.add(
                    'smart_test_create.disks',
                    'This field is required'
                )

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.get_instance(data['id'])
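
Unlike the earlier examples, this version restarts smartd via `asyncio.ensure_future`, i.e. fire-and-forget: `do_create` returns before the restart finishes. A self-contained sketch of that pattern (the names here are stand-ins, not the middleware API):

# Fire-and-forget pattern: schedule a coroutine without awaiting it.
import asyncio

async def _service_change(service, verb):
    await asyncio.sleep(0.1)            # stand-in for the real smartd restart
    print(f'{service} {verb} finished')

async def do_create():
    # the restart is scheduled on the loop, not awaited
    asyncio.ensure_future(_service_change('smartd', 'restart'))
    return {'id': 1}                    # returns before the restart completes

async def main():
    print(await do_create())
    await asyncio.sleep(0.2)            # keep the loop alive for the background task

asyncio.run(main())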
コード例 #48
0
    async def pool_scrub_extend(self, data):
        data['pool'] = data.pop('volume')
        data['pool'] = data['pool']['id']
        Cron.convert_db_format_to_schedule(data)
        return data
コード例 #49
0
class RsyncTaskService(CRUDService):

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'

    @private
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = await self.middleware.call(
            'notifier.get_user_object',
            username
        )
        if not user:
            verrors.add(f'{schema}.user', f'Provided user "{username}" does not exist')
            raise verrors

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        if data.get('extra'):
            data['extra'] = ' '.join(data['extra'])
        else:
            data['extra'] = ''

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'module' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'ssh':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user.pw_dir, '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user.pw_dir, '.ssh', 'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in home dir.'
                )
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}'
                            )

            if(
                data.get('validate_rpath') and
                remote_path and
                remote_host and
                remote_port
            ):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    with (await asyncio.wait_for(asyncssh.connect(
                            remote_host,
                            port=remote_port,
                            username=remote_username,
                            client_keys=key_files,
                            known_hosts=None
                    ), timeout=5)) as conn:

                        await conn.run(f'test -d {shlex.quote(remote_path)}', check=True)

                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(
                            f'{schema}.remotehost',
                            e.__str__()
                        )

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error[ error code {e.code} ] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field'
                        )
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}'
                        )

                except asyncssh.Error as e:

                    if e.__class__.__name__ in e.__str__():
                        exception_reason = e.__str__()
                    else:
                        exception_reason = e.__class__.__name__ + ' ' + e.__str__()
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data.get('validate_rpath'):
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        return verrors, data

    @accepts(Dict(
        'rsync_task_create',
        Str('path'),
        Str('user', required=True),
        Str('remotehost'),
        Int('remoteport'),
        Str('mode'),
        Str('remotemodule'),
        Str('remotepath'),
        Bool('validate_rpath'),
        Str('direction'),
        Str('desc'),
        Cron('schedule'),
        Bool('recursive'),
        Bool('times'),
        Bool('compress'),
        Bool('archive'),
        Bool('delete'),
        Bool('quiet'),
        Bool('preserveperm'),
        Bool('preserveattr'),
        Bool('delayupdates'),
        List('extra', items=[Str('extra')]),
        Bool('enabled'),
        register=True,
    ))
    async def do_create(self, data):
        verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return data

    @accepts(
        Int('id', validators=[Range(min=1)]),
        Patch('rsync_task_create', 'rsync_task_update', ('attr', {'update': True}))
    )
    async def do_update(self, id, data):
        old = await self.query(filters=[('id', '=', id)], options={'get': True})

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new, 'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)], options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        res = await self.middleware.call('datastore.delete', self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command line, avoiding code duplication.
        """
        rsync = await self._get_instance(id)
        path = shlex.quote(rsync['path'])

        line = [
            '/usr/bin/lockf', '-s', '-t', '0', '-k', path, '/usr/local/bin/rsync'
        ]
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-z'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'module':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'push':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"']
            if rsync['direction'] != 'push':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)
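
    # A hedged illustration (values are hypothetical, not from the source):
    # for an SSH-mode push task with path /mnt/tank/data, user 'backup',
    # remote host 192.0.2.10, remote path /remote/backup and 'archive' set,
    # commandline() assembles roughly:
    #
    #   /usr/bin/lockf -s -t 0 -k /mnt/tank/data /usr/local/bin/rsync -a \
    #       -e "ssh -p 22 -o BatchMode=yes -o StrictHostKeyChecking=yes" \
    #       /mnt/tank/data "backup"@192.0.2.10:"/remote/backup"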

    @item_method
    @accepts(Int('id'))
    @job(lock=lambda args: args[-1], logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt as well as syslog.
        """
        rsync = self.middleware.call_sync('rsynctask._get_instance', id)
        commandline = self.middleware.call_sync('rsynctask.commandline', id)
        q = Queue()
        rv = Value('i')
        p = Process(target=_run_command, args=(rsync['user'], commandline, q, rv), daemon=True)
        p.start()
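        # Drain the child's output queue while it is alive, forwarding chunks
        # to the job log; a None sentinel marks the end of output.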
        while p.is_alive() or not q.empty():
            try:
                get = q.get(True, 2)
                if get is None:
                    break
                job.logs_fd.write(get)
            except queue.Empty:
                pass
        p.join()
        if rv.value != 0:
            raise CallError(
                f'rsync command returned {rv.value}. Check logs for further information.'
            )
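
run() above streams a child process's output back to the job log through a
multiprocessing queue. A minimal self-contained sketch of that pattern (the
helper name _produce and the echo command are illustrative, not from the
FreeNAS source):

    from multiprocessing import Process, Queue, Value
    import queue
    import subprocess

    def _produce(cmd, q, rv):
        # Run the command, push output chunks onto the queue and record the
        # exit code; a trailing None tells the consumer there is no more output.
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for chunk in iter(lambda: proc.stdout.read(4096), b''):
            q.put(chunk)
        proc.wait()
        rv.value = proc.returncode
        q.put(None)

    if __name__ == '__main__':
        q = Queue()
        rv = Value('i')
        p = Process(target=_produce, args=('echo hello; echo done', q, rv),
                    daemon=True)
        p.start()
        while p.is_alive() or not q.empty():
            try:
                chunk = q.get(True, 2)
                if chunk is None:
                    break
                print(chunk.decode(), end='')  # stand-in for job.logs_fd.write()
            except queue.Empty:
                pass
        p.join()
        print('exit code:', rv.value)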
Code example #50
0
class PeriodicSnapshotTaskService(CRUDService):
    class Config:
        datastore = 'storage.task'
        datastore_prefix = 'task_'
        datastore_extend = 'pool.snapshottask.extend'
        datastore_extend_context = 'pool.snapshottask.extend_context'
        namespace = 'pool.snapshottask'
        cli_namespace = 'task.snapshot'

    @private
    async def extend_context(self, rows, extra):
        return {
            'state': await self.middleware.call('zettarepl.get_state'),
            'vmware': await self.middleware.call('vmware.query'),
        }

    @private
    async def extend(self, data, context):
        Cron.convert_db_format_to_schedule(data, begin_end=True)

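        # Flag the task for VMware snapshot coordination when any registered
        # VMware filesystem matches the dataset (or, for recursive tasks, is a
        # descendant of it).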
        data['vmware_sync'] = any(
            vmware['filesystem'] == data['dataset'] or (
                data['recursive'] and is_child(vmware['filesystem'], data['dataset'])
            )
            for vmware in context['vmware']
        )

        if 'error' in context['state']:
            data['state'] = context['state']['error']
        else:
            data['state'] = context['state']['tasks'].get(
                f'periodic_snapshot_task_{data["id"]}', {
                    'state': 'PENDING',
                })

        return data

    @accepts(
        Dict('periodic_snapshot_create',
             Dataset('dataset', required=True),
             Bool('recursive', required=True),
             List('exclude', items=[Dataset('item')]),
             Int('lifetime_value', required=True),
             Str('lifetime_unit',
                 enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR'],
                 required=True),
             Str('naming_schema',
                 required=True,
                 validators=[ReplicationSnapshotNamingSchema()]),
             Cron('schedule',
                  defaults={
                      'minute': '00',
                      'begin': '00:00',
                      'end': '23:59',
                  },
                  required=True,
                  begin_end=True),
             Bool('allow_empty', default=True),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create a Periodic Snapshot Task

        Create a Periodic Snapshot Task that will take snapshots of specified `dataset` at specified `schedule`.
        Recursive snapshots can be created if `recursive` flag is enabled. You can `exclude` specific child datasets
        or zvols from the snapshot.
        Snapshots will be automatically destroyed after a certain amount of time, specified by
        `lifetime_value` and `lifetime_unit`.
        If multiple periodic tasks create snapshots at the same time (for example, hourly and daily at 00:00), the snapshot
        will be kept until the last of these tasks reaches its expiry time.
        Snapshots will be named according to `naming_schema` which is a `strftime`-like template for snapshot name
        and must contain `%Y`, `%m`, `%d`, `%H` and `%M`.

        .. examples(websocket)::

          Create a recursive Periodic Snapshot Task for dataset `data/work` excluding `data/work/temp`. Snapshots
          will be created on weekdays every hour from 09:00 to 18:00 and will be stored for two weeks.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.create",
                "params": [{
                    "dataset": "data/work",
                    "recursive": true,
                    "exclude": ["data/work/temp"],
                    "lifetime_value": 2,
                    "lifetime_unit": "WEEK",
                    "naming_schema": "auto_%Y-%m-%d_%H-%M",
                    "schedule": {
                        "minute": "0",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    }
                }]
            }
        """

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_create', await self._validate(data))

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data, begin_end=True)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('zettarepl.update_tasks')

        return await self.get_instance(data['id'])

    @accepts(
        Int('id', required=True),
        Patch(
            'periodic_snapshot_create', 'periodic_snapshot_update',
            ('add', {'name': 'fixate_removal_date', 'type': 'bool'}),
            ('attr', {'update': True}),
        ),
    )
    async def do_update(self, id, data):
        """
        Update a Periodic Snapshot Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.update",
                "params": [
                    1,
                    {
                        "dataset": "data/work",
                        "recursive": true,
                        "exclude": ["data/work/temp"],
                        "lifetime_value": 2,
                        "lifetime_unit": "WEEK",
                        "naming_schema": "auto_%Y-%m-%d_%H-%M",
                        "schedule": {
                            "minute": "0",
                            "hour": "*",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        }
                    }
                ]
            }
        """

        fixate_removal_date = data.pop('fixate_removal_date', False)

        old = await self.get_instance(id)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_update', await self._validate(new))

        if not new['enabled']:
            for replication_task in await self.middleware.call(
                    'replication.query', [['enabled', '=', True]]):
                if any(periodic_snapshot_task['id'] == id
                       for periodic_snapshot_task in
                       replication_task['periodic_snapshot_tasks']):
                    verrors.add('periodic_snapshot_update.enabled', (
                        'You can\'t disable this periodic snapshot task because it is bound to the enabled '
                        f'replication task {replication_task["id"]!r}'))
                    break

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new, begin_end=True)

        for key in ('vmware_sync', 'state'):
            new.pop(key, None)

        will_change_retention_for = None
        if fixate_removal_date:
            will_change_retention_for = await self.middleware.call(
                'pool.snapshottask.update_will_change_retention_for',
                id,
                data,
            )

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        if will_change_retention_for:
            await self.middleware.call('pool.snapshottask.fixate_removal_date',
                                       will_change_retention_for, old)

        await self.middleware.call('zettarepl.update_tasks')

        return await self.get_instance(id)

    @accepts(
        Int('id'),
        Dict(
            'options',
            Bool('fixate_removal_date', default=False),
        ),
    )
    async def do_delete(self, id, options):
        """
        Delete a Periodic Snapshot Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.delete",
                "params": [
                    1
                ]
            }
        """

        for replication_task in await self.middleware.call(
                'replication.query', [
                    ['direction', '=', 'PUSH'],
                    ['also_include_naming_schema', '=', []],
                    ['enabled', '=', True],
                ]):
            if len(replication_task['periodic_snapshot_tasks']) == 1:
                if replication_task['periodic_snapshot_tasks'][0]['id'] == id:
                    raise CallError(
                        f'You are deleting the last periodic snapshot task bound to the enabled replication task '
                        f'{replication_task["name"]!r}, which will break it. Please disable that replication task '
                        f'first.'
                    )

        if options['fixate_removal_date']:
            will_change_retention_for = await self.middleware.call(
                'pool.snapshottask.delete_will_change_retention_for', id)

            if will_change_retention_for:
                task = await self.get_instance(id)
                await self.middleware.call(
                    'pool.snapshottask.fixate_removal_date',
                    will_change_retention_for, task)

        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('zettarepl.update_tasks')

        return response

    @accepts(
        Dict(
            'periodic_snapshot_foreseen_count',
            Int('lifetime_value', required=True),
            Str('lifetime_unit',
                enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR'],
                required=True),
            Cron('schedule',
                 defaults={
                     'minute': '00',
                     'begin': '00:00',
                     'end': '23:59',
                 },
                 required=True,
                 begin_end=True),
            register=True,
        ))
    @returns(Int())
    def foreseen_count(self, data):
        """
        Returns a number of snapshots (per-dataset) being retained if a periodic snapshot task with specific parameters
        is created.
        """

        # Arbitrary year choice, fixed for unit-test repeatability. We don't need a precise answer, only the
        # order of magnitude.
        base = datetime(2020, 1, 1, 0, 0, 0) - timedelta(seconds=1)

        multiplier = 1
        lifetime_value = data['lifetime_value']
        lifetime_unit = data['lifetime_unit']
        if lifetime_unit == 'YEAR' and lifetime_value > 1:
            # All years are the same: instead of running croniter over N years, run it over one
            # year and multiply the result.
            multiplier = lifetime_value
            lifetime_value = 1

        until = base + timedelta(
            seconds=lifetime_value * {
                'HOUR': 3600,
                'DAY': 3600 * 24,
                'WEEK': 3600 * 24 * 7,
                'MONTH': 3600 * 24 * 30,
                'YEAR': 3600 * 24 * 365,
            }[lifetime_unit])

        it = croniter_for_schedule(data['schedule'], base, datetime)
        count = 0
        while True:
            d = it.get_next()
            if d > until:
                break

            if data['schedule']['begin'] <= d.time() <= data['schedule']['end']:
                count += 1

        return count * multiplier
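
    # Rough sanity check (assumed payload, not from the source): an hourly
    # schedule with begin 00:00 / end 23:59 and a two-week lifetime yields
    # 24 * 14 = 336 snapshots per dataset, well below max_count() below.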

    @accepts()
    @returns(Int())
    def max_count(self):
        """
        Returns the maximum number of snapshots (per dataset) the system can sustain.
        """

        # There is a limit to how many snapshots Windows will present to users through File Explorer. If we respond
        # with too many, then File Explorer will show no snapshots available.
        return 512

    @accepts()
    @returns(Int())
    def max_total_count(self):
        """
        Returns the maximum total number of snapshots the system can sustain.
        """

        # Having too many snapshots results in various performance complications (mainly, when listing them).
        # This is an arbitrary round number that is large enough and does not cause issues in most use cases.
        return 10000

    @item_method
    @accepts(Int("id"))
    async def run(self, id):
        """
        Execute a Periodic Snapshot Task of `id`.
        """
        task = await self.get_instance(id)

        if not task["enabled"]:
            raise CallError("Task is not enabled")

        await self.middleware.call("zettarepl.run_periodic_snapshot_task",
                                   task["id"])

    async def _validate(self, data):
        verrors = ValidationErrors()

        if data['dataset'] not in (
                await self.middleware.call('pool.filesystem_choices')):
            verrors.add('dataset', 'ZFS dataset or zvol not found')

        if not data['recursive'] and data['exclude']:
            verrors.add(
                'exclude',
                'Excluding datasets or zvols is not necessary for non-recursive periodic snapshot tasks'
            )

        for i, v in enumerate(data['exclude']):
            if not v.startswith(f'{data["dataset"]}/'):
                verrors.add(
                    f'exclude.{i}',
                    'Excluded dataset or zvol should be a child or other descendant of the selected dataset'
                )

        return verrors
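
For completeness, the websocket example from the do_create docstring expressed
as a Python middleware client call (a sketch: it assumes a local middlewared
socket and an existing data/work dataset):

    from middlewared.client import Client

    with Client() as c:
        task = c.call('pool.snapshottask.create', {
            'dataset': 'data/work',
            'recursive': True,
            'exclude': ['data/work/temp'],
            'lifetime_value': 2,
            'lifetime_unit': 'WEEK',
            'naming_schema': 'auto_%Y-%m-%d_%H-%M',
            'schedule': {
                'minute': '0',
                'hour': '*',
                'dom': '*',
                'month': '*',
                'dow': '1,2,3,4,5',
                'begin': '09:00',
                'end': '18:00',
            },
        })
        print(task['id'])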