async def do_update(self, id, data):
    """
    Update Rsync Task of `id`.

    Merges `data` over the stored record, validates the result, persists it
    and restarts cron so the new schedule takes effect.
    """
    old = await self.query(filters=[('id', '=', id)], options={'get': True})

    new = old.copy()
    new.update(data)

    # Capture the validator's returned payload back into `new` instead of
    # discarding it: the previous code assigned it to `data` and then
    # persisted the un-normalised `new`, dropping any adjustments made
    # during validation. This matches how the validator's return value is
    # consumed elsewhere in this file (e.g. the cron job update method).
    verrors, new = await self.validate_rsync_task(new, 'rsync_task_update')
    if verrors:
        raise verrors

    # Flatten the `schedule` dict into the individual datastore columns.
    Cron.convert_schedule_to_db_format(new)

    await self.middleware.call(
        'datastore.update',
        self._config.datastore,
        id,
        new,
        {'prefix': self._config.datastore_prefix}
    )
    await self.middleware.call('service.restart', 'cron')

    return await self.query(filters=[('id', '=', id)], options={'get': True})
async def do_update(self, id, data):
    """
    Update the cron job of `id` with the attributes supplied in `data`.

    The datastore is only written to — and cron only restarted — when the
    merged record actually differs from what is currently stored.
    """
    stored = await self.query(filters=[('id', '=', id)], options={'get': True})
    before = stored.copy()
    stored.update(data)

    verrors, stored = await self.validate_data(stored, 'cron_job_update')
    if verrors:
        raise verrors

    # Flatten both schedules so the two records can be compared field by field.
    Cron.convert_schedule_to_db_format(stored)
    Cron.convert_schedule_to_db_format(before)

    # Symmetric difference is non-empty iff at least one field changed.
    if set(stored.items()) ^ set(before.items()):
        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            stored,
            {'prefix': self._config.datastore_prefix}
        )

        await self.middleware.call(
            'service.restart',
            'cron',
            {'onetime': False}
        )

    return await self.query(filters=[('id', '=', id)], options={'get': True})
async def do_create(self, data):
    """
    Create a S.M.A.R.T. test task, then restart smartd so it is picked up.
    """
    # The schema delivers `type` wrapped in a list; the datastore stores
    # a single value.
    data['type'] = data.pop('type')[0]
    verrors = await self.validate_data(data, 'smart_test_create')

    if not data.get('disks'):
        verrors.add(
            'smart_test_create.disks',
            'This field is required'
        )

    if verrors:
        raise verrors

    # Flatten the `schedule` dict into the individual datastore columns.
    Cron.convert_schedule_to_db_format(data)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix}
    )

    await self.middleware.call(
        'service.restart',
        'smartd',
        {'onetime': False}
    )

    return data
async def compress(self, data):
    """
    Massage a replication task dict into the shape the datastore expects.
    """
    # For PUSH tasks the extra naming schemas are persisted in the
    # `naming_schema` column; the API-only field is dropped either way.
    if data["direction"] == "PUSH":
        data["naming_schema"] = data["also_include_naming_schema"]
    del data["also_include_naming_schema"]

    # Flatten both schedules (with begin/end times) into prefixed columns.
    Cron.convert_schedule_to_db_format(data, "schedule", key_prefix="schedule_", begin_end=True)
    Cron.convert_schedule_to_db_format(data, "restrict_schedule", key_prefix="restrict_schedule_", begin_end=True)

    # Relationship data handled separately; not a column on this table.
    del data["periodic_snapshot_tasks"]

    return data
async def _compress(self, cloud_sync):
    """
    Convert a cloud sync task from API form to datastore form, encrypting
    secret fields on the way in.
    """
    if "credentials" in cloud_sync:
        # The datastore column is named `credential` (singular).
        cloud_sync["credential"] = cloud_sync.pop("credentials")

    # Encrypt each secret field that is present, in a stable order.
    for secret in ("encryption_password", "encryption_salt"):
        if secret in cloud_sync:
            cloud_sync[secret] = await self.middleware.call(
                "notifier.pwenc_encrypt", cloud_sync[secret])

    Cron.convert_schedule_to_db_format(cloud_sync)

    return cloud_sync
async def do_create(self, data):
    """
    Create a new cron job.

    `stderr` and `stdout` are boolean values which if `true`, represent that we would like to suppress
    standard error / standard output respectively.

    .. examples(websocket)::

      Create a cron job which executes `touch /tmp/testfile` after every 5 minutes.

        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "cronjob.create",
            "params": [{
                "enabled": true,
                "schedule": {
                    "minute": "5",
                    "hour": "*",
                    "dom": "*",
                    "month": "*",
                    "dow": "*"
                },
                "command": "touch /tmp/testfile",
                "description": "Test command",
                "user": "******",
                "stderr": true,
                "stdout": true
            }]
        }
    """
    verrors, data = await self.validate_data(data, 'cron_job_create')
    if verrors:
        raise verrors

    # Flatten the `schedule` dict into the individual datastore columns.
    Cron.convert_schedule_to_db_format(data)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix}
    )

    await self.middleware.call('service.restart', 'cron')

    return await self._get_instance(data['id'])
async def _compress(self, cloud_sync):
    """
    Convert a cloud sync task into datastore form: rename the credentials
    field, encrypt the secrets and flatten the schedule.
    """
    cloud_sync["credential"] = cloud_sync.pop("credentials")

    # Both secrets are always present here and are stored encrypted.
    for secret in ("encryption_password", "encryption_salt"):
        cloud_sync[secret] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync[secret]
        )

    Cron.convert_schedule_to_db_format(cloud_sync)

    # `job` is transient runtime state, never persisted.
    cloud_sync.pop('job', None)

    return cloud_sync
async def do_create(self, data):
    """
    Validate and persist a new rsync task, then restart cron to schedule it.
    """
    verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
    if verrors:
        raise verrors

    # Flatten the `schedule` dict into the individual datastore columns.
    Cron.convert_schedule_to_db_format(data)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix}
    )
    await self.middleware.call('service.restart', 'cron')

    return data
async def do_update(self, id, data):
    """
    Update SMART test task of `id`, reconciling disk membership between the
    stored record and the supplied payload before validating and persisting.
    """
    old = await self.query(filters=[('id', '=', id)], options={'get': True})
    new = old.copy()
    new.update(data)

    # The schema wraps `type` in a list; unwrap both copies so they can be
    # compared and stored as single values.
    new['type'] = new.pop('type')[0]
    old['type'] = old.pop('type')[0]

    added_disks = [d for d in new['disks'] if d not in old['disks']]
    removed_disks = [d for d in old['disks'] if d not in new['disks']]

    # When the test type is unchanged, only the newly added disks need to
    # go through validation.
    if old['type'] == new['type']:
        new['disks'] = added_disks
    verrors = await self.validate_data(new, 'smart_test_update')

    # Rebuild the final disk list: everything added plus everything kept.
    new['disks'] = [d for d in chain(added_disks, old['disks']) if d not in removed_disks]

    if not new.get('disks'):
        verrors.add(
            'smart_test_update.disks',
            'This field is required'
        )

    if verrors:
        raise verrors

    Cron.convert_schedule_to_db_format(new)

    await self.middleware.call(
        'datastore.update',
        self._config.datastore,
        id,
        new,
        {'prefix': self._config.datastore_prefix}
    )

    await self.middleware.call(
        'service.restart',
        'smartd',
        {'onetime': False}
    )

    return await self.query(filters=[('id', '=', id)], options={'get': True})
async def do_create(self, data):
    """
    Create a scrub task for a pool and restart cron so it takes effect.
    """
    verrors, data = await self.validate_data(data, 'pool_scrub_create')
    if verrors:
        raise verrors

    # The datastore column is historically named `volume`.
    data['volume'] = data.pop('pool')
    Cron.convert_schedule_to_db_format(data)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix}
    )

    await self.middleware.call(
        'service.restart',
        'cron',
        {'onetime': False}
    )

    return await self.query(filters=[('id', '=', data['id'])], options={'get': True})
async def do_create(self, data):
    """
    Create a Rsync Task.

    See the comment in Rsyncmod about `path` length limits.

    `remotehost` is ip address or hostname of the remote system. If username differs on the remote host,
    "username@remote_host" format should be used.

    `mode` represents different operating mechanisms for Rsync i.e Rsync Module mode / Rsync SSH mode.

    `remotemodule` is the name of remote module, this attribute should be specified when `mode` is set to MODULE.

    `remotepath` specifies the path on the remote system.

    `validate_rpath` is a boolean which when sets validates the existence of the remote path.

    `direction` specifies if data should be PULLED or PUSHED from the remote system.

    `compress` when set reduces the size of the data which is to be transmitted.

    `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
    and special files.

    `delete` when set deletes files in the destination directory which do not exist in the source directory.

    `preserveperm` when set preserves original file permissions.

    .. examples(websocket)::

      Create a Rsync Task which pulls data from a remote system every 5 minutes.

        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "rsynctask.create",
            "params": [{
                "enabled": true,
                "schedule": {
                    "minute": "5",
                    "hour": "*",
                    "dom": "*",
                    "month": "*",
                    "dow": "*"
                },
                "desc": "Test rsync task",
                "user": "******",
                "mode": "MODULE",
                "remotehost": "[email protected]",
                "compress": true,
                "archive": true,
                "direction": "PULL",
                "path": "/mnt/vol1/rsync_dataset",
                "remotemodule": "remote_module1"
            }]
        }
    """
    verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
    if verrors:
        raise verrors

    # Flatten the `schedule` dict into the individual datastore columns.
    Cron.convert_schedule_to_db_format(data)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix})
    await self.middleware.call('service.restart', 'cron')

    return await self.get_instance(data['id'])
async def do_update(self, id, data):
    """
    Update a Periodic Snapshot Task with specific `id`

    See the documentation for `create` method for information on payload contents

    .. examples(websocket)::

        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "pool.snapshottask.update",
            "params": [
                1,
                {
                    "dataset": "data/work",
                    "recursive": true,
                    "exclude": ["data/work/temp"],
                    "lifetime_value": 2,
                    "lifetime_unit": "WEEK",
                    "naming_schema": "auto_%Y-%m-%d_%H-%M",
                    "schedule": {
                        "minute": "0",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    }
                }
            ]
        }
    """
    # Update option, not a field of the task — remove before merging.
    fixate_removal_date = data.pop('fixate_removal_date', False)

    old = await self._get_instance(id)
    new = old.copy()
    new.update(data)

    verrors = ValidationErrors()
    verrors.add_child('periodic_snapshot_update', await self._validate(new))

    if not new['enabled']:
        # A task bound to an enabled replication task may not be disabled.
        for replication_task in await self.middleware.call('replication.query', [['enabled', '=', True]]):
            if any(periodic_snapshot_task['id'] == id
                   for periodic_snapshot_task in replication_task['periodic_snapshot_tasks']):
                verrors.add(
                    'periodic_snapshot_update.enabled',
                    (f'You can\'t disable this periodic snapshot task because it is bound to enabled replication '
                     f'task {replication_task["id"]!r}')
                )
                break

    if verrors:
        raise verrors

    Cron.convert_schedule_to_db_format(new, begin_end=True)

    # Computed/runtime fields that are not datastore columns.
    for key in ('vmware_sync', 'state'):
        new.pop(key, None)

    will_change_retention_for = None
    if fixate_removal_date:
        will_change_retention_for = await self.middleware.call(
            'pool.snapshottask.update_will_change_retention_for',
            id,
            data,
        )

    await self.middleware.call(
        'datastore.update',
        self._config.datastore,
        id,
        new,
        {'prefix': self._config.datastore_prefix}
    )

    if will_change_retention_for:
        # Pin removal dates of snapshots whose retention would change.
        await self.middleware.call('pool.snapshottask.fixate_removal_date', will_change_retention_for, old)

    await self.middleware.call('zettarepl.update_tasks')

    return await self._get_instance(id)
async def do_create(self, data):
    """
    Create a Periodic Snapshot Task

    Create a Periodic Snapshot Task that will take snapshots of specified `dataset` at specified `schedule`.
    Recursive snapshots can be created if `recursive` flag is enabled. You can `exclude` specific child datasets
    from snapshot.
    Snapshots will be automatically destroyed after a certain amount of time, specified by
    `lifetime_value` and `lifetime_unit`.
    Snapshots will be named according to `naming_schema` which is a `strftime`-like template for snapshot name
    and must contain `%Y`, `%m`, `%d`, `%H` and `%M`.

    .. examples(websocket)::

      Create a recursive Periodic Snapshot Task for dataset `data/work` excluding `data/work/temp`. Snapshots
      will be created on weekdays every hour from 09:00 to 18:00 and will be stored for two weeks.

        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "pool.snapshottask.create",
            "params": [{
                "dataset": "data/work",
                "recursive": true,
                "exclude": ["data/work/temp"],
                "lifetime_value": 2,
                "lifetime_unit": "WEEK",
                "naming_schema": "auto_%Y-%m-%d_%H-%M",
                "schedule": {
                    "minute": "0",
                    "hour": "*",
                    "dom": "*",
                    "month": "*",
                    "dow": "1,2,3,4,5",
                    "begin": "09:00",
                    "end": "18:00"
                }
            }]
        }
    """
    verrors = ValidationErrors()
    verrors.add_child('periodic_snapshot_create', await self._validate(data))
    if verrors:
        raise verrors

    # Legacy-replication constraints only apply once base validation passed.
    if self._is_legacy(data, await self._legacy_replication_tasks()):
        verrors.add_child('periodic_snapshot_create', self._validate_legacy(data))
    if verrors:
        raise verrors

    Cron.convert_schedule_to_db_format(data, begin_end=True)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix}
    )

    await self.middleware.call('service.restart', 'cron')
    await self.middleware.call('zettarepl.update_tasks')

    return await self._get_instance(data['id'])
async def do_update(self, id, data):
    """
    Update a Periodic Snapshot Task with specific `id`

    See the documentation for `create` method for information on payload contents

    .. examples(websocket)::

        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "pool.snapshottask.update",
            "params": [
                1,
                {
                    "dataset": "data/work",
                    "recursive": true,
                    "exclude": ["data/work/temp"],
                    "lifetime_value": 2,
                    "lifetime_unit": "WEEK",
                    "naming_schema": "auto_%Y-%m-%d_%H-%M",
                    "schedule": {
                        "minute": "0",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    }
                }
            ]
        }
    """
    old = await self._get_instance(id)
    new = old.copy()
    new.update(data)

    verrors = ValidationErrors()
    verrors.add_child('periodic_snapshot_update', await self._validate(new))

    # Check the merged record, not the raw payload: `data` is a partial
    # update and may omit `enabled` entirely, which previously raised
    # KeyError here instead of performing the update.
    if not new['enabled']:
        # A task bound to an enabled replication task may not be disabled.
        for replication_task in await self.middleware.call('replication.query', [['enabled', '=', True]]):
            if any(periodic_snapshot_task['id'] == id
                   for periodic_snapshot_task in replication_task['periodic_snapshot_tasks']):
                verrors.add(
                    'periodic_snapshot_update.enabled',
                    (f'You can\'t disable this periodic snapshot task because it is bound to enabled replication '
                     f'task {replication_task["id"]!r}')
                )
                break

    if verrors:
        raise verrors

    legacy_replication_tasks = await self._legacy_replication_tasks()
    if self._is_legacy(new, legacy_replication_tasks):
        verrors.add_child('periodic_snapshot_update', self._validate_legacy(new))
    else:
        # A task leaving legacy mode must keep the legacy naming schema
        # until its replication tasks are upgraded.
        if self._is_legacy(old, legacy_replication_tasks):
            verrors.add(
                'periodic_snapshot_update.naming_schema',
                ('This snapshot task is being used in legacy replication task. You must use naming schema '
                 f'{self._legacy_naming_schema(new)!r}. Please upgrade your replication tasks to edit this field.')
            )

    if verrors:
        raise verrors

    Cron.convert_schedule_to_db_format(new, begin_end=True)

    # Computed/runtime fields that are not datastore columns.
    for key in ('legacy', 'vmware_sync', 'state'):
        new.pop(key, None)

    await self.middleware.call(
        'datastore.update',
        self._config.datastore,
        id,
        new,
        {'prefix': self._config.datastore_prefix}
    )

    await self.middleware.call('service.restart', 'cron')
    await self.middleware.call('zettarepl.update_tasks')

    return await self._get_instance(id)
async def do_create(self, data):
    """
    Create a Periodic Snapshot Task

    Create a Periodic Snapshot Task that will take snapshots of specified `dataset` at specified `schedule`.
    Recursive snapshots can be created if `recursive` flag is enabled. You can `exclude` specific child datasets
    or zvols from the snapshot.
    Snapshots will be automatically destroyed after a certain amount of time, specified by
    `lifetime_value` and `lifetime_unit`.
    If multiple periodic tasks create snapshots at the same time (for example hourly and daily at 00:00) the snapshot
    will be kept until the last of these tasks reaches its expiry time.
    Snapshots will be named according to `naming_schema` which is a `strftime`-like template for snapshot name
    and must contain `%Y`, `%m`, `%d`, `%H` and `%M`.

    .. examples(websocket)::

      Create a recursive Periodic Snapshot Task for dataset `data/work` excluding `data/work/temp`. Snapshots
      will be created on weekdays every hour from 09:00 to 18:00 and will be stored for two weeks.

        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "pool.snapshottask.create",
            "params": [{
                "dataset": "data/work",
                "recursive": true,
                "exclude": ["data/work/temp"],
                "lifetime_value": 2,
                "lifetime_unit": "WEEK",
                "naming_schema": "auto_%Y-%m-%d_%H-%M",
                "schedule": {
                    "minute": "0",
                    "hour": "*",
                    "dom": "*",
                    "month": "*",
                    "dow": "1,2,3,4,5",
                    "begin": "09:00",
                    "end": "18:00"
                }
            }]
        }
    """
    verrors = ValidationErrors()
    verrors.add_child('periodic_snapshot_create', await self._validate(data))
    if verrors:
        raise verrors

    # Flatten the `schedule` dict (with begin/end) into datastore columns.
    Cron.convert_schedule_to_db_format(data, begin_end=True)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix}
    )

    await self.middleware.call('zettarepl.update_tasks')

    return await self._get_instance(data['id'])
async def do_create(self, data):
    """
    Create a SMART Test Task.

    `disks` is a list of valid disks which should be monitored in this task.

    `type` is specified to represent the type of SMART test to be executed.

    `all_disks` when enabled sets the task to cover all disks in which case `disks` is not required.

    .. examples(websocket)::

      Create a SMART Test Task which executes after every 30 minutes.

        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "smart.test.create",
            "params": [{
                "schedule": {
                    "minute": "30",
                    "hour": "*",
                    "dom": "*",
                    "month": "*",
                    "dow": "*"
                },
                "all_disks": true,
                "type": "OFFLINE",
                "disks": []
            }]
        }
    """
    # The schema delivers `type` wrapped in a list; store the single value.
    data['type'] = data.pop('type')[0]
    verrors = await self.validate_data(data, 'smart_test_create')

    # `disks` and `all_disks` are mutually exclusive ways to select targets.
    if data['all_disks']:
        if data.get('disks'):
            verrors.add(
                'smart_test_create.disks',
                'This test is already enabled for all disks'
            )
    elif not data.get('disks'):
        verrors.add(
            'smart_test_create.disks',
            'This field is required'
        )

    if verrors:
        raise verrors

    Cron.convert_schedule_to_db_format(data)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix}
    )

    # Restart smartd in the background; the created task is returned
    # without waiting for the service change to finish.
    asyncio.ensure_future(self._service_change('smartd', 'restart'))

    return await self.get_instance(data['id'])
async def do_create(self, data):
    """
    Create a Rsync Task.

    `path` represents the path to pool/dataset.

    `remotehost` is ip address or hostname of the remote system. If username differs on the remote host,
    "username@remote_host" format should be used.

    `mode` represents different operating mechanisms for Rsync i.e Rsync Module mode / Rsync SSH mode.

    `remotemodule` is the name of remote module, this attribute should be specified when `mode` is set to MODULE.

    `remotepath` specifies the path on the remote system.

    `validate_rpath` is a boolean which when sets validates the existence of the remote path.

    `direction` specifies if data should be PULLED or PUSHED from the remote system.

    `compress` when set reduces the size of the data which is to be transmitted.

    `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
    and special files.

    `delete` when set deletes files in the destination directory which do not exist in the source directory.

    `preserveperm` when set preserves original file permissions.

    .. examples(websocket)::

      Create a Rsync Task which pulls data from a remote system every 5 minutes.

        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "rsynctask.create",
            "params": [{
                "enabled": true,
                "schedule": {
                    "minute": "5",
                    "hour": "*",
                    "dom": "*",
                    "month": "*",
                    "dow": "*"
                },
                "desc": "Test rsync task",
                "user": "******",
                "mode": "MODULE",
                "remotehost": "[email protected]",
                "compress": true,
                "archive": true,
                "direction": "PULL",
                "path": "/mnt/vol1/rsync_dataset",
                "remotemodule": "remote_module1"
            }]
        }
    """
    verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
    if verrors:
        raise verrors

    # Flatten the `schedule` dict into the individual datastore columns.
    Cron.convert_schedule_to_db_format(data)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix}
    )
    await self.middleware.call('service.restart', 'cron')

    return await self._get_instance(data['id'])