Пример #1
0
    def run(self, target):
        """Create an iSCSI initiator target, ensure iscsid is up and open a session."""
        # Fill in optional auth/identity fields the caller may have omitted.
        defaults = {
            'enabled': True,
            'user': None,
            'secret': None,
            'mutual_user': None,
            'mutual_secret': None
        }
        normalize(target, defaults)

        # if iscsid service is not enabled, enable it now.
        svc = self.dispatcher.call_sync('service.query', [('name', '=', 'iscsid')], {'single': True})
        if svc['state'] != 'RUNNING':
            svc_config = svc['config']
            svc_config['enable'] = True
            self.join_subtasks(self.run_subtask('service.update', svc['id'], {'config': svc_config}))

        target_id = self.datastore.insert('iscsi_initiator.targets', target)
        initiator = iscsi.ISCSIInitiator()
        initiator.add_session(iscsi_convert_session(target))

        # Let interested listeners know a new initiator target exists.
        self.dispatcher.emit_event('disk.iscsi.target.changed', {
            'operation': 'create',
            'ids': [target_id]
        })

        return target_id
Пример #2
0
    def run(self, directory):
        """Create a directory-service entry and have dscached configure it."""
        # Ask dscached to validate and normalize the plugin-specific parameters.
        try:
            normalized_params = self.dispatcher.call_sync(
                'dscached.management.normalize_parameters',
                directory['plugin'],
                directory.get('parameters', {})
            )
        except RpcException as err:
            raise TaskException(err.code, err.message)

        defaults = {
            'enabled': False,
            'enumerate': True,
            'immutable': False,
            'uid_range': None,
            'gid_range': None,
            'parameters': normalized_params
        }
        normalize(directory, defaults)

        # winbind needs explicit uid/gid mapping ranges.
        if directory['plugin'] == 'winbind':
            normalize(directory, {'uid_range': [100000, 999999], 'gid_range': [100000, 999999]})

        self.id = self.datastore.insert('directories', directory)
        self.dispatcher.call_sync('dscached.management.configure_directory', self.id)
        self.dispatcher.dispatch_event('directory.changed', {'operation': 'create', 'ids': [self.id]})

        return self.id
Пример #3
0
    def run(self, share):
        """Create an NFS share, regenerate exports and reload the NFS service."""
        normalize(share['properties'], {
            'alldirs': False,
            'read_only': False,
            'maproot_user': None,
            'maproot_group': None,
            'mapall_user': None,
            'mapall_group': None,
            'hosts': [],
            'security': []
        })

        if share['properties']['security']:
            # Security flavors only take effect when NFSv4 is enabled in the service.
            nfs_service = self.dispatcher.call_sync(
                'service.query', [('name', '=', 'nfs')], {'single': True})
            if not nfs_service['config']['v4']:
                self.add_warning(TaskWarning(
                    errno.ENXIO,
                    "NFS security option requires NFSv4 support to be enabled in NFS service settings."
                ))

        share_id = self.datastore.insert('shares', share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'nfs')
        self.dispatcher.call_sync('service.reload', 'nfs', timeout=60)
        return share_id
    def run(self, backup):
        """Default the optional SSH key fields and return the backup properties."""
        normalize(backup['properties'], {'privkey': None, 'hostkey': None})
        return backup['properties']
Пример #5
0
    def emit(self, alert):
        """Persist a new alert and hand it off to alertd for delivery."""
        # Resolve the alert class; without it type/subtype/severity cannot be filled in.
        cls = self.datastore.get_by_id('alert.classes', alert['class'])
        if not cls:
            raise RpcException(errno.ENOENT, 'Alert class {0} not found'.format(alert['class']))

        defaults = {
            'when': datetime.utcnow(),
            'dismissed': False,
            'active': True,
            'one_shot': False,
            'severity': cls['severity']
        }
        normalize(alert, defaults)

        # These always come from the class definition, never from the caller.
        alert['type'] = cls['type']
        alert['subtype'] = cls['subtype']
        alert['send_count'] = 0

        alert_id = self.datastore.insert('alerts', alert)
        self.dispatcher.dispatch_event('alert.changed', {'operation': 'create', 'ids': [alert_id]})

        self.dispatcher.call_sync('alertd.alert.emit', alert_id)
        return alert_id
Пример #6
0
    def run(self, share):
        """Create an AFP share, regenerate config files and reload afpd."""
        # Apply defaults for every optional AFP share property.
        afp_defaults = {
            'read_only': False,
            'time_machine': False,
            'zero_dev_numbers': False,
            'no_stat': False,
            'afp3_privileges': False,
            'ro_users': None,
            'ro_groups': None,
            'rw_users': None,
            'rw_groups': None,
            'users_allow': None,
            'users_deny': None,
            'groups_allow': None,
            'groups_deny': None,
            'hosts_allow': None,
            'hosts_deny': None,
            'default_file_perms': None,
            'default_directory_perms': None,
            'default_umask': None
        }
        normalize(share['properties'], afp_defaults)

        share_id = self.datastore.insert('shares', share)
        # Regenerate the afp config group and pick up the changes.
        self.dispatcher.call_sync('etcd.generation.generate_group', 'afp')
        self.dispatcher.call_sync('service.reload', 'afp', timeout=60)
        self.dispatcher.dispatch_event('share.afp.changed', {'operation': 'create', 'ids': [share_id]})

        return share_id
Пример #7
0
    def run(self, directory):
        """Create a uniquely-named directory entry and activate it in dscached."""
        try:
            normalized_params = self.dispatcher.call_sync(
                "dscached.management.normalize_parameters", directory["type"], directory.get("parameters", {})
            )
        except RpcException as err:
            raise TaskException(err.code, err.message)

        # Directory names must be unique.
        if self.datastore.exists("directories", ("name", "=", directory["name"])):
            raise TaskException(errno.EEXIST, "Directory {0} already exists".format(directory["name"]))

        normalize(directory, {
            "enabled": False,
            "enumerate": True,
            "immutable": False,
            "uid_range": None,
            "gid_range": None
        })

        # Replace passed in params with normalized ones
        directory["parameters"] = normalized_params

        if directory["type"] == "winbind":
            # winbind requires id-mapping ranges and a running smb service.
            normalize(directory, {"uid_range": [100000, 999999], "gid_range": [100000, 999999]})

            smb = self.dispatcher.call_sync("service.query", [("name", "=", "smb")], {"single": True})
            if not q.get(smb, "config.enable"):
                q.set(smb, "config.enable", True)
                self.join_subtasks(self.run_subtask("service.update", smb["id"], smb))

        self.id = self.datastore.insert("directories", directory)
        self.dispatcher.call_sync("dscached.management.configure_directory", self.id)
        self.dispatcher.dispatch_event("directory.changed", {"operation": "create", "ids": [self.id]})

        # Append the new directory to the lookup order and reload dscached config.
        node = ConfigNode("directory", self.configstore)
        node["search_order"] = node["search_order"].value + [directory["name"]]
        self.dispatcher.call_sync("dscached.management.reload_config")
        return self.id
Пример #8
0
    def run(self, target):
        """Create an iSCSI initiator target and open a session to it."""
        normalize(target, {
            'enabled': True,
            'user': None,
            'secret': None,
            'mutual_user': None,
            'mutual_secret': None
        })

        # if iscsid service is not enabled, enable it now.
        state = self.dispatcher.call_sync('service.query', [('name', '=', 'iscsid')], {'single': True})
        if state['state'] != 'RUNNING':
            cfg = state['config']
            cfg['enable'] = True
            self.run_subtask_sync('service.update', state['id'], {'config': cfg})

        new_id = self.datastore.insert('iscsi_initiator.targets', target)
        initiator = iscsi.ISCSIInitiator()
        initiator.add_session(iscsi_convert_session(target))

        self.dispatcher.emit_event('disk.iscsi.target.changed', {'operation': 'create', 'ids': [new_id]})

        return new_id
Пример #9
0
    def emit(self, alert):
        """Store a new alert and notify alertd, queueing it if alertd is not up yet."""
        cls = self.datastore.get_by_id('alert.classes', alert['class'])
        if not cls:
            raise RpcException(errno.ENOENT, 'Alert class {0} not found'.format(alert['class']))

        normalize(alert, {
            'when': datetime.utcnow(),
            'dismissed': False,
            'active': True,
            'one_shot': False,
            'severity': cls['severity']
        })

        # Class-derived fields always override whatever the caller passed.
        alert['type'] = cls['type']
        alert['subtype'] = cls['subtype']
        alert['send_count'] = 0

        alert_id = self.datastore.insert('alerts', alert)
        self.dispatcher.dispatch_event('alert.changed', {'operation': 'create', 'ids': [alert_id]})

        try:
            self.dispatcher.call_sync('alertd.alert.emit', alert_id)
        except RpcException as err:
            if err.code != errno.ENOENT:
                raise
            # Alertd didn't start yet. Add alert to the pending queue
            pending_alerts.append(alert_id)

        return alert_id
Пример #10
0
    def run(self, share):
        """Create an NFS share, regenerate exports and reload the service."""
        normalize(share["properties"], {
            "alldirs": False,
            "read_only": False,
            "maproot_user": None,
            "maproot_group": None,
            "mapall_user": None,
            "mapall_group": None,
            "hosts": [],
            "security": [],
        })

        if share["properties"]["security"]:
            # Security flavors are meaningful only with NFSv4 enabled.
            nfs = self.dispatcher.call_sync("service.query", [("name", "=", "nfs")], {"single": True})
            if not nfs["config"]["v4"]:
                self.add_warning(TaskWarning(
                    errno.ENXIO, "NFS security option requires NFSv4 support to be enabled in NFS service settings."
                ))

        share_id = self.datastore.insert("shares", share)
        self.dispatcher.call_sync("etcd.generation.generate_group", "nfs")
        self.dispatcher.call_sync("service.reload", "nfs", timeout=60)
        return share_id
Пример #11
0
    def run(self, share):
        """Create a CIFS share and register it in the samba registry."""
        cifs_defaults = {
            'read_only': False,
            'guest_ok': False,
            'guest_only': False,
            'browseable': True,
            'recyclebin': False,
            'show_hidden_files': False,
            'vfs_objects': [],
            'hosts_allow': None,
            'hosts_deny': None
        }
        normalize(share['properties'], cifs_defaults)

        share_id = self.datastore.insert('shares', share)
        path = self.dispatcher.call_sync(
            'shares.translate_path', share['type'], share['target'], share['name'])

        try:
            registry = smbconf.SambaConfig('registry')
            new_share = smbconf.SambaShare()
            convert_share(new_share, path, share['properties'])
            registry.shares[share['name']] = new_share
        except smbconf.SambaConfigException:
            raise TaskException(errno.EFAULT, 'Cannot access samba registry')

        self.dispatcher.dispatch_event('shares.cifs.changed', {'operation': 'create', 'ids': [share_id]})

        return share_id
Пример #12
0
    def run(self, pool_name, path, type, params=None):
        """
        Create a ZFS dataset or volume and mount it.

        :param pool_name: name of the pool to create the dataset in
        :param path: dataset path inside the pool
        :param type: dataset type (e.g. 'FILESYSTEM' or 'VOLUME')
        :param params: optional creation parameters; for 'VOLUME' it must
            contain 'volsize'
        :raises TaskException: when a VOLUME is requested without params
        """
        if params:
            normalize(params, {
                'properties': {}
            })

        if type == 'VOLUME':
            # Bug fix: the old code subscripted params unconditionally here and
            # crashed with a TypeError when params was omitted for a VOLUME.
            if not params:
                raise TaskException(errno.EINVAL, 'volsize is required to create a VOLUME')
            params['properties']['volsize'] = {'value': str(params['volsize'])}

        self.join_subtasks(self.run_subtask(
            'zfs.create_dataset',
            pool_name,
            path,
            type,
            # Flatten {prop: {'value': v}} into {prop: v} for the zfs subtask.
            {k: v['value'] for k, v in params['properties'].items()} if params else {}
        ))

        if params:
            props = {}
            if 'permissions_type' in params:
                # Record the permission model and pick the matching aclmode.
                props['org.freenas:permissions_type'] = {'value': params['permissions_type']}
                props['aclmode'] = {'value': 'restricted' if params['permissions_type'] == 'ACL' else 'passthrough'}

            self.join_subtasks(self.run_subtask('zfs.configure', pool_name, path, props))

        self.join_subtasks(self.run_subtask('zfs.mount', path))
Пример #13
0
    def emit(self, alert):
        """Persist an alert (class looked up via 'clazz') and pass it to alertd."""
        cls = self.datastore.get_by_id('alert.classes', alert['clazz'])
        if not cls:
            raise RpcException(errno.ENOENT, 'Alert class {0} not found'.format(alert['clazz']))

        defaults = {
            'when': datetime.utcnow(),
            'dismissed': False,
            'active': True,
            'one_shot': False,
            'severity': cls['severity']
        }
        normalize(alert, defaults)

        # Fields dictated by the alert class definition.
        alert.update({'type': cls['type'], 'subtype': cls['subtype'], 'send_count': 0})

        new_id = self.datastore.insert('alerts', alert)
        self.dispatcher.dispatch_event('alert.changed', {'operation': 'create', 'ids': [new_id]})

        try:
            self.dispatcher.call_sync('alertd.alert.emit', new_id)
        except RpcException as err:
            if err.code == errno.ENOENT:
                # Alertd didn't start yet. Add alert to the pending queue
                pending_alerts.append(new_id)
            else:
                raise

        return new_id
Пример #14
0
    def run(self, share):
        """Create an AFP share, regenerate its config group and reload afpd."""
        props = share['properties']
        normalize(props, {
            'read_only': False,
            'time_machine': False,
            'zero_dev_numbers': False,
            'no_stat': False,
            'afp3_privileges': True,
            'smb_compatible': False,
            'ro_users': None,
            'ro_groups': None,
            'rw_users': None,
            'rw_groups': None,
            'users_allow': None,
            'users_deny': None,
            'groups_allow': None,
            'groups_deny': None,
            'hosts_allow': None,
            'hosts_deny': None,
            'default_file_perms': None,
            'default_directory_perms': None,
            'default_umask': None
        })

        new_id = self.datastore.insert('shares', share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'afp')
        self.dispatcher.call_sync('service.reload', 'afp', timeout=60)
        self.dispatcher.dispatch_event('share.afp.changed', {'operation': 'create', 'ids': [new_id]})

        return new_id
Пример #15
0
    def run(self, share):
        """Create an iSCSI extent share, assign a NAA id and reload ctld."""
        props = share['properties']
        normalize(props, {
            'serial': self.dispatcher.call_sync('share.iscsi.generate_serial'),
            'block_size': 512,
            'physical_block_size': True,
            'tpc': False,
            'vendor_id': None,
            'product_id': None,
            'device_id': None,
            'rpm': 'SSD',
            'read_only': False,
            'xen_compat': False
        })

        # Every extent gets a freshly generated NAA identifier.
        props['naa'] = self.dispatcher.call_sync('share.iscsi.generate_naa')
        share_id = self.datastore.insert('shares', share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.call_sync('service.reload', 'ctl')

        self.dispatcher.dispatch_event('share.iscsi.changed', {'operation': 'create', 'ids': [share_id]})

        return share_id
Пример #16
0
    def run(self, share):
        """Create an iSCSI extent: default its properties, set a NAA id, reload ctld."""
        iscsi_defaults = {
            'serial': self.dispatcher.call_sync('share.iscsi.generate_serial'),
            'block_size': 512,
            'physical_block_size': True,
            'tpc': False,
            'vendor_id': None,
            'product_id': None,
            'device_id': None,
            'rpm': 'SSD',
            'read_only': False,
            'xen_compat': False
        }
        props = share['properties']
        normalize(props, iscsi_defaults)

        props['naa'] = self.dispatcher.call_sync('share.iscsi.generate_naa')
        new_id = self.datastore.insert('shares', share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.call_sync('service.reload', 'ctl')

        self.dispatcher.dispatch_event('share.iscsi.changed', {'operation': 'create', 'ids': [new_id]})

        return new_id
Пример #17
0
    def run(self, share):
        """Create an SMB share, add it to the samba registry and reload samba."""
        smb_defaults = {
            'read_only': False,
            'guest_ok': False,
            'guest_only': False,
            'browseable': True,
            'recyclebin': False,
            'show_hidden_files': False,
            'previous_versions': True,
            'vfs_objects': [],
            'hosts_allow': None,
            'hosts_deny': None,
            'extra_parameters': {}
        }
        normalize(share['properties'], smb_defaults)

        share_id = self.datastore.insert('shares', share)
        path = self.dispatcher.call_sync('share.translate_path', share_id)

        try:
            registry = smbconf.SambaConfig('registry')
            entry = smbconf.SambaShare()
            convert_share(self.dispatcher, entry, path, share['enabled'],
                          share['properties'])
            registry.shares[share['name']] = entry
            reload_samba()
        except smbconf.SambaConfigException:
            raise TaskException(errno.EFAULT, 'Cannot access samba registry')

        self.dispatcher.dispatch_event('share.smb.changed', {'operation': 'create', 'ids': [share_id]})

        return share_id
Пример #18
0
    def run(self, backup):
        # Peer, bucket and folder are all optional for this backup provider.
        normalize(backup['properties'], {'peer': None, 'bucket': None, 'folder': None})
        return backup['properties']
Пример #19
0
    def run(self, backup):
        """Return backup properties with the optional fields defaulted to None."""
        optional = dict.fromkeys(('peer', 'bucket', 'folder'))
        normalize(backup['properties'], optional)
        return backup['properties']
Пример #20
0
    def run(self, share):
        """
        Create an SMB share.

        Applies property defaults, persists the share, then writes it into the
        samba registry inside a transaction and reloads samba.

        :param share: share document; 'properties' holds the SMB options
        :return: datastore id of the new share
        :raises TaskException: when the samba registry cannot be updated
        """
        normalize(share['properties'], {
            'read_only': False,
            'guest_ok': False,
            'guest_only': False,
            'browseable': True,
            'recyclebin': False,
            'show_hidden_files': False,
            'previous_versions': True,
            'vfs_objects': [],
            'hosts_allow': [],
            'hosts_deny': [],
            'users_allow': [],
            'users_deny': [],
            'groups_allow': [],
            'groups_deny': [],
            'full_audit_prefix': '%u|%I|%m|%S',
            'full_audit_priority': 'notice',
            'full_audit_failure': 'connect',
            'full_audit_success': 'open mkdir unlink rmdir rename',
            'case_sensitive': 'AUTO',
            'allocation_roundup_size': 1048576,
            'ea_support': True,
            'store_dos_attributes': True,
            'map_archive': True,
            'map_hidden': False,
            'map_readonly': True,
            'map_system': False,
            'fruit_metadata': 'STREAM'
        })

        id = self.datastore.insert('shares', share)
        path = self.dispatcher.call_sync('share.translate_path', id)

        try:
            smb_conf = smbconf.SambaConfig('registry')
            smb_conf.transaction_start()
            try:
                smb_share = smbconf.SambaShare()
                convert_share(self.dispatcher, smb_share, path, share['enabled'], share['properties'])
                smb_conf.shares[share['name']] = smb_share
            except BaseException as err:
                # Roll the registry back so a half-written share is never left behind.
                smb_conf.transaction_cancel()
                # Bug fix: the placeholder was never substituted -- err was passed
                # as a stray third argument instead of being formatted into the message.
                raise TaskException(errno.EBUSY, 'Failed to update samba configuration: {0}'.format(err))
            else:
                smb_conf.transaction_commit()

            reload_samba()
        except smbconf.SambaConfigException:
            raise TaskException(errno.EFAULT, 'Cannot access samba registry')

        self.dispatcher.dispatch_event('share.smb.changed', {
            'operation': 'create',
            'ids': [id]
        })

        return id
    def run(self, id, updated_fields):
        """
        Apply a partial update to a network interface and reconfigure it.

        :param id: id of the interface in the 'network.interfaces' collection
        :param updated_fields: dict of fields to change on the interface
        :raises TaskException: on DHCP/alias/VLAN validation failures, or when
            networkd cannot reconfigure the interface
        """
        # Default action; switched to down_interface when the update disables the NIC.
        task = 'networkd.configuration.configure_interface'
        entity = self.datastore.get_by_id('network.interfaces', id)

        if updated_fields.get('dhcp'):
            # Check for DHCP inconsistencies
            # 1. Check whether DHCP is enabled on other interfaces
            # 2. Check whether DHCP configures default route and/or DNS server addresses
            dhcp_used = self.datastore.exists('network.interfaces', ('dhcp', '=', True), ('id', '!=', id))
            dhcp_global = self.configstore.get('network.dhcp.assign_gateway') or \
                self.configstore.get('network.dhcp.assign_dns')

            if dhcp_used and dhcp_global:
                raise TaskException(errno.ENXIO, 'DHCP is already configured on another interface')

            # Clear all aliases
            entity['aliases'] = []

        if updated_fields.get('aliases'):
            # Forbid setting any aliases on interface with DHCP
            if (updated_fields.get('dhcp') or entity['dhcp']) and len(updated_fields['aliases']) > 0:
                raise TaskException(errno.EINVAL, 'Cannot set aliases when using DHCP')

            # Check for aliases inconsistencies
            ips = [x['address'] for x in updated_fields['aliases']]
            if any(ips.count(x) > 1 for x in ips):
                raise TaskException(errno.ENXIO, 'Duplicated IP alias')

            # Add missing broadcast addresses and address family
            for i in updated_fields['aliases']:
                normalize(i, {
                    'type': 'INET'
                })

                if not i.get('broadcast') and i['type'] == 'INET':
                    i['broadcast'] = str(calculate_broadcast(i['address'], i['netmask']))

        if updated_fields.get('vlan'):
            # VLAN parent and tag are only meaningful together.
            vlan = updated_fields['vlan']
            if (not vlan['parent'] and vlan['tag']) or (vlan['parent'] and not vlan['tag']):
                raise TaskException(errno.EINVAL, 'Can only set VLAN parent interface and tag at the same time')

        if 'enabled' in updated_fields:
            # Transition from enabled to disabled brings the interface down instead.
            if entity['enabled'] and not updated_fields['enabled']:
                task = 'networkd.configuration.down_interface'

        # Persist the merged document before asking networkd to act on it.
        entity.update(updated_fields)
        self.datastore.update('network.interfaces', id, entity)

        try:
            self.dispatcher.call_sync(task, id)
        except RpcException as err:
            raise TaskException(errno.ENXIO, 'Cannot reconfigure interface: {0}'.format(str(err)))

        self.dispatcher.dispatch_event('network.interface.changed', {
            'operation': 'update',
            'ids': [id]
        })
Пример #22
0
    def run(self, target):
        """Create an iSCSI target with sane defaults and announce it."""
        normalize(target, {
            "description": None,
            "auth_group": "no-authentication",
            "portal_group": "default",
            "extents": []
        })

        target_id = self.datastore.insert("iscsi.targets", target)
        self.dispatcher.dispatch_event("iscsi.target.changed", {"operation": "create", "ids": [target_id]})

        return target_id
Пример #23
0
    def run(self, alertfilter):
        """
        Create an alert filter.

        Bug fix: defaults are applied *before* the insert so the persisted
        document actually contains them -- the old code normalized after
        inserting, leaving 'predicates' missing from the stored entry.
        """
        normalize(alertfilter, {
            'predicates': []
        })
        id = self.datastore.insert('alert.filters', alertfilter)

        self.dispatcher.dispatch_event('alert.filter.changed', {
            'operation': 'create',
            'ids': [id]
        })
Пример #24
0
    def run(self, alertfilter):
        """
        Create an alert filter.

        Bug fix: normalize() now runs before the datastore insert; previously
        the 'predicates' default was applied only after the document had
        already been persisted, so it never reached the datastore.
        """
        normalize(alertfilter, {
            'predicates': []
        })
        id = self.datastore.insert('alert.filters', alertfilter)

        self.dispatcher.dispatch_event('alert.filter.changed', {
            'operation': 'create',
            'ids': [id]
        })
Пример #25
0
    def run(self, peer, initial_credentials=None):
        """Delegate peer creation to the type-specific subtask, then emit the change event."""
        normalize(peer, {'health_check_interval': 60})

        peer_id = self.run_subtask_sync(
            'peer.{0}.create'.format(peer.get('type')), peer, initial_credentials)

        self.dispatcher.dispatch_event('peer.changed', {
            'operation': 'create',
            'ids': [peer_id]
        })
Пример #26
0
    def run(self, directory):
        """
        Create a directory-service entry and activate it in dscached.

        :param directory: directory document; 'type' selects the plugin and
            'parameters' holds plugin-specific settings
        :return: datastore id of the new directory
        :raises TaskException: when parameter normalization fails or the name
            already exists
        """
        try:
            params = self.dispatcher.call_sync(
                'dscached.management.normalize_parameters', directory['type'],
                directory.get('parameters', {}))
        except RpcException as err:
            raise TaskException(err.code, err.message)

        # Directory names must be unique.
        if self.datastore.exists('directories',
                                 ('name', '=', directory['name'])):
            raise TaskException(
                errno.EEXIST,
                'Directory {0} already exists'.format(directory['name']))

        normalize(
            directory, {
                'enabled': False,
                'enumerate': True,
                'immutable': False,
                'uid_range': None,
                'gid_range': None
            })

        # Replace passed in params with normalized ones
        directory['parameters'] = params

        # Unwrap the password field before persisting.
        for k, v in directory['parameters'].items():
            if k == 'password':
                directory['parameters'][k] = unpassword(v)

        if directory['type'] == 'winbind':
            # winbind requires id-mapping ranges and a running smb service.
            normalize(directory, {
                'uid_range': [100000, 999999],
                'gid_range': [100000, 999999]
            })

            smb = self.dispatcher.call_sync('service.query',
                                            [('name', '=', 'smb')],
                                            {"single": True})
            if not q.get(smb, 'config.enable'):
                q.set(smb, 'config.enable', True)
                self.run_subtask_sync('service.update', smb['id'], smb)

        self.id = self.datastore.insert('directories', directory)
        self.dispatcher.call_sync('dscached.management.configure_directory',
                                  self.id)
        self.dispatcher.dispatch_event('directory.changed', {
            'operation': 'create',
            'ids': [self.id]
        })

        # Append the new directory to the lookup order and reload dscached config.
        node = ConfigNode('directory', self.configstore)
        node['search_order'] = node['search_order'].value + [directory['name']]
        self.dispatcher.call_sync('dscached.management.reload_config')
        return self.id
Пример #27
0
    def run(self, share):
        """
        Create an SMB share.

        Applies property defaults, persists the share, then writes it into the
        samba registry inside a transaction and reloads samba.

        :param share: share document; 'properties' holds the SMB options
        :return: datastore id of the new share
        :raises TaskException: when the samba registry cannot be updated
        """
        normalize(share['properties'], {
            'read_only': False,
            'guest_ok': False,
            'guest_only': False,
            'browseable': True,
            'recyclebin': False,
            'show_hidden_files': False,
            'previous_versions': True,
            'vfs_objects': [],
            'hosts_allow': None,
            'hosts_deny': None,
            'extra_parameters': {},
            'full_audit_prefix': '%u|%I|%m|%S',
            'full_audit_priority': 'notice',
            'full_audit_failure': 'connect',
            'full_audit_success': 'open mkdir unlink rmdir rename',
            'case_sensitive': 'AUTO',
            'allocation_roundup_size': 1048576,
            'ea_support': False,
            'store_dos_attributes': False,
            'map_archive': True,
            'map_hidden': False,
            'map_readonly': True,
            'map_system': False,
            'fruit_metadata': 'STREAM'
        })

        id = self.datastore.insert('shares', share)
        path = self.dispatcher.call_sync('share.translate_path', id)

        try:
            smb_conf = smbconf.SambaConfig('registry')
            smb_conf.transaction_start()
            try:
                smb_share = smbconf.SambaShare()
                convert_share(self.dispatcher, smb_share, path, share['enabled'], share['properties'])
                smb_conf.shares[share['name']] = smb_share
            except BaseException as err:
                # Roll the registry back so a half-written share is never left behind.
                smb_conf.transaction_cancel()
                # Bug fix: the placeholder was never substituted -- err was passed
                # as a stray third argument instead of being formatted into the message.
                raise TaskException(errno.EBUSY, 'Failed to update samba configuration: {0}'.format(err))
            else:
                smb_conf.transaction_commit()

            reload_samba()
        except smbconf.SambaConfigException:
            raise TaskException(errno.EFAULT, 'Cannot access samba registry')

        self.dispatcher.dispatch_event('share.smb.changed', {
            'operation': 'create',
            'ids': [id]
        })

        return id
Пример #28
0
    def run(self, port):
        """Create a VM SCSI port, regenerate the ctl config group and emit the event."""
        normalize(port, {'luns': []})

        port_id = self.datastore.insert('vm.scsi_ports', port)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.emit_event('vm.scsi.port.changed', {'operation': 'create', 'ids': [port_id]})

        return port_id
Пример #29
0
    def run(self, backup):
        """Persist a backup definition and announce its creation."""
        normalize(backup, {'properties': {}})

        backup_id = self.datastore.insert('backup', backup)

        self.dispatcher.emit_event('backup.changed', {
            'operation': 'create',
            'ids': [backup_id]
        })

        return backup_id
Пример #30
0
    def run(self, share):
        """Create a WebDAV share, regenerate its config and reload the service."""
        normalize(share['properties'], {'read_only': False, 'permission': False})

        share_id = self.datastore.insert('shares', share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'webdav')
        self.dispatcher.call_sync('service.reload', 'webdav')
        self.dispatcher.dispatch_event('share.webdav.changed', {'operation': 'create', 'ids': [share_id]})

        return share_id
Пример #31
0
    def run(self, directory):
        """
        Create a directory-service entry and activate it in dscached.

        :param directory: directory document; 'type' selects the plugin and
            'parameters' holds plugin-specific settings
        :return: datastore id of the new directory
        :raises TaskException: when parameter normalization fails or the name
            already exists
        """
        try:
            params = self.dispatcher.call_sync(
                'dscached.management.normalize_parameters',
                directory['type'],
                directory.get('parameters', {})
            )
        except RpcException as err:
            raise TaskException(err.code, err.message)

        # Directory names must be unique.
        if self.datastore.exists('directories', ('name', '=', directory['name'])):
            raise TaskException(errno.EEXIST, 'Directory {0} already exists'.format(directory['name']))

        normalize(directory, {
            'enabled': False,
            'enumerate': True,
            'immutable': False,
            'uid_range': None,
            'gid_range': None
        })

        # Replace passed in params with normalized ones
        directory['parameters'] = params

        # Unwrap the password field before persisting.
        for k, v in directory['parameters'].items():
            if k == 'password':
                directory['parameters'][k] = unpassword(v)

        if directory['type'] == 'winbind':
            # winbind requires id-mapping ranges and a running smb service.
            normalize(directory, {
                'uid_range': [100000, 999999],
                'gid_range': [100000, 999999]
            })

            smb = self.dispatcher.call_sync('service.query', [('name', '=', 'smb')], {"single": True})
            if not q.get(smb, 'config.enable'):
                q.set(smb, 'config.enable', True)
                self.run_subtask_sync('service.update', smb['id'], smb)

        self.id = self.datastore.insert('directories', directory)
        self.dispatcher.call_sync('dscached.management.configure_directory', self.id)
        self.dispatcher.dispatch_event('directory.changed', {
            'operation': 'create',
            'ids': [self.id]
        })

        # Append the new directory to the lookup order and reload dscached config.
        node = ConfigNode('directory', self.configstore)
        node['search_order'] = node['search_order'].value + [directory['name']]
        self.dispatcher.call_sync('dscached.management.reload_config')
        return self.id
Пример #32
0
    def run(self, target):
        """Create an iSCSI target and notify listeners."""
        defaults = {
            'description': None,
            'auth_group': 'no-authentication',
            'portal_group': 'default',
            'extents': []
        }
        normalize(target, defaults)

        target_id = self.datastore.insert('iscsi.targets', target)
        self.dispatcher.dispatch_event('iscsi.target.changed', {
            'operation': 'create',
            'ids': [target_id]
        })

        return target_id
Пример #33
0
    def run(self, share):
        """Create a WebDAV share, regenerate its config group and reload the service."""
        webdav_defaults = {
            'read_only': False,
            'permission': False,
            'show_hidden_files': False,
        }
        normalize(share['properties'], webdav_defaults)

        share_id = self.datastore.insert('shares', share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'webdav')
        self.dispatcher.call_sync('service.reload', 'webdav')
        self.dispatcher.dispatch_event('share.webdav.changed', {'operation': 'create', 'ids': [share_id]})

        return share_id
Пример #34
0
    def run(self, peer, initial_credentials=None):
        """Create a peer by delegating to the type-specific creation task.

        Defaults the health check interval, runs 'peer.<type>.create',
        emits a change event and returns the created peer's id.
        """
        normalize(peer, {
            'health_check_interval': 60
        })

        id = self.run_subtask_sync(
            'peer.{0}.create'.format(peer.get('type')),
            peer,
            initial_credentials
        )

        self.dispatcher.dispatch_event('peer.changed', {
            'operation': 'create',
            'ids': [id]
        })

        # Fix: the created id was previously discarded; every other create
        # task in this file returns the new record's id to the caller.
        return id
Пример #35
0
    def run(self, peer, initial_credentials):
        """Create an SSH peer entry after validating name presence and uniqueness."""
        if 'name' not in peer:
            raise TaskException(errno.EINVAL, 'Name has to be specified')

        # Optional SSH connection parameters default to "unset".
        defaults = {'port': 22, 'password': None, 'privkey': None, 'hostkey': None}
        normalize(peer, defaults)

        if self.datastore.exists('peers', ('name', '=', peer['name'])):
            raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

        return self.datastore.insert('peers', peer)
Пример #36
0
    def run(self, auth_group):
        """Create an iSCSI auth group and reconfigure the CTL service."""
        normalize(auth_group, {
            # Allocate the next 'ag' primary key unless the caller supplied one.
            'id': self.datastore.collection_get_next_pkey('iscsi.auth', 'ag'),
            'users': None,
            'initiators': None,
            'networks': None
        })

        pkey = self.datastore.insert('iscsi.auth', auth_group)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.call_sync('service.reload', 'ctl')
        self.dispatcher.dispatch_event('iscsi.auth.changed', {'operation': 'create', 'ids': [pkey]})
        return pkey
    def run(self, share):
        """Create an NFS share record and reload the NFS service."""
        # Default every optional export property to an inert value.
        defaults = {
            'alldirs': False,
            'read_only': False,
            'maproot_user': None,
            'maproot_group': None,
            'mapall_user': None,
            'mapall_group': None,
            'hosts': [],
            'security': []
        }
        normalize(share['properties'], defaults)

        pkey = self.datastore.insert('shares', share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'nfs')
        self.dispatcher.call_sync('service.reload', 'nfs')
        return pkey
Пример #38
0
    def run(self, auth_group):
        """Create an iSCSI auth group record and apply the CTL configuration."""
        defaults = {
            "id": self.datastore.collection_get_next_pkey("iscsi.auth", "ag"),
            "users": None,
            "initiators": None,
            "networks": None,
        }
        normalize(auth_group, defaults)

        pkey = self.datastore.insert("iscsi.auth", auth_group)
        self.dispatcher.call_sync("etcd.generation.generate_group", "ctl")
        self.dispatcher.call_sync("services.reload", "ctl")
        self.dispatcher.dispatch_event("iscsi.auth.changed", {
            "operation": "create",
            "ids": [pkey],
        })
        return pkey
Пример #39
0
    def run(self, auth_group):
        """Insert a new iSCSI auth group, then regenerate and reload CTL."""
        # Assign a fresh 'ag' key when none is provided by the caller.
        next_key = self.datastore.collection_get_next_pkey('iscsi.auth', 'ag')
        normalize(auth_group, {'id': next_key, 'users': None, 'initiators': None, 'networks': None})

        pkey = self.datastore.insert('iscsi.auth', auth_group)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.call_sync('service.reload', 'ctl')
        self.dispatcher.dispatch_event('iscsi.auth.changed', {
            'operation': 'create',
            'ids': [pkey]
        })
        return pkey
Пример #40
0
    def run(self, portal):
        """Create an iSCSI portal group and push the new CTL configuration."""
        normalize(portal, {
            # Next free 'pg' key unless the caller supplied one.
            "id": self.datastore.collection_get_next_pkey("iscsi.portals", "pg"),
            "discovery_auth_group": None,
            "discovery_auth_method": "NONE",
            "portals": [],
        })

        pkey = self.datastore.insert("iscsi.portals", portal)
        self.dispatcher.call_sync("etcd.generation.generate_group", "ctl")
        self.dispatcher.call_sync("services.reload", "ctl")
        event = {"operation": "create", "ids": [pkey]}
        self.dispatcher.dispatch_event("iscsi.portal.changed", event)
        return pkey
Пример #41
0
    def run(self, portal):
        """Insert an iSCSI portal record, then regenerate and reload CTL."""
        defaults = {
            'id': self.datastore.collection_get_next_pkey('iscsi.portals', 'pg'),
            'discovery_auth_group': None,
            'discovery_auth_method': 'NONE',
            'portals': []
        }
        normalize(portal, defaults)

        pkey = self.datastore.insert('iscsi.portals', portal)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.call_sync('service.reload', 'ctl')
        self.dispatcher.dispatch_event('iscsi.portal.changed', {'operation': 'create', 'ids': [pkey]})
        return pkey
Пример #42
0
    def run(self, ntp, force=False):
        """Create an NTP server entry.

        Verifies the server is reachable (unless *force* is set), validates
        the poll interval exponents, persists the record, regenerates the
        ntpd config and restarts the service. Returns the new primary key.
        """
        if self.datastore.exists('ntpservers',
                                 ('address', '=', ntp['address'])):
            raise TaskException(
                errno.ENXIO, 'NTP Server with given address already exists')

        # Probe with a query-only ntpdate; unreachable servers are rejected
        # unless the caller explicitly forces creation.
        try:
            system('ntpdate', '-q', ntp['address'])
        except SubprocessException:
            if not force:
                raise TaskException(
                    errno.EACCES,
                    'Server could not be reached. Check "Force" to continue regardless.'
                )

        normalize(ntp, {'minpoll': 6, 'maxpoll': 10})

        minpoll = ntp.get('minpoll')
        maxpoll = ntp.get('maxpoll')

        if maxpoll <= minpoll:
            raise TaskException(errno.EINVAL,
                                'Max Poll should be higher than Min Poll')

        # ntpd accepts poll exponents between 4 and 17 (2^4 .. 2^17 seconds).
        if minpoll > 17 or minpoll < 4:
            raise TaskException(errno.EINVAL,
                                'Min Poll range should be between 4 and 17')

        if maxpoll > 17 or maxpoll < 4:
            raise TaskException(errno.EINVAL,
                                'Max Poll range should be between 4 and 17')

        try:
            pkey = self.datastore.insert('ntpservers', ntp)
            self.dispatcher.call_sync('etcd.generation.generate_group', 'ntpd')
            self.dispatcher.call_sync('service.restart', 'ntpd')
            self.dispatcher.dispatch_event('ntp_server.changed', {
                'operation': 'create',
                'ids': [pkey]
            })
        except DatastoreException as e:
            raise TaskException(errno.EBADMSG,
                                'Cannot create NTP Server: {0}'.format(str(e)))
        except RpcException as e:
            # Fix: this message previously read 'Cannot generate certificate',
            # a copy-paste error from an unrelated task.
            raise TaskException(
                errno.ENXIO, 'Cannot regenerate ntpd configuration: {0}'.format(str(e)))
        return pkey
Пример #43
0
    def run(self, share):
        """Create an NFS share and reload the NFS service configuration."""
        props = share['properties']
        normalize(props, {
            'alldirs': False,
            'read_only': False,
            'maproot_user': None,
            'maproot_group': None,
            'mapall_user': None,
            'mapall_group': None,
            'hosts': [],
            'security': []
        })

        pkey = self.datastore.insert('shares', share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'nfs')
        # NFS reload can be slow on loaded systems; give it up to a minute.
        self.dispatcher.call_sync('service.reload', 'nfs', timeout=60)
        return pkey
Пример #44
0
    def run(self, peer, initial_credentials):
        """Persist an SSH peer entry; the name must be present and unique."""
        if 'name' not in peer:
            raise TaskException(errno.EINVAL, 'Name has to be specified')

        # Default the SSH connection parameters.
        normalize(peer, {
            'port': 22,
            'password': None,
            'privkey': None,
            'hostkey': None
        })

        duplicate = self.datastore.exists('peers', ('name', '=', peer['name']))
        if duplicate:
            raise TaskException(
                errno.EINVAL,
                'Peer entry {0} already exists'.format(peer['name']))

        return self.datastore.insert('peers', peer)
Пример #45
0
    def run(self, share, skip_dataset=False):
        """Create a share and (optionally) its backing dataset.

        Takes the global 'sharing' lock, defaults optional fields, creates
        the per-type root dataset and the share dataset/zvol unless
        *skip_dataset* is set, delegates to 'share.<type>.create' and emits
        a 'shares.changed' event. Returns the id of the created share.
        """
        with self.dispatcher.get_lock('sharing'):
            pool = share['target']
            root_ds = os.path.join(pool, share['type'])
            ds_name = os.path.join(root_ds, share['name'])
            share_type = self.dispatcher.call_sync('shares.supported_types').get(share['type'])

            normalize(share, {
                'enabled': True,
                'compression': 'lz4',
                'description': ''
            })

            if not share_type:
                # Fix: TaskException takes an errno as its first argument;
                # this raise previously passed only the message.
                raise TaskException(errno.EINVAL, 'Unsupported sharing type {0}'.format(share['type']))

            if not skip_dataset:
                if not self.dispatcher.call_sync('zfs.dataset.query', [('name', '=', root_ds)], {'single': True}):
                    # Create root dataset for given sharing type
                    self.join_subtasks(self.run_subtask('volume.dataset.create', pool, root_ds, 'FILESYSTEM'))

                if share_type['subtype'] == 'file':
                    self.join_subtasks(self.run_subtask('volume.dataset.create', pool, ds_name, 'FILESYSTEM', {
                        'permissions_type': share_type['perm_type'],
                        'properties': {
                            'compression': {'value': share['compression']}
                        }
                    }))

                if share_type['subtype'] == 'block':
                    self.join_subtasks(self.run_subtask('volume.dataset.create', pool, ds_name, 'VOLUME', {
                        'volsize': share['properties']['size'],
                        'properties': {
                            'compression': {'value': share['compression']}
                        }
                    }))

            ids = self.join_subtasks(self.run_subtask('share.{0}.create'.format(share['type']), share))
            self.dispatcher.dispatch_event('shares.changed', {
                'operation': 'create',
                'ids': ids
            })

            return ids[0]
    def create_device(self, container, res):
        """Create a device resource (DISK, NIC or VOLUME) for *container*.

        Dispatches on res['type'] and runs the subtasks needed to back the
        device with dataset/datastore state. Returns nothing.
        """
        if res['type'] == 'DISK':
            container_ds = os.path.join(container['target'], 'vm', container['name'])
            container_dir = self.dispatcher.call_sync('volume.get_dataset_path', container_ds)
            ds_name = os.path.join(container_ds, res['name'])
            # Back the disk with a ZVOL of the requested size.
            self.join_subtasks(self.run_subtask('volume.dataset.create', {
                'volume': container['target'],
                'id': ds_name,
                'type': 'VOLUME',
                'volsize': res['properties']['size']
            }))

            if res['properties'].get('source'):
                # Seed the ZVOL from a downloadable, checksum-verified image.
                source = res['properties']['source']
                self.join_subtasks(self.run_subtask(
                    'container.download_image',
                    source['url'],
                    source['sha256'],
                    container_dir,
                    os.path.join('/dev/zvol', ds_name)
                ))

        if res['type'] == 'NIC':
            # Generate a MAC address when the caller did not provide one.
            normalize(res['properties'], {
                'link_address': self.dispatcher.call_sync('container.generate_mac')
            })

        if res['type'] == 'VOLUME':
            properties = res['properties']
            mgmt_net = ipaddress.ip_interface(self.configstore.get('container.network.management'))
            container_ds = os.path.join(container['target'], 'vm', container['name'])
            opts = {}

            if properties['type'] == 'NFS':
                # Restrict the NFS export to the management network only.
                opts['sharenfs'] = {'value': '-network={0}'.format(str(mgmt_net.network))}
                if not self.configstore.get('service.nfs.enable'):
                    self.join_subtasks(self.run_subtask('service.update', 'nfs', {'enable': True}))

            if properties['type'] == 'VT9P':
                # 'auto' volumes get a dedicated child dataset created for them.
                if properties.get('auto'):
                    self.join_subtasks(self.run_subtask('volume.dataset.create', {
                        'volume': container['target'],
                        'id': os.path.join(container_ds, res['name'])
                    }))
Пример #47
0
    def run(self, backup):
        """Create a backup definition after provider-specific initialization."""
        if 'id' in backup and self.datastore.exists('backup', ('id', '=', backup['id'])):
            raise TaskException(errno.EEXIST, 'Backup with ID {0} already exists'.format(backup['id']))

        if self.datastore.exists('backup', ('name', '=', backup['name'])):
            raise TaskException(errno.EEXIST, 'Backup with name {0} already exists'.format(backup['name']))

        normalize(backup, {'properties': {}})

        # Let the provider-specific init task compute the effective properties.
        init_task = 'backup.{0}.init'.format(backup['provider'])
        backup['properties'] = self.run_subtask_sync(init_task, backup)
        pkey = self.datastore.insert('backup', backup)
        self.dispatcher.emit_event('backup.changed', {'operation': 'create', 'ids': [pkey]})
        return pkey
Пример #48
0
    def run(self, backup):
        """Register a backup definition and emit a creation event."""
        if 'id' in backup and self.datastore.exists('backup', ('id', '=', backup['id'])):
            raise TaskException(errno.EEXIST, 'Backup with ID {0} already exists'.format(backup['id']))

        if self.datastore.exists('backup', ('name', '=', backup['name'])):
            raise TaskException(errno.EEXIST, 'Backup with name {0} already exists'.format(backup['name']))

        normalize(backup, {
            'properties': {}
        })

        # The provider init subtask returns the fully populated property set.
        backup['properties'] = self.run_subtask_sync(
            'backup.{0}.init'.format(backup['provider']), backup)
        new_id = self.datastore.insert('backup', backup)
        self.dispatcher.emit_event('backup.changed', {
            'operation': 'create',
            'ids': [new_id]
        })

        return new_id
Пример #49
0
    def run(self, portal):
        """Validate, create and activate a new iSCSI portal group."""
        # Reject bad listen addresses / auth group combinations up front.
        validate_portal_port(portal.get('listen'))
        check_auth_group_mode(self.datastore, portal.get('discovery_auth_group'))

        defaults = {
            'id': self.datastore.collection_get_next_pkey('iscsi.portals', 'pg'),
            'discovery_auth_group': None,
            'discovery_auth_method': 'NONE',
            'portals': []
        }
        normalize(portal, defaults)

        pkey = self.datastore.insert('iscsi.portals', portal)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.call_sync('service.reload', 'ctl')
        self.dispatcher.dispatch_event('iscsi.portal.changed', {
            'operation': 'create',
            'ids': [pkey]
        })
        return pkey
Пример #50
0
 def normalize_parameters(cls, parameters):
     """Fill LDAP directory parameters with their default values."""
     defaults = {
         '%type': 'ldap-directory-params',
         'user_suffix': 'ou=users',
         'group_suffix': 'ou=groups',
         'krb_principal': None,
         'encryption': 'OFF',
         'certificate': None,
         'verify_certificate': True
     }
     return normalize(parameters, defaults)
Пример #51
0
    def run(self, alertfilter):
        """Create an alert filter and splice it into the configured order."""
        normalize(alertfilter, {'clazz': None, 'predicates': []})

        order = self.configstore.get('alert.filter.order')
        # 'index' is consumed here; it is not stored with the filter itself.
        position = alertfilter.pop('index', len(order))
        pkey = self.datastore.insert('alert.filters', alertfilter)
        order.insert(position, pkey)
        self.configstore.set('alert.filter.order', order)

        self.dispatcher.dispatch_event('alert.filter.changed', {
            'operation': 'create',
            'ids': [pkey]
        })

        # Every other filter's position shifted, so report them as updated.
        self.dispatcher.dispatch_event('alert.filter.changed', {
            'operation': 'update',
            'ids': list(set(order) - {pkey})
        })

        return pkey
Пример #52
0
 def normalize_parameters(parameters):
     """Return FreeIPA directory parameters with defaults filled in."""
     defaults = {
         '%type': 'FreeipaDirectoryParams',
         'realm': '',
         'server': None,
         'kdc': None,
         'krb_principal': None,
         'username': '',
         'password': '',
         'user_suffix': 'cn=users,cn=accounts',
         'group_suffix': 'cn=groups,cn=accounts'
     }
     return normalize(parameters, defaults)
Пример #53
0
    def run(self, disk):
        """Create a file-backed simulator disk and publish it via CTL."""
        sim_dir = self.dispatcher.call_sync('system_dataset.request_directory', 'simulator')
        normalize(disk, {
            'vendor': 'FreeNAS',
            'path': os.path.join(sim_dir, disk['id']),
            'model': 'Virtual Disk',
            'serial': self.dispatcher.call_sync('share.iscsi.generate_serial'),
            'block_size': 512,
            'rpm': '7200',
            'online': True
        })

        disk['naa'] = self.dispatcher.call_sync('share.iscsi.generate_naa')

        # Create the backing file (sparse, sized to mediasize) if missing.
        if not os.path.exists(disk['path']):
            with open(disk['path'], 'w') as backing:
                backing.truncate(disk['mediasize'])

        self.datastore.insert('simulator.disks', disk)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.call_sync('service.reload', 'ctl')
Пример #54
0
 def normalize_parameters(parameters):
     """Apply winbind directory parameter defaults."""
     defaults = {
         '%type': 'winbind-directory-params',
         'realm': '',
         'username': '******',
         'password': None,
         'krb_principal': None,
         'site_name': None,
         'dc_address': None,
         'gcs_address': None,
         'allow_dns_updates': True
     }
     return normalize(parameters, defaults)
Пример #55
0
    def run(self, target):
        """Create an iSCSI target, checking that referenced extents exist."""
        for extent in target.get('extents', []):
            found = self.datastore.exists('shares', ('type', '=', 'iscsi'), ('name', '=', extent['name']))
            if not found:
                raise TaskException(errno.ENOENT, "Share {0} not found".format(extent['name']))

        defaults = {
            'description': None,
            'auth_group': 'no-authentication',
            'portal_group': 'default',
            'extents': []
        }
        normalize(target, defaults)

        # Store the target name lowercased.
        target['id'] = target['id'].lower()
        pkey = self.datastore.insert('iscsi.targets', target)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'ctl')
        self.dispatcher.call_sync('service.reload', 'ctl')
        self.dispatcher.dispatch_event('iscsi.target.changed', {'operation': 'create', 'ids': [pkey]})

        return pkey
Пример #56
0
    def run(self, dataset):
        """Create a VMware dataset mapping; install snapshot hooks on first use."""
        normalize(dataset, {'vm_filter_op': 'NONE', 'vm_filter_entries': []})

        is_first_mapping = self.datastore.query('vmware.datasets', count=True) == 0
        pkey = self.datastore.insert('vmware.datasets', dataset)

        if is_first_mapping:
            # To not waste cycles, we register snapshot pre- and post-creation
            # hooks only once there is at least one VMware dataset mapping.
            hooks = (
                ('volume.snapshot.create:before', 'vmware.snapshot.take'),
                ('volume.snapshot.create:after', 'vmware.snapshot.clean'),
                ('volume.snapshot.create:error', 'vmware.snapshot.clean'),
            )
            for hook_name, task_name in hooks:
                self.dispatcher.register_task_hook(hook_name, task_name)

        self.dispatcher.emit_event('vmware.dataset.changed', {
            'operation': 'create',
            'ids': [pkey]
        })

        return pkey
Пример #57
0
 def normalize_parameters(parameters):
     """Fill in defaults for winbind directory parameters."""
     defaults = {
         '%type': 'WinbindDirectoryParams',
         'realm': '',
         'username': '******',
         'password': None,
         'krb_principal': None,
         'site_name': None,
         'dc_address': None,
         'gcs_address': None,
         'allow_dns_updates': True,
         'sasl_wrapping': 'PLAIN'
     }
     return normalize(parameters, defaults)
Пример #58
0
 def normalize_parameters(parameters):
     """Fill FreeIPA directory parameters with their default values."""
     defaults = {
         '%type': 'FreeipaDirectoryParams',
         'realm': '',
         'server': None,
         'kdc': None,
         'krb_principal': None,
         'username': '',
         'password': '',
         'user_suffix': 'cn=users,cn=accounts',
         'group_suffix': 'cn=groups,cn=accounts',
         'encryption': 'NONE',
         'verify_certificate': True
     }
     return normalize(parameters, defaults)
Пример #59
0
 def normalize_parameters(parameters):
     """Apply winbind defaults, including the RID idmap configuration."""
     rid_idmap = {
         '%type': 'WinbindIdmapRidConfig',
         'base_rid': 0,
         'range_start': 20000,
         'range_end': 10000000
     }
     return normalize(parameters, {
         '%type': 'WinbindDirectoryParams',
         'realm': '',
         'username': '******',
         'password': None,
         'krb_principal': None,
         'site_name': None,
         'dc_address': None,
         'gcs_address': None,
         'allow_dns_updates': True,
         'sasl_wrapping': 'SIGN',
         'idmap_type': 'RID',
         'idmap': rid_idmap
     })
Пример #60
0
    def run(self, share, dataset_properties=None, enable_service=False):
        """Create a share over a dataset, zvol, directory or plain file.

        Verifies the target is shareable, creates/updates the backing
        dataset when needed, delegates to the type-specific
        'share.<type>.create' subtask, saves a backup config next to the
        share data, and optionally enables the backing service.
        Returns the id of the created share.
        """
        # Determine whether the requested target can actually be shared.
        if share['target_type'] == 'ZVOL':
            parent_ds = '/'.join(share['target_path'].split('/')[:-1])
            shareable = bool(self.dispatcher.call_sync('volume.dataset.query', [('name', '=', parent_ds)]))
        else:
            share_path = self.dispatcher.call_sync('share.expand_path', share['target_path'], share['target_type'])
            if share['target_type'] != 'FILE':
                share_path = os.path.dirname(share_path)
            shareable = os.path.exists(share_path)

        if not shareable:
            raise TaskException(errno.ENOENT, 'Selected share target {0} does not exist or cannot be created'.format(
                share['target_path']
            ))

        root = self.dispatcher.call_sync('volume.get_volumes_root')
        share_type = self.dispatcher.call_sync('share.supported_types').get(share['type'])
        pool_mountpoints = tuple(self.dispatcher.call_sync('volume.query', [], {'select': 'mountpoint'}))

        assert share_type['subtype'] in ('FILE', 'BLOCK'),\
            "Unsupported Share subtype: {0}".format(share_type['subtype'])

        # A (type, name) pair uniquely identifies a share.
        if self.datastore.exists(
            'shares',
            ('type', '=', share['type']),
            ('name', '=', share['name'])
        ):
            raise TaskException(errno.EEXIST, 'Share {0} of type {1} already exists'.format(
                share['name'],
                share['type']
            ))

        normalize(share, {
            'enabled': True,
            'immutable': False,
            'description': ''
        })

        if share['target_type'] in ('DATASET', 'ZVOL'):
            dataset = share['target_path']
            pool = share['target_path'].split('/')[0]
            path = os.path.join(root, dataset)

            if not self.dispatcher.call_sync('zfs.dataset.query', [('name', '=', dataset)], {'single': True}):
                # Dataset does not exist yet - create it with the right type.
                if share_type['subtype'] == 'FILE':
                    self.run_subtask_sync('volume.dataset.create', {
                        'volume': pool,
                        'id': dataset,
                        'permissions_type': share_type['perm_type'],
                        'properties': dataset_properties or {}
                    })

                if share_type['subtype'] == 'BLOCK':
                    self.run_subtask_sync('volume.dataset.create', {
                        'volume': pool,
                        'id': dataset,
                        'type': 'VOLUME',
                        'volsize': share['properties']['size'],
                        'properties': dataset_properties or {}
                    })
            else:
                # Existing dataset: align its permission type with the share.
                if share_type['subtype'] == 'FILE':
                    self.run_subtask('volume.dataset.update', dataset, {
                        'permissions_type': share_type['perm_type']
                    })

        elif share['target_type'] == 'DIRECTORY':
            if not share['target_path'].startswith(pool_mountpoints):
                raise TaskException(errno.EINVAL, "Provided directory has to reside within user defined ZFS pool")

            # Verify that target directory exists
            path = share['target_path']
            if not os.path.isdir(path):
                raise TaskException(errno.ENOENT, "Target directory {0} doesn't exist".format(path))

        elif share['target_type'] == 'FILE':
            if not share['target_path'].startswith(pool_mountpoints):
                raise TaskException(errno.EINVAL, "Provided file has to reside within user defined ZFS pool")
            # Verify that target file exists
            path = share['target_path']
            if not os.path.isfile(path):
                raise TaskException(errno.ENOENT, "Target file {0} doesn't exist".format(path))

        else:
            raise AssertionError('Invalid target type')

        # ZVOLs and plain files carry no directory permissions to apply here.
        if share.get('permissions') and share['target_type'] not in ('ZVOL', 'FILE'):
            self.run_subtask_sync('file.set_permissions', path, share.pop('permissions'))

        id = self.run_subtask_sync('share.{0}.create'.format(share['type']), share)
        self.dispatcher.dispatch_event('share.changed', {
            'operation': 'create',
            'ids': [id]
        })

        # Save a backup copy of the share definition alongside the share data;
        # failure to do so is only a warning, not a task failure.
        new_share = self.datastore.get_by_id('shares', id)
        path = self.dispatcher.call_sync('share.get_directory_path', new_share['id'])
        try:
            save_config(
                path,
                '{0}-{1}'.format(new_share['type'], new_share['name']),
                new_share,
                file_perms=0o600
            )
        except OSError as err:
            self.add_warning(TaskWarning(errno.ENXIO, 'Cannot save backup config file: {0}'.format(str(err))))

        # Warn (or, when enable_service is set, fix) a stopped backing service.
        service_state = self.dispatcher.call_sync('service.query', [('name', '=', share['type'])], {'single': True})
        if service_state['state'] != 'RUNNING':
            if enable_service:
                config = service_state['config']
                config['enable'] = True
                self.run_subtask_sync('service.update', service_state['id'], {'config': config})
            else:
                self.add_warning(TaskWarning(
                    errno.ENXIO, "Share has been created but the service {0} is not currently running "
                                 "Please enable the {0} service.".format(share['type'])
                ))

        return id