Example #1
    def __rquery_to_start_end(self, query):
        unit = query.get('unit')
        if unit:
            verrors = ValidationErrors()
            for i in ('start', 'end'):
                if i in query:
                    verrors.add(
                        f'reporting_query.{i}',
                        f'{i!r} should only be used if "unit" attribute is not provided.',
                    )
            verrors.check()
        else:
            if 'start' not in query:
                unit = 'HOURLY'
            else:
                starttime = query['start']
                endtime = query.get('end') or 'now'

        if unit:
            unit = unit[0].lower()
            page = query['page']
            starttime = f'end-{page + 1}{unit}'
            if not page:
                endtime = 'now'
            else:
                endtime = f'now-{page}{unit}'
        return starttime, endtime
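Every example on this page follows the same accumulate-then-raise pattern: build a ValidationErrors collection, call add() once per problem with a dotted schema path as the key, and finally call check(), which raises only if at least one error was recorded. The stand-in class below is a minimal sketch of that contract for illustration only; it is not the middleware's actual ValidationErrors implementation.

class SimpleValidationErrors(Exception):
    """Illustrative stand-in for the accumulate-then-raise pattern."""

    def __init__(self):
        super().__init__()
        self.errors = []

    def add(self, attribute, errmsg, errno=None):
        # One entry per failed check, keyed by a dotted schema path.
        self.errors.append((attribute, errmsg, errno))

    def check(self):
        # Raise only if at least one error was accumulated.
        if self.errors:
            raise self


verrors = SimpleValidationErrors()
query = {'unit': 'HOURLY', 'start': 'now-1h'}
for key in ('start', 'end'):
    if key in query:
        verrors.add(f'reporting_query.{key}', f'{key!r} should not be used together with "unit".')

try:
    verrors.check()
except SimpleValidationErrors as e:
    print(e.errors)  # [('reporting_query.start', ..., None)]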
Example #2
    def add_jwt_secret(self, data):
        """
        Add a `secret` key used to encode/decode
        JWT messages for sending/receiving gluster
        events.

        `secret` String representing the key to be used
                    to encode/decode JWT messages
        `force` Boolean if set to True, will forcefully
                    wipe any existing jwt key for this
                    peer. Note, if forcefully adding a
                    new key, the other peers in the TSP
                    will also need to be sent this key.

        Note: this secret is only used for messages
        that are destined for the api endpoint at
        http://*:6000/_clusterevents for each peer
        in the trusted storage pool.
        """

        if not data['force'] and self.JWT_SECRET is not None:
            verrors = ValidationErrors()
            verrors.add(
                f'localevent_add_jwt_secret.{data["secret"]}',
                'An existing secret key already exists. Use force to ignore this error'
            )
            verrors.check()

        self.JWT_SECRET = data['secret']
        with open(SECRETS_FILE, 'w+') as f:
            f.write(data['secret'])
Example #3
    def update_zfs_object_props(self, properties, zfs_object):
        verrors = ValidationErrors()
        for k, v in properties.items():
            # If prop already exists we just update it,
            # otherwise create a user property
            prop = zfs_object.properties.get(k)
            if v.get('source') == 'INHERIT':
                if not prop:
                    verrors.add(
                        f'properties.{k}',
                        'Property does not exist and cannot be inherited')
            else:
                if not any(i in v for i in ('parsed', 'value')):
                    verrors.add(
                        f'properties.{k}',
                        '"value" or "parsed" must be specified when setting a property'
                    )
                if not prop and ':' not in k:
                    verrors.add(f'properties.{k}',
                                'User property needs a colon (:) in its name')

        verrors.check()

        try:
            zfs_object.update_properties(properties)
        except libzfs.ZFSException as e:
            raise CallError(f'Failed to update properties: {e!r}')
Example #4
    async def do_update(self, data):
        """
        `appdefaults_aux` adds parameters to the "appdefaults" section of the krb5.conf file.

        `libdefaults_aux` adds parameters to the "libdefaults" section of the krb5.conf file.
        """
        verrors = ValidationErrors()

        old = await self.config()
        new = old.copy()
        new.update(data)
        verrors.add_child(
            'kerberos_settings_update',
            await self._validate_appdefaults(new['appdefaults_aux'])
        )
        verrors.add_child(
            'kerberos_settings_update',
            await self._validate_libdefaults(new['libdefaults_aux'])
        )
        verrors.check()

        await super().do_update(data)

        await self.middleware.call('etc.generate', 'kerberos')
        return await self.config()
Example #5
    def do_create(self, job, data):

        """
        Add `url` webhook that will be called
        with a POST request that will include
        the event that was triggered along with the
        relevant data.

        `url` is an HTTP address (e.g. http://192.168.1.50/endpoint)
        `bearer_token` is a bearer token
        `secret` secret to add JWT bearer token
        """

        verrors = ValidationErrors()

        # there doesn't seem to be an upper limit on the amount
        # of webhook endpoints that can be added to the daemon
        # so place an arbitrary limit of 5 for now
        if len(self.middleware.call_sync('gluster.eventsd.webhooks')['webhooks']) >= 5:
            verrors.add(
                f'webhook_create.{data["url"]}',
                'Maximum number of webhooks has been reached. '
                'Delete one or more and try again.'
            )

        verrors.check()

        cmd = self.format_cmd(data)

        return self.run_cmd(cmd)
Example #6
    async def get_cred(self, data):
        '''
        Get kerberos cred from directory services config to use for `do_kinit`.
        '''
        conf = data.get('conf', {})
        if conf.get('kerberos_principal'):
            return {'kerberos_principal': conf['kerberos_principal']}

        verrors = ValidationErrors()
        dstype = DSType[data['dstype']]
        if dstype is DSType.DS_TYPE_ACTIVEDIRECTORY:
            for k in ['bindname', 'bindpw', 'domainname']:
                if not conf.get(k):
                    verrors.add(f'conf.{k}', 'Parameter is required.')

            verrors.check()
            return {
                'username': f'{conf["bindname"]}@{conf["domainname"].upper()}',
                'password': conf['bindpw']
            }

        for k in ['binddn', 'bindpw', 'kerberos_realm']:
            if not conf.get(k):
                verrors.add(f'conf.{k}', 'Parameter is required.')

        verrors.check()
        krb_realm = await self.middleware.call(
            'kerberos.realm.query', [('id', '=', conf['kerberos_realm'])],
            {'get': True})
        bind_cn = (conf['binddn'].split(','))[0].split("=")
        return {
            'username': f'{bind_cn[1]}@{krb_realm["realm"]}',
            'password': conf['bindpw']
        }
Example #7
    async def get_versions(self, release, options):
        current_chart = release['chart_metadata']
        chart = current_chart['name']
        item_details = await self.middleware.call('catalog.get_item_details', chart, {
            'catalog': release['catalog'],
            'train': release['catalog_train'],
        })

        new_version = options['item_version']
        if new_version == 'latest':
            new_version = await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions', item_details['versions']
            )

        if new_version not in item_details['versions']:
            raise CallError(f'Unable to locate specified {new_version!r} item version.')

        verrors = ValidationErrors()
        if parse_version(new_version) <= parse_version(current_chart['version']):
            verrors.add(
                'upgrade_options.item_version',
                f'Upgrade version must be greater than the current version {current_chart["version"]!r}.'
            )

        verrors.check()

        return {
            'specified_version': item_details['versions'][new_version],
            'versions': item_details['versions'],
            'latest_version': item_details['versions'][await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions', item_details['versions']
            )]
        }
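Example #7 relies on parse_version for the version comparison; the snippet does not show the import, but assuming it is packaging.version.parse (an assumption), the comparison is semantic rather than lexical:

from packaging.version import parse as parse_version

# '1.10.0' is a newer release than '1.9.2', even though it sorts lower as a
# plain string, which is exactly why a semantic comparison is used here.
assert parse_version('1.10.0') > parse_version('1.9.2')
assert '1.10.0' < '1.9.2'  # naive string comparison gets this wrong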
Example #8
    async def do_update(self, pk, data):
        """
        Update attributes of an existing group.
        """

        group = await self._get_instance(pk)
        add_groupmap = False

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_update', pk=pk)
        verrors.check()
        old_smb = group['smb']

        group.update(data)
        group.pop('users', None)
        new_smb = group['smb']

        if 'name' in data and data['name'] != group['group']:
            if g := (await self.middleware.call('smb.groupmap_list')).get(group['group']):
                await self.middleware.call(
                    'smb.groupmap_delete',
                    {"sid": g['SID']}
                )

            group['group'] = group.pop('name')
            if new_smb:
                add_groupmap = True
Example #9
    async def started(self):
        """
        Returns False if disabled, True if healthy, raises exception if faulted.
        """
        verrors = ValidationErrors()
        ldap = await self.config()
        if not ldap['enable']:
            return False

        await self.common_validate(ldap, ldap, verrors)
        try:
            verrors.check()
        except Exception:
            await self.middleware.call(
                'datastore.update',
                'directoryservice.ldap',
                ldap['id'],
                {'ldap_enable': False}
            )
            raise CallError('Automatically disabling LDAP service due to invalid configuration.',
                            errno.EINVAL)

        try:
            await asyncio.wait_for(self.middleware.call('ldap.get_root_DSE', ldap),
                                   timeout=ldap['timeout'])
        except asyncio.TimeoutError:
            raise CallError(f'LDAP status check timed out after {ldap["timeout"]} seconds.', errno.ETIMEDOUT)

        except Exception as e:
            raise CallError(e)

        return True
Example #10
    async def do_update(self, id, data):
        """
        Update AFP share `id`.
        """
        verrors = ValidationErrors()
        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)],
            {'extend': self._config.datastore_extend,
             'prefix': self._config.datastore_prefix,
             'get': True})
        path = data.get('path')

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingafp_update', verrors, id=id)
        await self.validate(new, 'sharingafp_update', verrors, old=old)

        verrors.check()

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call(
            'datastore.update', self._config.datastore, id, new,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('afp', 'reload')

        return await self.get_instance(id)
Example #11
    async def do_create(self, data):
        """
        Create a new group.

        If `gid` is not provided it is automatically filled with the next one available.

        `allow_duplicate_gid` allows distinct group names to share the same gid.

        `users` is a list of user ids (`id` attribute from `user.query`).
        """

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_create')
        verrors.check()

        if not data.get('gid'):
            data['gid'] = await self.get_next_gid()

        group = data.copy()
        group['group'] = group.pop('name')

        users = group.pop('users', [])

        pk = await self.middleware.call('datastore.insert', 'account.bsdgroups', group, {'prefix': 'bsdgrp_'})

        for user in users:
            await self.middleware.call('datastore.insert', 'account.bsdgroupmembership', {'bsdgrpmember_group': pk, 'bsdgrpmember_user': user})

        await self.middleware.call('service.reload', 'user')

        await self.middleware.call('smb.groupmap_add', data['name'])

        return pk
Example #12
    async def do_update(self, data):
        """
        Update AFP service settings.

        `bindip` is a list of IPs to bind AFP to. Leave blank (empty list) to bind to all
        available IPs.

        `map_acls` defines how to map the effective permissions of authenticated users.
        RIGHTS - Unix-style permissions
        MODE - ACLs
        NONE - Do not map

        `chmod_request` defines advanced permission control that deals with ACLs.
        PRESERVE - Preserve ZFS ACEs for named users and groups or POSIX ACL group mask
        SIMPLE - Change permission as requested without any extra steps
        IGNORE - Permission change requests are ignored
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new['dbpath']:
            await check_path_resides_within_volume(
                verrors, self.middleware, 'afp_update.dbpath', new['dbpath'],
            )

        verrors.check()

        new = await self.compress(new)
        await self._update_service(old, new)

        return await self.config()
Example #13
    def __rquery_to_start_end(self, query):
        unit = query.get('unit')
        if unit:
            verrors = ValidationErrors()
            for i in ('start', 'end'):
                if i in query:
                    verrors.add(
                        f'reporting_query.{i}',
                        f'{i!r} should only be used if "unit" attribute is not provided.',
                    )
            verrors.check()
        else:
            if 'start' not in query:
                unit = 'HOURLY'
            else:
                starttime = query['start']
                endtime = query.get('end') or 'now'

        if unit:
            unit = unit[0].lower()
            page = query['page']
            starttime = f'end-{page + 1}{unit}'
            if not page:
                endtime = 'now'
            else:
                endtime = f'now-{page}{unit}'
        return starttime, endtime
Example #14
    async def do_create(self, data):
        """
        Create a new group.

        If `gid` is not provided it is automatically filled with the next one available.

        `allow_duplicate_gid` allows distinct group names to share the same gid.

        `users` is a list of user ids (`id` attribute from `user.query`).
        """

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_create')
        verrors.check()

        if not data.get('gid'):
            data['gid'] = await self.get_next_gid()

        group = data.copy()
        group['group'] = group.pop('name')

        users = group.pop('users', [])

        group = await self.group_compress(group)
        pk = await self.middleware.call('datastore.insert', 'account.bsdgroups', group, {'prefix': 'bsdgrp_'})

        for user in users:
            await self.middleware.call('datastore.insert', 'account.bsdgroupmembership', {'bsdgrpmember_group': pk, 'bsdgrpmember_user': user})

        await self.middleware.call('service.reload', 'user')

        await self.middleware.call('smb.groupmap_add', data['name'])

        return pk
Example #15
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        if data['pool'] and not await self.middleware.call(
                'pool.query', [['name', '=', data['pool']]]):
            verrors.add(
                f'{schema}.pool',
                'Please provide a valid pool configured in the system.')

        if ipaddress.ip_address(
                data['cluster_dns_ip']) not in ipaddress.ip_network(
                    data['service_cidr']):
            verrors.add(f'{schema}.cluster_dns_ip',
                        'Must be in range of "service_cidr".')

        if data['node_ip'] not in await self.bindip_choices():
            verrors.add(f'{schema}.node_ip',
                        'Please provide a valid IP address.')

        for k, _ in await self.validate_interfaces(data):
            verrors.add(f'{schema}.{k}', 'Please specify a valid interface.')

        for k in ('route_v4', 'route_v6'):
            gateway = data[f'{k}_gateway']
            interface = data[f'{k}_interface']
            if (not gateway and not interface) or (gateway and interface):
                continue
            for k2 in ('gateway', 'interface'):
                verrors.add(
                    f'{schema}.{k}_{k2}',
                    f'{k}_gateway and {k}_interface must be specified together.'
                )

        verrors.check()
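The cluster_dns_ip / service_cidr check above uses the standard-library ipaddress module; a small illustration of that containment test (the addresses below are made up):

import ipaddress

# True: the DNS IP lies inside the service CIDR
print(ipaddress.ip_address('172.17.0.10') in ipaddress.ip_network('172.17.0.0/16'))
# False: an address outside the network fails the same check
print(ipaddress.ip_address('10.0.0.1') in ipaddress.ip_network('172.17.0.0/16'))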
Example #16
    async def resolve_hostnames(self, hostnames):
        """
        Takes a list of hostnames to be asynchronously resolved to their respective IP addresses.
        If IP addresses are given, they are simply returned.
        """
        hostnames = list(set(hostnames))
        loop = asyncio.get_event_loop()
        timeout = 5
        verrors = ValidationErrors()

        results = await asyncio.gather(
            *[self._resolve_hostname(loop, host, timeout) for host in hostnames],
            return_exceptions=True
        )

        ips = []
        for host, result in zip(hostnames, results):
            if isinstance(result, (type(None), asyncio.TimeoutError)):
                verrors.add(f'resolve_hostname.{host}',
                            'Failed to resolve hostname')
            else:
                ips.append(result)

        # if any hostnames failed to be resolved
        # it will be raised here
        verrors.check()

        return list(set(ips))
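The _resolve_hostname helper is not part of the snippet above; a minimal hypothetical sketch of such a helper, assuming it wraps the event loop's getaddrinfo in a timeout and returns None on failure (which is what the type(None) check in the caller expects):

import asyncio


async def _resolve_hostname(loop, hostname, timeout):
    # Hypothetical helper: resolve a single hostname, returning None on
    # timeout or resolver error so the caller can record a validation error.
    try:
        addr_info = await asyncio.wait_for(loop.getaddrinfo(hostname, None), timeout=timeout)
    except (asyncio.TimeoutError, OSError):
        return None
    # getaddrinfo yields (family, type, proto, canonname, sockaddr) tuples;
    # sockaddr[0] is the resolved IP address.
    return addr_info[0][4][0]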
Example #17
    async def do_update(self, id, data):
        """
        Update filesystem ACL template with `id`.
        """
        old = await self.get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        if old['builtin']:
            verrors.add("filesystem_acltemplate_update.builtin",
                        "built-in ACL templates may not be changed")

        if new['name'] != old['name']:
            name_exists = bool(await self.query([('name', '=', new['name'])]))
            if name_exists:
                verrors.add("filesystem_acltemplate_update.name",
                            f"{data['name']}: name is not unique")

        if len(new['acl']) == 0:
            verrors.add("filesystem_acltemplate_update.acl",
                        "At least one ACL entry must be specified.")
        await self.validate_acl(new, "filesystem_acltemplate_update.acl",
                                verrors)
        verrors.check()

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self.get_instance(id)
Example #18
    async def do_create(self, data):
        """
        Create a new boot environment using `name`.

        If a new boot environment is desired which is a clone of another boot environment, `source` can be passed.
        Then, a new boot environment of `name` is created using boot environment `source` by cloning it.

        Ensure that `name` and `source` are valid boot environment names.
        """
        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_create', data['name'])
        verrors.check()

        args = [self.BE_TOOL, 'create']
        source = data.get('source')
        if source:
            args += [
                '-e',
                os.path.join(await self.middleware.call('boot.pool_name'),
                             'ROOT', source) if osc.IS_LINUX else source
            ]
        args.append(data['name'])
        try:
            await run(args, encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to create boot environment: {cpe.stdout}')
        return data['name']
Example #19
    async def do_update(self, data):
        """
        Update AFP service settings.

        `bindip` is a list of IPs to bind AFP to. Leave blank (empty list) to bind to all
        available IPs.

        `map_acls` defines how to map the effective permissions of authenticated users.
        RIGHTS - Unix-style permissions
        MODE - ACLs
        NONE - Do not map

        `chmod_request` defines advanced permission control that deals with ACLs.
        PRESERVE - Preserve ZFS ACEs for named users and groups or POSIX ACL group mask
        SIMPLE - Change permission as requested without any extra steps
        IGNORE - Permission change requests are ignored
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new['dbpath']:
            await check_path_resides_within_volume(
                verrors, self.middleware, 'afp_update.dbpath', new['dbpath'],
            )

        verrors.check()

        new = await self.compress(new)
        await self._update_service(old, new)

        return await self.config()
Example #20
    async def do_create(self, data):
        """
        Create AFP share.

        `allow`, `deny`, `ro`, and `rw` are lists of users and groups. Groups are designated by
        an @ prefix.

        `hostsallow` and `hostsdeny` are lists of hosts and/or networks.
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingafp_create', verrors)
        await self.validate(data, 'sharingafp_create', verrors)

        await check_path_resides_within_volume(
            verrors, self.middleware, 'sharingafp_create.path', path)

        verrors.check()

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)

        await self._service_change('afp', 'reload')

        return data
Example #21
    async def validate_credentials(middleware, data):
        # We would like to validate the following bits:
        # 1) script exists and is executable
        # 2) user exists
        # 3) User can access the script in question
        verrors = ValidationErrors()
        try:
            await middleware.call('user.get_user_obj', {'username': data['user']})
        except KeyError:
            verrors.add('user', f'Unable to locate {data["user"]!r} user')

        await check_path_resides_within_volume(verrors, middleware, 'script', data['script'])

        try:
            can_access = await middleware.call(
                'filesystem.can_access_as_user', data['user'], data['script'], {'execute': True}
            )
        except CallError as e:
            verrors.add('script', f'Unable to validate script: {e}')
        else:
            if not can_access:
                verrors.add('user', f'{data["user"]!r} user does not have permission to execute the script')

        verrors.check()
        return data
Example #22
    async def do_create(self, data):
        """
        Create AFP share.

        `allow`, `deny`, `ro`, and `rw` are lists of users and groups. Groups are designated by
        an @ prefix.

        `hostsallow` and `hostsdeny` are lists of hosts and/or networks.
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingafp_create', verrors)
        await self.validate(data, 'sharingafp_create', verrors)
        verrors.check()

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('afp', 'reload')

        return await self.get_instance(data['id'])
Example #23
    def check_errors(self, func, *args, **kwargs):
        verrors = ValidationErrors()
        try:
            func(*args, **kwargs)
        except CatalogValidationErrors as e:
            verrors.extend(e)
        verrors.check()
Example #24
    async def do_update(self, data):
        """
        Update SMB Service Configuration.

        `netbiosname` defaults to the original hostname of the system.

        `workgroup` and `netbiosname` should have different values.

        `enable_smb1` allows legacy SMB clients to connect to the server when enabled.

        `localmaster` when set, determines if the system participates in a browser election.

        `domain_logons` is used to provide the netlogon service for older Windows clients if enabled.

        `guest` attribute is specified to select the account to be used for guest access. It defaults to "nobody".

        `nullpw` when enabled allows the users to authorize access without a password.

        `hostlookup`, when enabled, allows using hostnames rather than IP addresses in "hostsallow"/"hostsdeny" fields
        of SMB Shares.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()
        ad_enabled = (await self.middleware.call('activedirectory.get_state')
                      != "DISABLED")
        if ad_enabled:
            for i in ('workgroup', 'netbiosname', 'netbiosname_b',
                      'netbiosalias'):
                if old[i] != new[i]:
                    verrors.add(
                        f'smb_update.{i}',
                        'This parameter may not be changed after joining Active Directory (AD). '
                        'If it must be changed, the proper procedure is to leave the AD domain '
                        'and then alter the parameter before re-joining the domain.'
                    )

        await self.validate_smb(new, verrors)
        verrors.check()

        if new['admin_group'] and new['admin_group'] != old['admin_group']:
            await self.middleware.call('smb.add_admin_group',
                                       new['admin_group'])

        # TODO: consider using bidict
        for k, v in LOGLEVEL_MAP.items():
            if new['loglevel'] == v:
                new['loglevel'] = k
                break

        await self.compress(new)

        await self._update_service(old, new)
        await self.reset_smb_ha_mode()

        return await self.config()
Example #25
    async def do_update(self, job, data):
        """
        Update System Dataset Service Configuration.

        `pool` is the name of a valid pool configured in the system which will be used to host the system dataset.

        `pool_exclude` can be specified to make sure that we don't place the system dataset on that pool if `pool`
        is not provided.
        """
        config = await self.config()

        new = config.copy()
        new.update(data)

        verrors = ValidationErrors()
        if new['pool'] and new['pool'] != 'freenas-boot':
            pool = await self.middleware.call('pool.query',
                                              [['name', '=', new['pool']]])
            if not pool:
                verrors.add('sysdataset_update.pool',
                            f'Pool "{new["pool"]}" not found', errno.ENOENT)
            elif pool[0]['encrypt'] == 2:
                # This will cover two cases - passphrase being set for a pool and that it might be locked as well
                verrors.add(
                    'sysdataset_update.pool',
                    f'Pool "{new["pool"]}" has an encryption passphrase set. '
                    'The system dataset cannot be placed on this pool.')
        elif not new['pool']:
            for pool in await self.middleware.call('pool.query',
                                                   [['encrypt', '!=', 2]]):
                if data.get('pool_exclude') == pool['name']:
                    continue
                new['pool'] = pool['name']
                break
            else:
                new['pool'] = 'freenas-boot'
        verrors.check()

        new['syslog_usedataset'] = new['syslog']

        update_dict = new.copy()
        for key in ('is_decrypted', 'basename', 'uuid_a', 'syslog', 'path',
                    'pool_exclude'):
            update_dict.pop(key, None)

        await self.middleware.call('datastore.update', 'system.systemdataset',
                                   config['id'], update_dict,
                                   {'prefix': 'sys_'})

        if config['pool'] != new['pool']:
            await self.migrate(config['pool'], new['pool'])

        await self.setup()

        if config['syslog'] != new['syslog']:
            await self.middleware.call('service.restart', 'syslogd')

        return await self.config()
Example #26
    def common_validation(self, hostname=None):

        verrors = ValidationErrors()

        if hostname:
            self.middleware.call_sync('gluster.peer.resolve_host_or_ip',
                                      hostname, verrors)

        verrors.check()
Example #27
    async def manual_test(self, disks):
        """
        Run manual SMART tests for `disks`.

        `type` indicates what type of SMART test will be run and must be specified.
        """
        verrors = ValidationErrors()
        if not disks:
            verrors.add(
                'disks',
                'Please specify at least one disk.'
            )
        else:
            test_disks_list = []
            disks_data = await self.middleware.call('disk.query')
            devices = await self.middleware.call('device.get_storage_devices_topology')

            for index, disk in enumerate(disks):
                for d in disks_data:
                    if disk['identifier'] == d['identifier']:
                        current_disk = d
                        test_disks_list.append({
                            'disk': current_disk['name'],
                            **disk
                        })
                        break
                else:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'{disk["identifier"]} is not valid. Please provide a valid disk identifier.'
                    )
                    continue

                if current_disk['name'] is None:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]} disk. Failed to retrieve name.'
                    )
                    continue

                if current_disk['name'].startswith('nvd'):
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]} disk. NVMe devices cannot be mapped yet.'
                    )

                device = devices.get(current_disk['name'])
                if not device:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]}. Unable to retrieve disk details.'
                    )

        verrors.check()

        return list(
            await asyncio_map(functools.partial(self.__manual_test, devices), test_disks_list, 16)
        )
Example #28
    async def cert_services_validation(self,
                                       id,
                                       schema_name,
                                       raise_verrors=True):
        # General method to check certificate health wrt usage in services
        cert = await self.middleware.call('certificate.query',
                                          [['id', '=', id]])
        verrors = ValidationErrors()
        valid_key_size = {'EC': 28, 'RSA': 2048}
        if cert:
            cert = cert[0]
            if cert['cert_type'] != 'CERTIFICATE' or cert['cert_type_CSR']:
                verrors.add(
                    schema_name,
                    'Selected certificate id is not a valid certificate')
            elif not cert.get('fingerprint'):
                verrors.add(schema_name,
                            f'{cert["name"]} certificate is malformed')

            if not cert['privatekey']:
                verrors.add(
                    schema_name,
                    'Selected certificate does not have a private key')
            elif not cert['key_length']:
                verrors.add(schema_name,
                            'Failed to parse certificate\'s private key')
            elif cert['key_length'] < valid_key_size[cert['key_type']]:
                verrors.add(
                    schema_name,
                    f'{cert["name"]}\'s private key size is less than {valid_key_size[cert["key_type"]]} bits'
                )

            if cert['until'] and datetime.datetime.strptime(
                    cert['until'],
                    '%a %b  %d %H:%M:%S %Y') < datetime.datetime.now():
                verrors.add(
                    schema_name,
                    f'{cert["name"]!r} has expired (it was valid until {cert["until"]!r})'
                )

            if cert['digest_algorithm'] in ['MD5', 'SHA1']:
                verrors.add(
                    schema_name,
                    'Please use a certificate whose digest algorithm has at least 112 security bits'
                )

            if cert['revoked']:
                verrors.add(schema_name, 'This certificate is revoked')
        else:
            verrors.add(schema_name,
                        f'No Certificate found with the provided id: {id}')

        if raise_verrors:
            verrors.check()
        else:
            return verrors
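The expiration check in Example #28 parses the certificate's `until` field with the ctime-style format '%a %b  %d %H:%M:%S %Y'; a quick illustration with a made-up date:

import datetime

# Note the double space before the day, matching ctime output for single-digit days.
expiry = datetime.datetime.strptime('Mon Jan  1 00:00:00 2035', '%a %b  %d %H:%M:%S %Y')
print(expiry < datetime.datetime.now())  # False until that date has passed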
Example #29
    async def do_update(self, pk, data):
        """
        Update attributes of an existing group.
        """

        group = await self._get_instance(pk)

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_update', pk=pk)
        verrors.check()

        group.update(data)
        delete_groupmap = False
        group.pop('users', None)

        if 'name' in data and data['name'] != group['group']:
            delete_groupmap = group['group']
            group['group'] = group.pop('name')
        else:
            group.pop('name', None)

        group = await self.group_compress(group)
        await self.middleware.call('datastore.update', 'account.bsdgroups', pk,
                                   group, {'prefix': 'bsdgrp_'})

        if 'users' in data:
            existing = {
                i['bsdgrpmember_user']['id']: i
                for i in await self.middleware.call(
                    'datastore.query', 'account.bsdgroupmembership', [(
                        'bsdgrpmember_group', '=', pk)])
            }
            to_remove = set(existing.keys()) - set(data['users'])
            for i in to_remove:
                await self.middleware.call('datastore.delete',
                                           'account.bsdgroupmembership',
                                           existing[i]['id'])

            to_add = set(data['users']) - set(existing.keys())
            for i in to_add:
                await self.middleware.call('datastore.insert',
                                           'account.bsdgroupmembership', {
                                               'bsdgrpmember_group': pk,
                                               'bsdgrpmember_user': i
                                           })

        if delete_groupmap:
            await self.middleware.call('notifier.groupmap_delete',
                                       delete_groupmap)

        await self.middleware.call('service.reload', 'user')

        await self.middleware.call('smb.groupmap_add', group['group'])

        return pk
Example #30
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()
        create_request = schema_name == 'glustervolume_create'

        if data['name'] == CTDB_VOL_NAME and create_request:
            verrors.add(
                f'{schema_name}.{data["name"]}',
                f'"{data["name"]}" is a reserved name. Choose a different volume name.'
            )

        verrors.check()
Example #31
    async def by_path(self, data):
        """
        Retrieve list of available ACL templates for a given `path`.

        Supports `query-filters` and `query-options`.
        `format-options` gives additional options to alter the results of
        the template query:

        `canonicalize` - place ACL entries for NFSv4 ACLs in Microsoft canonical order.
        `ensure_builtins` - ensure all results contain entries for `builtin_users` and `builtin_administrators`
        groups.
        `resolve_names` - convert ids in ACL entries into names.
        """
        verrors = ValidationErrors()
        filters = data.get('query-filters')
        if data['path']:
            path = await self.middleware.call(
                "filesystem.resolve_cluster_path", data['path'])
            acltype = await self.middleware.call('filesystem.path_get_acltype',
                                                 path)
            if acltype == ACLType.DISABLED.name:
                return []

            if acltype == ACLType.POSIX1E.name and data['format-options']['canonicalize']:
                verrors.add(
                    "filesystem.acltemplate_by_path.format-options.canonicalize",
                    "POSIX1E ACLs may not be sorted into Windows canonical order."
                )
            filters.append(("acltype", "=", acltype))

        if not data['path'] and data['format-options']['resolve_names']:
            verrors.add(
                "filesystem.acltemplate_by_path.format-options.resolve_names",
                "ACL entry ids may not be resolved into names unless path is provided."
            )

        verrors.check()

        templates = await self.query(filters, data['query-options'])
        for t in templates:
            if data['format-options']['ensure_builtins']:
                await self.append_builtins(t)

            if data['format-options']['resolve_names']:
                st = await self.middleware.run_in_thread(os.stat, path)
                await self.resolve_names(st.st_uid, st.st_gid, t)

            if data['format-options']['canonicalize'] and t['acltype'] == ACLType.NFS4.name:
                canonicalized = ACLType[t['acltype']].canonicalize(t['acl'])
                t['acl'] = canonicalized

        return templates
Example #32
    async def do_create(self, data):
        """
        Create a new group.

        If `gid` is not provided it is automatically filled with the next one available.

        `allow_duplicate_gid` allows distinct group names to share the same gid.

        `users` is a list of user ids (`id` attribute from `user.query`).

        `smb` specifies whether the group should be mapped into an NT group.
        """
        allow_duplicate_gid = data['allow_duplicate_gid']
        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_create')
        verrors.check()

        if not data.get('gid'):
            data['gid'] = await self.get_next_gid()

        group = data.copy()
        group['group'] = group.pop('name')

        users = group.pop('users', [])

        group = await self.group_compress(group)
        pk = await self.middleware.call('datastore.insert',
                                        'account.bsdgroups', group,
                                        {'prefix': 'bsdgrp_'})

        for user in users:
            await self.middleware.call('datastore.insert',
                                       'account.bsdgroupmembership', {
                                           'bsdgrpmember_group': pk,
                                           'bsdgrpmember_user': user
                                       })

        await self.middleware.call('service.reload', 'user')
        if data['smb']:
            try:
                await self.middleware.call('smb.groupmap_add', data['name'])
            except Exception:
                """
                Samba's group mapping database does not allow duplicate gids.
                Unfortunately, we don't get a useful error message at -d 0.
                """
                if not allow_duplicate_gid:
                    raise
                else:
                    self.logger.debug(
                        'Refusing to generate duplicate gid mapping in group_mapping.tdb: %s -> %s',
                        data['name'], data['gid'])

        return pk
Example #33
    async def common_validation(self, data, schema_name):
        name = data.get('name', None)
        all_vols = data.get('all', None)

        verrors = ValidationErrors()
        if not name and not all_vols:
            verrors.add(
                f'{schema_name}',
                'A gluster volume name is required or the "all" key must be set to True'
            )

        verrors.check()
Example #34
    async def do_update(self, id, data):
        """
        Update a domain by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        tmp = data.copy()
        verrors = ValidationErrors()
        if old['name'] in [x.name for x in DSType] and old['name'] != new['name']:
            verrors.add(
                'idmap_domain_update.name',
                f'Changing name of default domain {old["name"]} is not permitted'
            )

        if new['options'].get('sssd_compat') and not old['options'].get('sssd_compat'):
            if await self.middleware.call('activedirectory.get_state') != 'HEALTHY':
                verrors.add(
                    'idmap_domain_update.options',
                    'AD service must be enabled and started to '
                    'generate an SSSD-compatible id range')
                verrors.check()

            new['range_low'] = await self.get_sssd_low_range(new['name'])
            new['range_high'] = new['range_low'] + 100000000

        await self.validate('idmap_domain_update', new, verrors)
        await self.validate_options('idmap_domain_update', new, verrors,
                                    ['MISSING'])
        tmp['idmap_backend'] = new['idmap_backend']
        if data.get('options'):
            await self.validate_options('idmap_domain_update', tmp, verrors,
                                        ['EXTRA'])

        if data.get('certificate_id') and not data['options'].get('ssl'):
            verrors.add(
                'idmap_domain_update.certificate_id',
                f'The {new["idmap_backend"]} idmap backend does not '
                'generate LDAP traffic. Certificates do not apply.')
        verrors.check()
        await self.prune_keys(new)
        final_options = IdmapBackend[data['idmap_backend']].defaults()
        final_options.update(new['options'])
        new['options'] = final_options
        await self.idmap_compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('idmap.clear_idmap_cache')
        return await self._get_instance(id)
Example #35
    async def do_update(self, pk, data):
        """
        Update attributes of an existing group.
        """

        group = await self._get_instance(pk)

        verrors = ValidationErrors()
        await self.__common_validation(verrors, data, 'group_update', pk=pk)
        verrors.check()

        group.update(data)
        delete_groupmap = False
        group.pop('users', None)

        if 'name' in data and data['name'] != group['group']:
            delete_groupmap = group['group']
            group['group'] = group.pop('name')
        else:
            group.pop('name', None)

        await self.middleware.call('datastore.update', 'account.bsdgroups', pk, group, {'prefix': 'bsdgrp_'})

        if 'users' in data:
            existing = {i['bsdgrpmember_user']['id']: i for i in await self.middleware.call('datastore.query', 'account.bsdgroupmembership', [('bsdgrpmember_group', '=', pk)])}
            to_remove = set(existing.keys()) - set(data['users'])
            for i in to_remove:
                await self.middleware.call('datastore.delete', 'account.bsdgroupmembership', existing[i]['id'])

            to_add = set(data['users']) - set(existing.keys())
            for i in to_add:
                await self.middleware.call('datastore.insert', 'account.bsdgroupmembership', {'bsdgrpmember_group': pk, 'bsdgrpmember_user': i})

        if delete_groupmap:
            await self.middleware.call('notifier.groupmap_delete', delete_groupmap)

        await self.middleware.call('service.reload', 'user')

        await self.middleware.call('smb.groupmap_add', group['group'])

        return pk
Example #36
    def validate_data_keys(self, data, model, schema, prefix):
        verrors = ValidationErrors()
        fields = list(
            map(
                lambda f: f.name.replace(prefix or '', '', 1),
                chain(model._meta.fields, model._meta.many_to_many)
            )
        )

        # _id is a special condition in filter where the key in question can be a related descriptor in django,
        # e.g. share_id - so we remove _id and check if the field is present in the `fields` list
        for key in filter(
            lambda v: all(c not in fields for c in (v, v if not v.endswith('_id') else v[:-3])),
            data
        ):
            verrors.add(
                f'{schema}.{key}',
                f'{key} field not recognized'
            )

        verrors.check()
Example #37
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        await check_path_resides_within_volume(verrors, self.middleware, f'{schema_name}.path', data.get('path'))

        for entity in ('user', 'group'):
            value = data.get(entity)
            if value not in map(
                    lambda e: e[entity if entity == 'group' else 'username'],
                    await self.middleware.call(f'{entity}.query')
            ):
                verrors.add(
                    f'{schema_name}.{entity}',
                    f'Please specify a valid {entity}'
                )

        verrors.check()

        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data['mode'] = data['mode'].lower()

        return data
Example #38
    async def validate_attrs(self, data):
        verrors = ValidationErrors()

        additional_params = data.get('additional_params')
        if additional_params:
            # Let's be very generic here and introduce very basic validation
            # Expected format is as following
            # [ipv6.icmpneighbor]
            #   history = 86400
            #   enabled = yes
            #
            # While we are here, we will also introduce basic formatting to the file to ensure
            # that we can make it as compliant as possible

            param_str = ''
            for i in additional_params.split('\n'):
                i = i.strip()
                if not i:
                    continue
                if i.startswith('#'):
                    # Let's not validate this
                    if i.replace('#', '').startswith('['):
                        param_str += f'\n\n{i}'
                    else:
                        param_str += f'\n\t{i}'

                    continue

                if i.startswith('[') and not i.endswith(']'):
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct format for {i}. e.g. [system.intr]'
                    )
                elif not i.startswith('[') and '=' not in i:
                    verrors.add(
                        'netdata_update.additional_params',
                        f'Please correct format for {i}. e.g. enabled = yes'
                    )

                if i.startswith('['):
                    param_str += f'\n\n{i}'
                else:
                    param_str += f'\n\t{i}'

            data['additional_params'] = param_str + '\n'

        bind_to_ips = data.get('bind')
        if bind_to_ips:
            valid_ips = [ip['address'] for ip in await self.middleware.call('interface.ip_in_use')]
            valid_ips.extend(['127.0.0.1', '::1', '0.0.0.0', '::'])

            for bind_ip in bind_to_ips:
                if bind_ip not in valid_ips:
                    verrors.add(
                        'netdata_update.bind',
                        f'Invalid {bind_ip} bind IP'
                    )
        else:
            verrors.add(
                'netdata_update.bind',
                'This field is required'
            )

        update_alarms = data.pop('update_alarms', {})
        valid_alarms = self._alarms
        if update_alarms:
            for alarm in update_alarms:
                if alarm not in valid_alarms:
                    verrors.add(
                        'netdata_update.alarms',
                        f'{alarm} not a valid alarm'
                    )

            verrors.extend(
                validate_attributes(
                    [Dict(key, Bool('enabled', required=True)) for key in update_alarms],
                    {'attributes': update_alarms}
                )
            )

        # Validating streaming metrics now
        stream_mode = data.get('stream_mode')
        if stream_mode == 'SLAVE':
            for key in ('api_key', 'destination'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required with stream mode as SLAVE'
                    )

            destinations = data.get('destination')
            if destinations:
                ip_addr = IpAddress()
                port = Port()
                for dest in destinations:
                    ip = dest.split(':')[0]
                    try:
                        ip_addr(ip)
                    except ValueError as e:
                        verrors.add(
                            'netdata_update.destination',
                            str(e)
                        )
                    else:
                        if ':' in dest:
                            try:
                                port(int(dest.split(':')[1]))
                            except ValueError as e:
                                verrors.add(
                                    'netdata_update.destination',
                                    f'Not a valid port: {e}'
                                )
        elif stream_mode == 'MASTER':
            for key in ('allow_from', 'api_key'):
                if not data.get(key):
                    verrors.add(
                        f'netdata_update.{key}',
                        f'{key} is required with stream mode as MASTER'
                    )

        verrors.check()

        data['alarms'].update(update_alarms)

        return data
Example #39
    async def do_update(self, job, data):
        """
        Update System Dataset Service Configuration.

        `pool` is the name of a valid pool configured in the system which will be used to host the system dataset.

        `pool_exclude` can be specified to make sure that we don't place the system dataset on that pool if `pool`
        is not provided.
        """
        config = await self.config()

        new = config.copy()
        new.update(data)

        verrors = ValidationErrors()
        if new['pool'] and new['pool'] != 'freenas-boot':
            pool = await self.middleware.call('pool.query', [['name', '=', new['pool']]])
            if not pool:
                verrors.add(
                    'sysdataset_update.pool',
                    f'Pool "{new["pool"]}" not found',
                    errno.ENOENT
                )
            elif pool[0]['encrypt'] == 2:
                # This will cover two cases - passphrase being set for a pool and that it might be locked as well
                verrors.add(
                    'sysdataset_update.pool',
                    f'Pool "{new["pool"]}" has an encryption passphrase set. '
                    'The system dataset cannot be placed on this pool.'
                )
        elif not new['pool']:
            for pool in await self.middleware.call(
                'pool.query', [
                    ['encrypt', '!=', 2]
                ]
            ):
                if data.get('pool_exclude') == pool['name']:
                    continue
                new['pool'] = pool['name']
                break
            else:
                # If a data pool could not be found, reset it to blank
                # Which will eventually mean its back to freenas-boot (temporarily)
                new['pool'] = ''
        verrors.check()

        new['syslog_usedataset'] = new['syslog']

        update_dict = new.copy()
        for key in ('is_decrypted', 'basename', 'uuid_a', 'syslog', 'path', 'pool_exclude'):
            update_dict.pop(key, None)

        await self.middleware.call(
            'datastore.update',
            'system.systemdataset',
            config['id'],
            update_dict,
            {'prefix': 'sys_'}
        )

        new = await self.config()

        if config['pool'] != new['pool']:
            await self.migrate(config['pool'], new['pool'])

        await self.setup(True, data.get('pool_exclude'))

        if config['syslog'] != new['syslog']:
            await self.middleware.call('service.restart', 'syslogd')

        return await self.config()
Example #40
    async def do_create(self, data):
        """
        Create a new user.

        If `uid` is not provided it is automatically filled with the next one available.

        `group` is required if `group_create` is false.

        `password` is required if `password_disabled` is false.

        Available choices for `shell` can be retrieved with `user.shell_choices`.

        `attributes` is a general-purpose object for storing arbitrary user information.
        """
        verrors = ValidationErrors()

        if (
            not data.get('group') and not data.get('group_create')
        ) or (
            data.get('group') is not None and data.get('group_create')
        ):
            verrors.add(
                'user_create.group',
                'Either enter a group name or create a new group to continue.',
                errno.EINVAL
            )

        await self.__common_validation(verrors, data, 'user_create')

        if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
            verrors.add(
                'user_create.sshpubkey',
                'The home directory is not writable. Leave this field blank.'
            )

        verrors.check()

        groups = data.pop('groups')
        create = data.pop('group_create')

        if create:
            group = await self.middleware.call('group.query', [('group', '=', data['username'])])
            if group:
                group = group[0]
            else:
                group = await self.middleware.call('group.create', {'name': data['username']})
                group = (await self.middleware.call('group.query', [('id', '=', group)]))[0]
            data['group'] = group['id']
        else:
            group = await self.middleware.call('group.query', [('id', '=', data['group'])])
            if not group:
                raise CallError(f'Group {data["group"]} not found')
            group = group[0]

        # Is this a new directory or not? Let's not nuke existing directories,
        # e.g. /, /root, /mnt/tank/my-dataset, etc ;).
        new_homedir = False
        home_mode = data.pop('home_mode')
        if data['home'] and data['home'] != '/nonexistent':
            try:
                try:
                    os.makedirs(data['home'], mode=int(home_mode, 8))
                    new_homedir = True
                    os.chown(data['home'], data['uid'], group['gid'])
                except FileExistsError:
                    if not os.path.isdir(data['home']):
                        raise CallError(
                            'Path for home directory already '
                            'exists and is not a directory',
                            errno.EEXIST
                        )

                    # If it exists, ensure the user is owner
                    os.chown(data['home'], data['uid'], group['gid'])
                except OSError as oe:
                    raise CallError(
                        'Failed to create the home directory '
                        f'({data["home"]}) for user: {oe}'
                    )
                if os.stat(data['home']).st_dev == os.stat('/mnt').st_dev:
                    raise CallError(
                        f'The path for the home directory "({data["home"]})" '
                        'must include a volume or dataset.'
                    )
            except Exception:
                if new_homedir:
                    shutil.rmtree(data['home'])
                raise

        if not data.get('uid'):
            data['uid'] = await self.get_next_uid()

        pk = None  # Make sure pk exists to rollback in case of an error
        try:
            await self.__set_password(data)
            sshpubkey = data.pop('sshpubkey', None)  # datastore does not have sshpubkey

            pk = await self.middleware.call('datastore.insert', 'account.bsdusers', data, {'prefix': 'bsdusr_'})

            await self.__set_groups(pk, groups)

        except Exception:
            if pk is not None:
                await self.middleware.call('datastore.delete', 'account.bsdusers', pk)
            if new_homedir:
                # Be as atomic as possible when creating the user if
                # commands failed to execute cleanly.
                shutil.rmtree(data['home'])
            raise

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(data['username'])

        if os.path.exists(data['home']):
            for f in os.listdir(SKEL_PATH):
                if f.startswith('dot'):
                    dest_file = os.path.join(data['home'], f[3:])
                else:
                    dest_file = os.path.join(data['home'], f)
                if not os.path.exists(dest_file):
                    shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                    os.chown(dest_file, data['uid'], group['gid'])

            data['sshpubkey'] = sshpubkey
            try:
                await self.__update_sshpubkey(data['home'], data, group['group'])
            except PermissionError as e:
                self.logger.warn('Failed to update authorized keys', exc_info=True)
                raise CallError(f'Failed to update authorized keys: {e}')

        return pk
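Example #40 converts the textual home_mode into permission bits with int(home_mode, 8), i.e. the string is interpreted as an octal number:

# '755' as an octal string becomes the familiar rwxr-xr-x permission bits.
print(int('755', 8) == 0o755)  # True
print(oct(int('700', 8)))      # '0o700'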
Example #41
    async def do_update(self, pk, data):
        """
        Update attributes of an existing user.
        """

        user = await self._get_instance(pk)

        verrors = ValidationErrors()

        if 'group' in data:
            group = await self.middleware.call('datastore.query', 'account.bsdgroups', [
                ('id', '=', data['group'])
            ])
            if not group:
                verrors.add('user_update.group', f'Group {data["group"]} not found', errno.ENOENT)
            else:
                group = group[0]
        else:
            group = user['group']
            user['group'] = group['id']

        await self.__common_validation(verrors, data, 'user_update', pk=pk)

        home = data.get('home') or user['home']
        # root user (uid 0) is an exception to the rule
        if data.get('sshpubkey') and not home.startswith('/mnt') and user['uid'] != 0:
            verrors.add('user_update.sshpubkey', 'Home directory is not writable, leave this blank')

        # Do not allow attributes to be changed for builtin user
        if user['builtin']:
            for i in ('group', 'home', 'home_mode', 'uid', 'username'):
                if i in data:
                    verrors.add(f'user_update.{i}', 'This attribute cannot be changed')

        verrors.check()

        # Copy the home directory if it changed
        if (
            'home' in data and
            data['home'] not in (user['home'], '/nonexistent') and
            not data['home'].startswith(f'{user["home"]}/')
        ):
            home_copy = True
            home_old = user['home']
        else:
            home_copy = False

        # After this point user dict has values from data
        user.update(data)

        if home_copy and not os.path.isdir(user['home']):
            try:
                os.makedirs(user['home'])
                os.chown(user['home'], user['uid'], group['bsdgrp_gid'])
            except OSError:
                self.logger.warn('Failed to chown homedir', exc_info=True)
            if not os.path.isdir(user['home']):
                raise CallError(f'{user["home"]} is not a directory')

        home_mode = user.pop('home_mode', None)
        if user['builtin']:
            home_mode = None

        def set_home_mode():
            if home_mode is not None:
                try:
                    os.chmod(user['home'], int(home_mode, 8))
                except OSError:
                    self.logger.warn('Failed to set homedir mode', exc_info=True)

        try:
            await self.__update_sshpubkey(
                home_old if home_copy else user['home'], user, group['bsdgrp_group'],
            )
        except PermissionError as e:
            self.logger.warn('Failed to update authorized keys', exc_info=True)
            raise CallError(f'Failed to update authorized keys: {e}')

        if home_copy:
            def do_home_copy():
                try:
                    subprocess.run(f"/usr/bin/su - {user['username']} -c '/bin/cp -a {home_old}/ {user['home']}/'", shell=True, check=True)
                except subprocess.CalledProcessError as e:
                    self.logger.warn(f"Failed to copy homedir: {e}")
                set_home_mode()

            asyncio.ensure_future(self.middleware.run_in_thread(do_home_copy))
        else:
            set_home_mode()

        user.pop('sshpubkey', None)
        await self.__set_password(user)

        if 'groups' in user:
            groups = user.pop('groups')
            await self.__set_groups(pk, groups)

        await self.middleware.call('datastore.update', 'account.bsdusers', pk, user, {'prefix': 'bsdusr_'})

        await self.middleware.call('service.reload', 'user')

        await self.__set_smbpasswd(user['username'])

        return pk