Example #1
    async def stop(self, force=False):
        """
        Remove NIS_state entry entirely after stopping ypbind. This is so that the 'enable' checkbox
        becomes the sole source of truth regarding a service's state when it is disabled.
        """
        state = await self.get_state()
        nis = await self.config()
        if not force:
            if state in ['LEAVING', 'JOINING']:
                raise CallError(
                    f'Current state of NIS service is: [{state}]. Wait until operation completes.',
                    errno.EBUSY)

        await self.__set_state(DSStatus['LEAVING'])
        await self.middleware.call('datastore.update', 'directoryservice.nis',
                                   nis['id'], {'nis_enable': False})

        ypbind = await run(['/usr/sbin/service', 'ypbind', 'onestop'],
                           check=False)
        if ypbind.returncode != 0:
            await self.__set_state(DSStatus['FAULTED'])
            errmsg = ypbind.stderr.decode().strip()
            if 'ypbind not running' not in errmsg:
                raise CallError(
                    f'ypbind failed to stop: [{errmsg}]'
                )

        await self.middleware.call('cache.pop', 'NIS_State')
        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'pam')
        await self.middleware.call('etc.generate', 'hostname')
        await self.middleware.call('etc.generate', 'nss')
        await self.__set_state(DSStatus['DISABLED'])
        self.logger.debug(
            'NIS service successfully stopped. Setting state to DISABLED.')
        return True
Example #2
    def read_plugin_pkg_db(self, db, pkg):
        try:
            conn = sqlite3.connect(db)
        except sqlite3.Error as e:
            raise CallError(e)

        with conn:
            cur = conn.cursor()
            # Use a parameterized query rather than string interpolation
            cur.execute(
                'SELECT * FROM packages WHERE origin = ? OR name = ?',
                (pkg, pkg)
            )

            rows = cur.fetchall()

            return rows
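A minimal usage sketch for the helper above; the database path and package identifier are hypothetical placeholders that only illustrate the expected arguments.

# Hedged usage sketch: the db path and package name below are placeholders.
rows = self.read_plugin_pkg_db(
    '/mnt/tank/iocage/jails/myplugin/root/var/db/pkg/local.sqlite',  # hypothetical path
    'www/nginx'  # matched against both the package origin and name columns
)
for row in rows:
    self.logger.debug('matching package row: %r', row)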
Example #3
def update_zfs_default(root):
    with libzfs.ZFS() as zfs:
        existing_pools = [p.name for p in zfs.pools]

    for i in ['freenas-boot', 'boot-pool']:
        if i in existing_pools:
            boot_pool = i
            break
    else:
        raise CallError(f'Failed to locate valid boot pool. Pools located were: {", ".join(existing_pools)}')

    with libzfs.ZFS() as zfs:
        disks = [disk.replace("/dev/", "") for disk in zfs.get(boot_pool).disks]

    mapping = {}
    for dev in filter(
        lambda d: not d.sys_name.startswith("sr") and d.get("DEVTYPE") in ("disk", "partition"),
        pyudev.Context().list_devices(subsystem="block")
    ):
        if dev.get("DEVTYPE") == "disk":
            mapping[dev.sys_name] = dev.get("ID_BUS")
        elif dev.get("ID_PART_ENTRY_UUID"):
            parent = dev.find_parent("block")
            mapping[dev.sys_name] = parent.get("ID_BUS")
            mapping[os.path.join("disk/by-partuuid", dev.get("ID_PART_ENTRY_UUID"))] = parent.get("ID_BUS")

    has_usb = False
    for dev in disks:
        if mapping.get(dev) == "usb":
            has_usb = True
            break

    zfs_config_path = os.path.join(root, "etc/default/zfs")
    with open(zfs_config_path) as f:
        original_config = f.read()
        lines = original_config.rstrip().split("\n")

    zfs_var_name = "ZFS_INITRD_POST_MODPROBE_SLEEP"
    lines = [line for line in lines if not line.startswith(f"{zfs_var_name}=")]
    if has_usb:
        lines.append(f"{zfs_var_name}=15")

    new_config = "\n".join(lines) + "\n"
    if new_config != original_config:
        with open(zfs_config_path, "w") as f:
            f.write(new_config)
        return True
    return False
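A short sketch of the effect, assuming a root of '/' and a USB-backed boot pool (both are assumptions for illustration only).

# Assumption: '/' is the target root and the boot pool sits on a USB disk.
# After the call, /etc/default/zfs ends with a single
# 'ZFS_INITRD_POST_MODPROBE_SLEEP=15' line; the function returns True only
# when the file content actually changed.
changed = update_zfs_default('/')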
Example #4
    async def start(self, job):
        """
        Refuse to start the service if it is already in the process of starting or stopping.
        If state is 'HEALTHY' or 'FAULTED', then stop the service first before restarting it to ensure
        that the service begins in a clean state.
        """
        ldap_state = await self.middleware.call('ldap.get_state')
        if ldap_state in ['LEAVING', 'JOINING']:
            raise CallError(f'LDAP state is [{ldap_state}]. Please wait until directory service operation completes.', errno.EBUSY)

        job.set_progress(0, 'Preparing to configure LDAP directory service.')
        ldap = await self.direct_update({"enable": True})
        if ldap['kerberos_realm']:
            job.set_progress(5, 'Starting kerberos')
            await self.middleware.call('kerberos.start')

        job.set_progress(15, 'Generating configuration files')
        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'ldap')
        await self.middleware.call('etc.generate', 'pam')

        job.set_progress(30, 'Starting nslcd service')
        if not await self.nslcd_status():
            await self.nslcd_cmd('start')
        else:
            await self.nslcd_cmd('restart')

        job.set_progress(50, 'Reconfiguring SMB service')
        await self.middleware.call('smb.initialize_globals')
        await self.synchronize()
        job.set_progress(60, 'Reconfiguring idmap service')
        await self.middleware.call('idmap.synchronize')

        if ldap['has_samba_schema']:
            job.set_progress(70, 'Storing LDAP password for SMB configuration')
            await self.middleware.call('smb.store_ldap_admin_password')

        await self._service_change('cifs', 'restart')
        await self.set_state(DSStatus['HEALTHY'])
        job.set_progress(80, 'Reloading directory service cache.')
        await self.middleware.call('service.start', 'dscache')
        ha_mode = await self.middleware.call('smb.get_smb_ha_mode')
        if ha_mode == 'CLUSTERED':
            job.set_progress(90, 'Reloading LDAP service on other cluster nodes')
            cl_job = await self.middleware.call('clusterjob.submit', 'ldap.cluster_reload', 'START')
            await cl_job.wait(raise_error=True)

        job.set_progress(100, 'LDAP directory service started.')
Example #5
 async def __ypwhich(self):
     """
     The return code from ypwhich is not a reliable health indicator. For example, RPC failure will return 0.
     There are edge cases where ypwhich can hang when NIS is misconfigured.
     """
     nis = await self.config()
     ypwhich = await run(['/usr/bin/ypwhich'], check=False)
     if ypwhich.returncode != 0:
         if nis['enable']:
             await self.__set_state(DSStatus['FAULTED'])
             self.logger.debug(f'NIS status check returned [{ypwhich.stderr.decode().strip()}]. Setting state to FAULTED.')
         return False
     if ypwhich.stderr:
         await self.__set_state(DSStatus['FAULTED'])
         raise CallError(f'NIS status check returned [{ypwhich.stderr.decode().strip()}]. Setting state to FAULTED.')
     return True
Example #6
    async def wrap(self, subjob):
        """
        Wrap a job in another job, proxying progress and result/error.
        This is useful when we want to run a job inside a job.

        :param subjob: The job to wrap.
        """
        while not subjob.time_finished:
            try:
                await subjob.wait(1)
            except asyncio.TimeoutError:
                pass
            self.set_progress(**subjob.progress)
        if subjob.exception:
            raise CallError(subjob.exception)
        return subjob.result
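A hedged sketch of how such a wrapper might be called from another job method; the called job method and surrounding service plumbing are hypothetical.

# Hypothetical caller: proxy a sub-job's progress and result into the
# current job via wrap().
async def composite_task(self, job):
    subjob = await self.middleware.call('some.service.long_job')  # hypothetical job method
    return await job.wrap(subjob)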
Example #7
 async def _sharesec(self, **kwargs):
     """
     wrapper for sharesec(1). This manipulates share permissions on SMB file shares.
     The permissions are stored in share_info.tdb, and apply to the share as a whole.
     This is in contrast with filesystem permissions, which define the permissions for a file
     or directory, and in the latter case may also define permissions inheritance rules
     for newly created files in the directory. The SMB Share ACL only affects access through
     the SMB protocol.
     """
     action = kwargs.get('action')
     share = kwargs.get('share', '')
     args = kwargs.get('args', '')
     sharesec = await run([SMBCmd.SHARESEC.value, share, action, args], check=False)
     if sharesec.returncode != 0:
         raise CallError(f'sharesec {action} failed with error: {sharesec.stderr.decode()}')
     return sharesec.stdout.decode()
Example #8
    def wait_sync(self, raise_error=False):
        """
        Synchronous method to wait for a job in another thread.
        """
        fut = asyncio.run_coroutine_threadsafe(self._finished.wait(), self.loop)
        event = threading.Event()

        def done(_):
            event.set()

        fut.add_done_callback(done)
        event.wait()
        if raise_error:
            if self.error:
                raise CallError(self.error)
        return self.result
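A minimal sketch of calling this from synchronous, threaded code; the surrounding context is assumed.

# Assumed context: job_obj is a job instance owned by the asyncio loop.
# wait_sync() blocks the calling thread and, with raise_error=True,
# re-raises any job failure as CallError.
result = job_obj.wait_sync(raise_error=True)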
Example #9
    async def groupmap_listmem(self, sid):
        payload = json.dumps({"alias": sid})
        lm = await run([
            SMBCmd.NET.value, "--json", "groupmap", "listmem", payload
        ], check=False)

        # The command returns ENOENT when it fails with STATUS_NO_SUCH_ALIAS
        if lm.returncode == 2:
            return []
        elif lm.returncode != 0:
            raise CallError(f"Failed to list membership of alias [{sid}]: "
                            f"{lm.stderr.decode()}")

        output = json.loads(lm.stdout.decode())
        await self.json_check_version(output['version'])

        return [x["sid"] for x in output['members']]
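The parsing above implies roughly the following shape for the JSON emitted by net --json groupmap listmem; the values here are placeholders, and the exact version layout is whatever json_check_version() expects.

# Illustrative structure only; the SID and version fields are placeholders.
example_output = {
    'version': {'major': 0, 'minor': 1},
    'members': [
        {'sid': 'S-1-5-21-1111111111-2222222222-3333333333-20005'},
    ],
}
member_sids = [x['sid'] for x in example_output['members']]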
Example #10
    async def _service_change(self, service, verb):

        svc_state = (await self.middleware.call(
            'service.query',
            [('service', '=', service)],
            {'get': True}
        ))['state'].lower()

        if svc_state == 'running':
            started = await self.middleware.call(f'service.{verb}', service, {'onetime': True})

            if not started:
                raise CallError(
                    f'The {service} service failed to start',
                    CallError.ESERVICESTARTFAILURE,
                    [service],
                )
Example #11
    async def started(self):
        ldap = await self.config()
        if not ldap['enable']:
            return False

        try:
            ret = await asyncio.wait_for(self.middleware.call('ldap.get_root_DSE', ldap),
                                         timeout=ldap['timeout'])
        except asyncio.TimeoutError:
            raise CallError(f'LDAP status check timed out after {ldap["timeout"]} seconds.', errno.ETIMEDOUT)

        if ret:
            await self.__set_state(DSStatus['HEALTHY'])
        else:
            await self.__set_state(DSStatus['FAULTED'])

        return bool(ret)
Example #12
    async def diff_middleware_and_registry(self, share, data):
        if share is None:
            raise CallError('Share name must be specified.')

        if data is None:
            data = await self.middleware.call('sharing.smb.query', [('name', '=', share)], {'get': True})

        share_conf = await self.share_to_smbconf(data)
        reg_conf = await self.reg_showshare(share if not data['home'] else 'homes')
        s_keys = set(share_conf.keys())
        r_keys = set(reg_conf.keys())
        intersect = s_keys.intersection(r_keys)
        return {
            'added': {x: share_conf[x] for x in s_keys - r_keys},
            'removed': {x: reg_conf[x] for x in r_keys - s_keys},
            'modified': {x: (share_conf[x], reg_conf[x]) for x in intersect if share_conf[x] != reg_conf[x]},
        }
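For reference, the returned structure looks roughly like this; the parameter names and values are illustrative only.

# Illustrative diff only; keys and values are placeholders.
diff = {
    'added': {'path': '/mnt/tank/share'},       # in middleware config, absent from registry
    'removed': {'guest ok': 'yes'},             # in registry, absent from middleware config
    'modified': {'read only': ('no', 'yes')},   # (middleware value, registry value)
}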
Example #13
    async def net_keytab_add_update_ads(self, service_class):
        if not (await self.middleware.call('nfs.config'))['v4_krb']:
            return False

        cmd = [
            SMBCmd.NET.value, '--use-kerberos', 'required',
            '--use-krb5-ccache', krb5ccache.SYSTEM.value, 'ads', 'keytab',
            'add_update_ads', service_class
        ]

        netads = await run(cmd, check=False)
        if netads.returncode != 0:
            raise CallError(
                'failed to set spn entry '
                f'[{service_class}]: {netads.stdout.decode().strip()}')

        return True
Example #14
    async def validate_admin_groups(self, sid):
        """
        Check if group mapping already exists because 'net groupmap addmem' will fail
        if the mapping exists. Remove any entries that should not be present. Extra
        entries here can pose a significant security risk. The only default entry will
        have a RID value of "512" (Domain Admins).
        In LDAP environments, members of S-1-5-32-544 cannot be removed without impacting
        the entire LDAP environment because this alias exists on the remote LDAP server.
        """
        sid_is_present = False
        if await self.middleware.call('ldap.get_state') != 'DISABLED':
            self.logger.debug(
                "As a safety precaution, extra alias entries for S-1-5-32-544"
                "cannot be removed while LDAP is enabled. Skipping removal.")
            return True
        listmem = await run(
            [SMBCmd.NET.value, 'groupmap', 'listmem', 'S-1-5-32-544'],
            check=False)
        member_list = listmem.stdout.decode()
        if not member_list:
            return True

        for group in member_list.splitlines():
            group = group.strip()
            if group == sid:
                self.logger.debug(
                    "SID [%s] is already a member of BUILTIN\\administrators",
                    sid)
                sid_is_present = True
            if group.rsplit('-', 1)[-1] != "512" and group != sid:
                self.logger.debug(f"Removing {group} from local admins group.")
                rem = await run([
                    SMBCmd.NET.value, 'groupmap', 'delmem', 'S-1-5-32-544',
                    group
                ],
                                check=False)
                if rem.returncode != 0:
                    raise CallError(
                        f'Failed to remove sid [{sid}] from S-1-5-32-544: {rem.stderr.decode()}'
                    )

        if sid_is_present:
            return False
        else:
            return True
Example #15
    def getparm(self, parm, section):
        """
        Get a parameter from the smb4.conf file. This is more reliable than
        'testparm --parameter-name'. testparm will fail in a variety of
        conditions without returning the parameter's value.
        """
        try:
            if section.upper() == 'GLOBAL':
                return param.LoadParm(SMBPath.GLOBALCONF.platform()).get(
                    parm, section)
            else:
                return self.middleware.call_sync('sharing.smb.reg_getparm',
                                                 section, parm)

        except Exception as e:
            raise CallError(
                f'Attempt to query smb4.conf parameter [{parm}] failed with error: {e}'
            )
Example #16
    def port_is_listening(self, host, port, timeout=1):
        ret = False

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if timeout:
            s.settimeout(timeout)

        try:
            s.connect((host, port))
            ret = True

        except Exception as e:
            raise CallError(e)

        finally:
            s.close()

        return ret
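A hedged usage sketch from inside a service method; the host and port are examples only.

# Placeholder host/port; CallError propagates if the connection attempt errors out.
if self.port_is_listening('192.168.0.250', 445, timeout=2):
    self.logger.debug('TCP port is accepting connections')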
Example #17
    async def groupmap_delete(self, ntgroup=None, sid=None):
        if not ntgroup and not sid:
            raise CallError("ntgroup or sid is required")

        if ntgroup:
            target = f"ntgroup={ntgroup}"
        elif sid:
            target = f"sid={sid}"

        gm_delete = await run(
            [SMBCmd.NET.value, '-d', '0', 'groupmap', 'delete', target],
            check=False)

        if gm_delete.returncode != 0:
            self.logger.debug(
                f'Failed to delete groupmap for [{target}]: ({gm_delete.stderr.decode()})'
            )
Example #18
    def export(self, identifier, starttime, endtime, aggregate=True):
        for rrd_file in self.get_rrd_files(identifier):
            cp = subprocess.run([
                'rrdtool',
                'info',
                '--daemon', 'unix:/var/run/rrdcached.sock',
                rrd_file,
            ], capture_output=True, encoding='utf-8')

            if m := RE_LAST_UPDATE.search(cp.stdout):
                last_update = int(m.group(1))
                now = time.time()
                if last_update > now + 1800:  # Tolerance for small system time adjustments
                    raise CallError(
                        f"RRD file {os.path.relpath(rrd_file, self._base_path)} has update time in the future. "
                        f"Data collection will be paused for {humanfriendly.format_timespan(last_update - now)}.",
                        ErrnoMixin.EINVALIDRRDTIMESTAMP,
                    )
Example #19
    async def retrieve(self, ds, data, options):
        who_str = data.get('who')
        who_id = data.get('id')
        if who_str is None and who_id is None:
            raise CallError("`who` or `id` entry is required to uniquely "
                            "identify the entry to be retrieved.")

        tdb_name = f'{ds.lower()}_{data["idtype"].lower()}'
        prefix = "NAME" if who_str else "ID"
        tdb_key = f'{prefix}_{who_str if who_str else who_id}'

        try:
            entry = await self.middleware.call("tdb.fetch", {
                "name": tdb_name,
                "key": tdb_key
            })
        except MatchNotFound:
            entry = None

        if not entry and options['synthesize']:
            """
            if cache lacks entry, create one from passwd / grp info,
            insert into cache and return synthesized value.
            get_uncached_* will raise KeyError if NSS lookup fails.
            """
            try:
                if data['idtype'] == 'USER':
                    pwdobj = await self.middleware.call(
                        'dscache.get_uncached_user', who_str, who_id)
                    entry = await self.middleware.call('idmap.synthetic_user',
                                                       ds.lower(), pwdobj)
                else:
                    grpobj = await self.middleware.call(
                        'dscache.get_uncached_group', who_str, who_id)
                    entry = await self.middleware.call('idmap.synthetic_group',
                                                       ds.lower(), grpobj)
                await self.insert(ds, data['idtype'], entry)
            except KeyError:
                entry = None

        elif not entry:
            raise KeyError(who_str if who_str else who_id)

        return entry
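A small illustration of the tdb naming scheme implied by the code above, with made-up inputs.

# Illustrative only: how the tdb name and key are derived for a user lookup by name.
ds, data = 'AD', {'idtype': 'USER', 'who': 'bob', 'id': None}
tdb_name = f'{ds.lower()}_{data["idtype"].lower()}'   # -> 'ad_user'
tdb_key = f'NAME_{data["who"]}'                       # -> 'NAME_bob'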
Example #20
    def do_update(self, jail, options):
        """Sets a jail property."""
        plugin = options.pop("plugin")
        _, _, iocage = self.check_jail_existence(jail)

        name = options.pop("name", None)

        for prop, val in options.items():
            p = f"{prop}={val}"

            try:
                iocage.set(p, plugin)
            except RuntimeError as err:
                raise CallError(err)

        if name:
            iocage.rename(name)

        return True
Example #21
    async def _update_service(self, old, new):
        await self.middleware.call(
            'datastore.update',
            f'services.{self._config.service_model or self._config.service}',
            old['id'], new, {'prefix': self._config.datastore_prefix})

        enabled = (await self.middleware.call(
            'datastore.query', 'services.services',
            [('srv_service', '=', self._config.service)],
            {'get': True}))['srv_enable']

        started = await self.middleware.call(
            f'service.{self._config.service_verb}', self._config.service,
            {'onetime': False})

        if enabled and not started:
            raise CallError(
                f'The {self._config.service} service failed to start',
                CallError.ESERVICESTARTFAILURE)
Example #22
    async def process_queue(self, job):
        gl_enabled = (await self.middleware.call(
            'service.query', [('service', '=', 'glusterd')],
            {'get': True}))['enable']
        if not gl_enabled:
            return

        node = (await self.middleware.call('ctdb.general.status',
                                           {'all_nodes': False}))[0]
        if node['flags_str'] != 'OK':
            raise CallError(
                f'Cannot reload directory service. Node health: {node["flags_str"]}'
            )

        job_list = await self.list()
        for idx, entry in enumerate(job_list.get(node["pnn"], [])):
            p = (100 / len(job_list) * idx)
            job.set_progress(
                p, f'Processing queued job for [{entry["method"]}].')
            if entry['status'] == CLStatus.EXPIRED.name:
                continue

            await self.update_status(entry['key'], CLStatus.RUNNING.name,
                                     entry['timeout'])
            try:
                if entry['payload']:
                    rv = await self.middleware.call(entry['method'],
                                                    entry['payload'])
                else:
                    rv = await self.middleware.call(entry['method'])
            except Exception:
                self.logger.warning(
                    "Cluster cached job for method [%s] failed.",
                    entry['method'],
                    exc_info=True)

            if entry['job']:
                rv = await rv.wait()

            await self.middleware.call('clustercache.pop', entry['key'])

        job.set_progress(100, 'Finished processing queue.')
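Each queued entry consumed by the loop above carries at least the fields referenced in the code; an illustrative entry follows, with placeholder values.

# Placeholder entry; only the keys are implied by the code, the values are made up.
entry = {
    'key': 'ldap_cluster_reload',
    'method': 'ldap.cluster_reload',
    'payload': None,
    'job': False,
    'status': 'QUEUED',   # placeholder; actual values come from CLStatus
    'timeout': 300,
}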
Example #23
    async def groupmap_add(self, group, passdb_backend=None):
        """
        Map Unix group to NT group. This is required for group members to be
        able to access the SMB share. Name collisions with well-known and
        builtin groups must be avoided. Mapping groups with the same
        names as users should also be avoided.
        """
        if passdb_backend is None:
            passdb_backend = await self.middleware.call(
                'smb.getparm', 'passdb backend', 'global')

        if passdb_backend != 'tdbsam':
            return

        if group in SMBBuiltin.unix_groups():
            return await self.add_builtin_group(group)

        disallowed_list = ['USERS', 'ADMINISTRATORS', 'GUESTS']
        existing_groupmap = await self.groupmap_list()

        if existing_groupmap.get(group):
            self.logger.debug(
                'Setting group map for %s is not permitted. '
                'Entry already exists.', group)
            return False

        if group.upper() in disallowed_list:
            self.logger.debug(
                'Setting group map for %s is not permitted. '
                'Entry mirrors existing builtin groupmap.', group)
            return False

        next_rid = str(await self.middleware.call("smb.get_next_rid"))
        gm_add = await run([
            SMBCmd.NET.value, '-d', '0', 'groupmap', 'add', 'type=local',
            f'rid={next_rid}', f'unixgroup={group}', f'ntgroup={group}'
        ],
                           check=False)
        if gm_add.returncode != 0:
            raise CallError(
                f'Failed to generate groupmap for [{group}]: ({gm_add.stderr.decode()})'
            )
Example #24
    async def do_update(self, id, data):
        """
        Update AFP share `id`.
        """
        verrors = ValidationErrors()
        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })
        path = data.get('path')

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingafp_update', verrors, id=id)
        await self.validate(new, 'sharingafp_update', verrors, old=old)

        if path:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   'sharingafp_update.path',
                                                   path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self._service_change('afp', 'reload')

        return new
Example #25
 async def change_trust_account_pw(self):
     """
     Force an update of the AD machine account password. This can be used to
     refresh the Kerberos principals in the server's system keytab.
     """
     await self.middleware.call("kerberos.check_ticket")
     workgroup = (await self.middleware.call('smb.config'))['workgroup']
     cmd = [
         SMBCmd.NET.value,
         '--use-kerberos', 'required',
         '--use-krb5-ccache', krb5ccache.SYSTEM.value,
         '-w', workgroup,
         'ads', 'changetrustpw',
     ]
     netads = await run(cmd, check=False)
     if netads.returncode != 0:
         raise CallError(
             f"Failed to update trust password: [{netads.stderr.decode().strip()}] "
             f"stdout: [{netads.stdout.decode().strip()}] "
         )
Example #26
    async def add_nfs_spn(self,
                          job,
                          netbiosname,
                          domain,
                          check_health=True,
                          update_keytab=False):
        if check_health:
            ad_state = await self.middleware.call('activedirectory.get_state')
            if ad_state != DSStatus.HEALTHY.name:
                raise CallError(
                    "Service Principal Names that are registered in Active Directory "
                    "may only be manipulated when the Active Directory Service is Healthy. "
                    f"Current state is: {ad_state}")

        ok = await self.net_keytab_add_update_ads('nfs')
        if not ok:
            return False

        await self.middleware.call('kerberos.keytab.store_samba_keytab')
        return True
Example #27
        def progress_callback(content):
            level = content['level']
            msg = content['message'].strip('\n')

            if job.progress['percent'] == 90:
                for split_msg in msg.split('\n'):
                    fetch_output['install_notes'].append(split_msg)

            if level == 'EXCEPTION':
                fetch_output['error'] = True
                raise CallError(msg)

            job.set_progress(None, msg)

            if '  These pkgs will be installed:' in msg:
                job.set_progress(50, msg)
            elif 'Installing plugin packages:' in msg:
                job.set_progress(75, msg)
            elif 'Command output:' in msg:
                job.set_progress(90, msg)
Example #28
File: smb.py Project: tejp/freenas
    async def do_update(self, id, data):
        """
        Update SMB Share of `id`.
        """
        verrors = ValidationErrors()
        path = data.get('path')
        default_perms = data.pop('default_permissions', False)

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        new['vuid'] = await self.generate_vuid(new['timemachine'], new['vuid'])
        await self.clean(new, 'sharingsmb_update', verrors, id=id)
        await self.validate(new, 'sharingsmb_update', verrors, old=old)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)  # same here ?

        await self._service_change('cifs', 'reload')
        await self.apply_default_perms(default_perms, path, data['home'])

        return new
Example #29
    async def validate_admin_groups(self, sid):
        """
        Check if group mapping already exists because 'net groupmap addmem' will fail
        if the mapping exists. Remove any entries that should not be present. Extra
        entries here can pose a significant security risk. The only default entry will
        have a RID value of "512" (Domain Admins).
        In LDAP environments, members of S-1-5-32-544 cannot be removed without impacting
        the entire LDAP environment because this alias exists on the remote LDAP server.
        """
        sid_is_present = False
        ldap = await self.middleware.call('datastore.config', 'directoryservice.ldap')
        if ldap['ldap_enable']:
            self.logger.debug("As a safety precaution, extra alias entries for S-1-5-32-544 cannot be removed while LDAP is enabled. Skipping removal.")
            return True
        proc = await Popen(
            ['/usr/local/bin/net', 'groupmap', 'listmem', 'S-1-5-32-544'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        member_list = (await proc.communicate())[0].decode()
        if not member_list:
            return True

        for group in member_list.splitlines():
            group = group.strip()
            if group == sid:
                self.logger.debug(f"SID [{sid}] is already a member of BUILTIN\\administrators")
                sid_is_present = True
            if group.rsplit('-', 1)[-1] != "512" and group != sid:
                self.logger.debug(f"Removing {group} from local admins group.")
                rem = await Popen(
                    ['/usr/local/bin/net', 'groupmap', 'delmem', 'S-1-5-32-544', group],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
                remout = await rem.communicate()
                if rem.returncode != 0:
                    raise CallError(f'Failed to remove sid [{sid}] from S-1-5-32-544: {remout[1].decode()}')

        if sid_is_present:
            return False
        else:
            return True
Example #30
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        path = data['path']
        default_perms = data.pop('default_permissions', False)

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingcifs_update', verrors, id=id)
        await self.validate(new, 'sharingcifs_update', verrors, old=old)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               "sharingcifs_update.path", path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.set_storage_tasks(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self.middleware.call('service.reload', 'cifs')
        await self.apply_default_perms(default_perms, path)

        return new