Example #1
    async def do_update(self, id, data):
        """
        Update AFP share `id`.
        """
        verrors = ValidationErrors()
        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })
        path = data.get('path')

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingafp_update', verrors, id=id)
        await self.validate(new, 'sharingafp_update', verrors, old=old)

        if path:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   'sharingafp_update.path',
                                                   path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self._service_change('afp', 'reload')

        return new
Example #2
    async def groupmap_add(self, group, passdb_backend=None):
        """
        Map Unix group to NT group. This is required for group members to be
        able to access the SMB share. Name collisions with well-known and
        builtin groups must be avoided. Mapping groups with the same
        names as users should also be avoided.
        """
        if passdb_backend is None:
            passdb_backend = await self.middleware.call(
                'smb.getparm', 'passdb backend', 'global')

        if passdb_backend != 'tdbsam':
            return

        if group in SMBBuiltin.unix_groups():
            return await self.add_builtin_group(group)

        disallowed_list = ['USERS', 'ADMINISTRATORS', 'GUESTS']
        existing_groupmap = await self.groupmap_list()

        if existing_groupmap.get(group):
            self.logger.debug(
                'Setting group map for %s is not permitted. '
                'Entry already exists.', group)
            return False

        if group.upper() in disallowed_list:
            self.logger.debug(
                'Setting group map for %s is not permitted. '
                'Entry mirrors existing builtin groupmap.', group)
            return False

        next_rid = str(await self.middleware.call("smb.get_next_rid"))
        gm_add = await run([
            SMBCmd.NET.value, '-d', '0', 'groupmap', 'add', 'type=local',
            f'rid={next_rid}', f'unixgroup={group}', f'ntgroup={group}'
        ],
                           check=False)
        if gm_add.returncode != 0:
            raise CallError(
                f'Failed to generate groupmap for [{group}]: ({gm_add.stderr.decode()})'
            )
Example #3
        def progress_callback(content):
            level = content['level']
            msg = content['message'].strip('\n')

            if job.progress['percent'] == 90:
                for split_msg in msg.split('\n'):
                    fetch_output['install_notes'].append(split_msg)

            if level == 'EXCEPTION':
                fetch_output['error'] = True
                raise CallError(msg)

            job.set_progress(None, msg)

            if '  These pkgs will be installed:' in msg:
                job.set_progress(50, msg)
            elif 'Installing plugin packages:' in msg:
                job.set_progress(75, msg)
            elif 'Command output:' in msg:
                job.set_progress(90, msg)
Example #4
    async def add_nfs_spn(self,
                          job,
                          netbiosname,
                          domain,
                          check_health=True,
                          update_keytab=False):
        if check_health:
            ad_state = await self.middleware.call('activedirectory.get_state')
            if ad_state != DSStatus.HEALTHY.name:
                raise CallError(
                    "Service Principal Names that are registered in Active Directory "
                    "may only be manipulated when the Active Directory Service is Healthy. "
                    f"Current state is: {ad_state}")

        ok = await self.net_keytab_add_update_ads('nfs')
        if not ok:
            return False

        await self.middleware.call('kerberos.keytab.store_samba_keytab')
        return True
Example #5
    def wait_sync(self, raise_error=False):
        """
        Synchronous method to wait for a job in another thread.
        """
        fut = asyncio.run_coroutine_threadsafe(self._finished.wait(),
                                               self.loop)
        event = threading.Event()

        def done(_):
            event.set()

        fut.add_done_callback(done)
        event.wait()
        if raise_error:
            if self.error:
                if isinstance(self.exc_info[1], CallError):
                    raise self.exc_info[1]

                raise CallError(self.error)
        return self.result
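
wait_sync() bridges completion of a coroutine running on the job's event loop into a synchronous caller on a different thread. A minimal, self-contained sketch of the same bridging pattern (hypothetical worker and event names, not the original Job class):

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def make_event():
    # Create the event on the loop thread so it is bound to that loop
    # (matters on Python < 3.10).
    return asyncio.Event()

finished = asyncio.run_coroutine_threadsafe(make_event(), loop).result()

async def worker():
    await asyncio.sleep(0.1)  # stand-in for the job doing its work
    finished.set()

asyncio.run_coroutine_threadsafe(worker(), loop)

# Mirror wait_sync(): schedule Event.wait() on the loop and block this
# thread on a threading.Event set by the future's done callback.
fut = asyncio.run_coroutine_threadsafe(finished.wait(), loop)
done = threading.Event()
fut.add_done_callback(lambda _: done.set())
done.wait()
print("job finished")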
Example #6
    async def __ypwhich(self):
        """
        The return code from ypwhich is not a reliable health indicator. For example, RPC failure will return 0.
        There are edge cases where ypwhich can hang when NIS is misconfigured.
        """
        nis = await self.config()
        ypwhich = await run(['/usr/bin/ypwhich'], check=False)
        if ypwhich.returncode != 0:
            if nis['enable']:
                await self.__set_state(DSStatus['FAULTED'])
                self.logger.debug(
                    f'NIS status check returned [{ypwhich.stderr.decode().strip()}]. Setting state to FAULTED.'
                )
            return False
        if ypwhich.stderr:
            await self.__set_state(DSStatus['FAULTED'])
            raise CallError(
                f'NIS status check returned [{ypwhich.stderr.decode().strip()}]. Setting state to FAULTED.'
            )
        return True
Example #7
    async def started(self):
        ldap = await self.config()
        if not ldap['enable']:
            return False

        try:
            ret = await asyncio.wait_for(self.middleware.call(
                'ldap.get_root_DSE', ldap),
                                         timeout=ldap['timeout'])
        except asyncio.TimeoutError:
            raise CallError(
                f'LDAP status check timed out after {ldap["timeout"]} seconds.',
                errno.ETIMEDOUT)

        if ret:
            await self.__set_state(DSStatus['HEALTHY'])
        else:
            await self.__set_state(DSStatus['FAULTED'])

        return bool(ret)
Example #8
    async def change_trust_account_pw(self):
        """
        Force an update of the AD machine account password. This can be used to
        refresh the Kerberos principals in the server's system keytab.
        """
        await self.middleware.call("kerberos.check_ticket")
        workgroup = (await self.middleware.call('smb.config'))['workgroup']
        cmd = [
            SMBCmd.NET.value,
            '--use-kerberos', 'required',
            '--use-krb5-ccache', krb5ccache.SYSTEM.value,
            '-w', workgroup,
            'ads', 'changetrustpw',
        ]
        netads = await run(cmd, check=False)
        if netads.returncode != 0:
            raise CallError(
                f"Failed to update trust password: [{netads.stderr.decode().strip()}] "
                f"stdout: [{netads.stdout.decode().strip()}] "
            )
Example #9
    async def retrieve(self, ds, data, options):
        who_str = data.get('who')
        who_id = data.get('id')
        if who_str is None and who_id is None:
            raise CallError("`who` or `id` entry is required to uniquely "
                            "identify the entry to be retrieved.")

        tdb_name = f'{ds.lower()}_{data["idtype"].lower()}'
        prefix = "NAME" if who_str else "ID"
        tdb_key = f'{prefix}_{who_str if who_str else who_id}'

        try:
            entry = await self.middleware.call("tdb.fetch", {"name": tdb_name, "key": tdb_key})
        except MatchNotFound:
            entry = None

        if not entry and options['synthesize']:
            """
            If the cache lacks an entry, create one from passwd / grp info,
            insert it into the cache, and return the synthesized value.
            get_uncached_* will raise KeyError if the NSS lookup fails.
            """
            try:
                if data['idtype'] == 'USER':
                    pwdobj = await self.middleware.call('dscache.get_uncached_user',
                                                        who_str, who_id)
                    entry = await self.middleware.call('idmap.synthetic_user',
                                                       ds.lower(), pwdobj)
                else:
                    grpobj = await self.middleware.call('dscache.get_uncached_group',
                                                        who_str, who_id)
                    entry = await self.middleware.call('idmap.synthetic_group',
                                                       ds.lower(), grpobj)
                await self.insert(ds, data['idtype'], entry)
            except KeyError:
                entry = None

        elif not entry:
            raise KeyError(who_str if who_str else who_id)

        return entry
Example #10
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        path = data['path']
        default_perms = data.pop('default_permissions', False)

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingcifs_update', verrors, id=id)
        await self.validate(new, 'sharingcifs_update', verrors, old=old)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               "sharingcifs_update.path", path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.set_storage_tasks(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self.middleware.call('service.reload', 'cifs')
        await self.apply_default_perms(default_perms, path)

        return new
Example #11
    async def process_queue(self, job):
        if not (await self.middleware.call('service.query', [
            ('service', '=', 'glusterd')
        ], {'get': True}))['enable']:
            return

        node = (await self.middleware.call('ctdb.general.status',
                                           {'all_nodes': False}))[0]
        if node['flags_str'] != 'OK':
            raise CallError(
                f'Cannot reload directory service. Node health: {node["flags_str"]}'
            )

        job_list = await self.list()
        for idx, entry in enumerate(job_list.get(node["pnn"], [])):
            p = (100 / len(job_list) * idx)
            job.set_progress(
                p, f'Processing queued job for [{entry["method"]}].')
            if entry['status'] == CLStatus.EXPIRED.name:
                continue

            await self.update_status(entry['key'], CLStatus.RUNNING.name,
                                     entry['timeout'])
            try:
                if entry['payload']:
                    rv = await self.middleware.call(entry['method'],
                                                    entry['payload'])
                else:
                    rv = await self.middleware.call(entry['method'])
            except Exception:
                self.logger.warning(
                    "Cluster cached job for method [%s] failed.",
                    entry['method'],
                    exc_info=True)

            if entry['job']:
                rv = await rv.wait()

            await self.middleware.call('clustercache.pop', entry['key'])

        job.set_progress(100, 'Finished processing queue.')
Example #12
    async def do_update(self, id, data):
        """
        Update SMB Share of `id`.
        """
        verrors = ValidationErrors()
        path = data.get('path')
        default_perms = data.pop('default_permissions', False)

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        new['vuid'] = await self.generate_vuid(new['timemachine'], new['vuid'])
        await self.clean(new, 'sharingsmb_update', verrors, id=id)
        await self.validate(new, 'sharingsmb_update', verrors, old=old)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)  # same here ?

        await self._service_change('cifs', 'reload')
        await self.apply_default_perms(default_perms, path, data['home'])

        return new
Example #13
    async def validate_admin_groups(self, sid):
        """
        Check if group mapping already exists because 'net groupmap addmem' will fail
        if the mapping exists. Remove any entries that should not be present. Extra
        entries here can pose a significant security risk. The only default entry will
        have a RID value of "512" (Domain Admins).
        In LDAP environments, members of S-1-5-32-544 cannot be removed without impacting
        the entire LDAP environment because this alias exists on the remote LDAP server.
        """
        sid_is_present = False
        ldap = await self.middleware.call('datastore.config', 'directoryservice.ldap')
        if ldap['ldap_enable']:
            self.logger.debug("As a safety precaution, extra alias entries for S-1-5-32-544 cannot be removed while LDAP is enabled. Skipping removal.")
            return True
        proc = await Popen(
            ['/usr/local/bin/net', 'groupmap', 'listmem', 'S-1-5-32-544'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        member_list = (await proc.communicate())[0].decode()
        if not member_list:
            return True

        for group in member_list.splitlines():
            group = group.strip()
            if group == sid:
                self.logger.debug(f"SID [{sid}] is already a member of BUILTIN\\administrators")
                sid_is_present = True
            if group.rsplit('-', 1)[-1] != "512" and group != sid:
                self.logger.debug(f"Removing {group} from local admins group.")
                rem = await Popen(
                    ['/usr/local/bin/net', 'groupmap', 'delmem', 'S-1-5-32-544', group],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
                remout = await rem.communicate()
                if rem.returncode != 0:
                    raise CallError(f'Failed to remove sid [{sid}] from S-1-5-32-544: {remout[1].decode()}')

        return not sid_is_present
Example #14
    async def groupmap_delete(self, data):
        ntgroup = data.get("ntgroup")
        sid = data.get("sid")
        if not ntgroup and not sid:
            raise CallError("ntgroup or sid is required")

        if ntgroup:
            target = f"ntgroup={ntgroup}"
        elif sid:
            if sid.startswith("S-1-5-32"):
                self.logger.debug("Refusing to delete group mapping for BUILTIN group: %s", sid)
                return

            target = f"sid={sid}"

        gm_delete = await run(
            [SMBCmd.NET.value, '-d', '0', 'groupmap', 'delete', target], check=False
        )

        if gm_delete.returncode != 0:
            self.logger.debug(f'Failed to delete groupmap for [{target}]: ({gm_delete.stderr.decode()})')
Example #15
    async def _service_change(self, service, verb):

        svc_state = (await
                     self.middleware.call('service.query',
                                          [('service', '=', service)],
                                          {'get': True}))['state'].lower()

        # For now it's hard to keep track of which services change rc.conf.
        # To be safe, run this every time any service is updated.
        # This adds up to ~180ms, so it seems a reasonable workaround for the time being.
        await self.middleware.call('etc.generate', 'rc')

        if svc_state == 'running':
            started = await self.middleware.call(f'service.{verb}', service)

            if not started:
                raise CallError(
                    f'The {service} service failed to start',
                    CallError.ESERVICESTARTFAILURE,
                    [service],
                )
Example #16
    def export(self, identifier, starttime, endtime, aggregate=True):
        for rrd_file in self.get_rrd_files(identifier):
            cp = subprocess.run([
                'rrdtool',
                'info',
                '--daemon',
                'unix:/var/run/rrdcached.sock',
                rrd_file,
            ],
                                capture_output=True,
                                encoding='utf-8')

            if m := RE_LAST_UPDATE.search(cp.stdout):
                last_update = int(m.group(1))
                now = time.time()
                if last_update > now + 1800:  # Tolerance for small system time adjustments
                    raise CallError(
                        f"RRD file {os.path.relpath(rrd_file, self._base_path)} has update time in the future. "
                        f"Data collection will be paused for {humanfriendly.format_timespan(last_update - now)}.",
                        ErrnoMixin.EINVALIDRRDTIMESTAMP,
                    )
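
RE_LAST_UPDATE is a module-level constant not shown in the snippet. `rrdtool info` output contains a line of the form `last_update = <unix timestamp>`, so a plausible definition (an assumption, not taken from the original module) and a quick sanity check would be:

import re

# Assumed pattern for the constant referenced above.
RE_LAST_UPDATE = re.compile(r'last_update = (\d+)')

sample = 'filename = "cpu-0.rrd"\nlast_update = 1700000000\n'
if m := RE_LAST_UPDATE.search(sample):
    print(int(m.group(1)))  # 1700000000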
Example #17
    async def start(self):
        """
        Refuse to start the service if it is already in the process of starting or stopping.
        If the state is 'HEALTHY' or 'FAULTED', stop the service first before restarting it
        to ensure that it begins in a clean state.
        """
        ldap_state = await self.middleware.call('ldap.get_state')
        if ldap_state in ['LEAVING', 'JOINING']:
            raise CallError(
                f'LDAP state is [{ldap_state}]. Please wait until directory service operation completes.',
                errno.EBUSY)

        ldap = await super().do_update({"enable": True})
        if ldap['kerberos_realm']:
            await self.middleware.call('kerberos.start')

        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'nss')
        await self.middleware.call('etc.generate', 'ldap')
        await self.middleware.call('etc.generate', 'pam')

        if not await self.nslcd_status():
            await self.nslcd_cmd('start')
        else:
            await self.nslcd_cmd('restart')

        if ldap['has_samba_schema']:
            await self.middleware.call('smb.initialize_globals')
            await self.synchronize()
            await self.middleware.call('idmap.synchronize')
            await self.middleware.call('smb.store_ldap_admin_password')
            await self.middleware.call('idmap.synchronize')
            await self.middleware.call('service.restart', 'cifs')

        await self.set_state(DSStatus['HEALTHY'])
        await self.middleware.call('service.start', 'dscache')
        ha_mode = await self.middleware.call('smb.get_smb_ha_mode')
        if ha_mode == 'CLUSTERED':
            await self.middleware.call('clusterjob.submit',
                                       'ldap.cluster_reload')
Example #18
    async def net_ads_setspn(self, spn_list):
        """
        Only automatically add NFS SPN entries on domain join
        if kerberized nfsv4 is enabled.
        """
        if not (await self.middleware.call('nfs.config'))['v4_krb']:
            return False

        for spn in spn_list:
            cmd = [
                SMBCmd.NET.value,
                '--use-kerberos', 'required',
                '--use-krb5-ccache', krb5ccache.SYSTEM.value,
                'ads', 'setspn',
                'add', spn,
            ]
            netads = await run(cmd, check=False)
            if netads.returncode != 0:
                raise CallError('failed to set spn entry '
                                f'[{spn}]: {netads.stdout.decode().strip()}')

        return True
Example #19
    async def ldap_conf_to_client_config(self, data=None):
        if data is None:
            data = await self.config()

        if not data['enable']:
            raise CallError("LDAP directory service is not enabled.")

        client_config = {
            "uri_list": data["uri_list"],
            "basedn": data.get("basedn", ""),
            "credentials": {
                "binddn": "",
                "bindpw": "",
            },
            "security": {
                "ssl": data["ssl"],
                "sasl": "SEAL",
                "client_certificate": data["cert_name"],
                "validate_certificates": data["validate_certificates"],
            },
            "options": {
                "timeout": data["timeout"],
                "dns_timeout": data["dns_timeout"],
            }
        }
        if data['anonbind']:
            client_config['bind_type'] = 'ANONYMOUS'
        elif data['cert_name']:
            client_config['bind_type'] = 'EXTERNAL'
        elif data['kerberos_realm']:
            client_config['bind_type'] = 'GSSAPI'
        else:
            client_config['bind_type'] = 'PLAIN'
            client_config['credentials'] = {
                'binddn': data['binddn'],
                'bindpw': data['bindpw']
            }

        return client_config
Example #20
    async def synchronize_passdb(self):
        """
        Create any missing entries in the passdb.tdb.
        Replace NT hashes of users if they do not match what is in the config file.
        Synchronize the "disabled" state of users.
        Delete any entries in the passdb_tdb file that don't exist in the config file.
        """
        if await self.middleware.call('smb.getparm', 'passdb backend',
                                      'global') == 'ldapsam':
            return

        conf_users = await self.middleware.call('user.query', [[
            'OR',
            [
                ('smbhash', '~', r'^.+:.+:[X]{32}:.+$'),
                ('smbhash', '~', r'^.+:.+:[A-F0-9]{32}:.+$'),
            ]
        ]])
        for u in conf_users:
            await self.middleware.call('smb.update_passdb_user', u['username'])

        pdb_users = await self.passdb_list()
        if len(pdb_users) > len(conf_users):
            for entry in pdb_users:
                if not any(
                        filter(lambda x: entry['username'] == x['username'],
                               conf_users)):
                    self.logger.debug(
                        'Synchronizing passdb with config file: deleting user [%s] from passdb.tdb',
                        entry['username'])
                    deluser = await run([
                        SMBCmd.PDBEDIT.value, '-d', '0', '-x',
                        entry['username']
                    ],
                                        check=False)
                    if deluser.returncode != 0:
                        raise CallError(
                            f'Failed to delete user {entry["username"]}: {deluser.stderr.decode()}'
                        )
Example #21
    def getparm(self, parm, section):
        """
        Get a parameter from the smb4.conf file. This is more reliable than
        'testparm --parameter-name'. testparm will fail in a variety of
        conditions without returning the parameter's value.
        """
        try:
            if section.upper() == 'GLOBAL':
                try:
                    LP_CTX.load(SMBPath.GLOBALCONF.platform())
                except Exception as e:
                    self.logger.warning("Failed to reload smb.conf: %s", e)

                return LP_CTX.get(parm)
            else:
                return self.middleware.call_sync('sharing.smb.reg_getparm',
                                                 section, parm)

        except Exception as e:
            raise CallError(
                f'Attempt to query smb4.conf parameter [{parm}] failed with error: {e}'
            )
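
LP_CTX here is a module-level Samba loadparm context. Assuming it comes from Samba's Python bindings (the import is not shown in the snippet), a standalone equivalent of the GLOBAL branch might look like the following; the smb4.conf path is likewise an assumption about what SMBPath.GLOBALCONF.platform() resolves to:

# Sketch only: requires the samba Python bindings.
from samba.param import LoadParm

lp = LoadParm()
lp.load('/usr/local/etc/smb4.conf')  # assumed global config path
print(lp.get('workgroup'))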
Example #22
    async def diff_middleware_and_registry(self, share, data):
        if share is None:
            raise CallError('Share name must be specified.')

        if data is None:
            data = await self.middleware.call('sharing.smb.query', [('name', '=', share)], {'get': True})

        await self.middleware.call('sharing.smb.strip_comments', data)
        share_conf = await self.share_to_smbconf(data)
        try:
            reg_conf = await self.reg_showshare(share if not data['home'] else 'homes')
        except Exception:
            return None

        s_keys = set(share_conf.keys())
        r_keys = set(reg_conf.keys())
        intersect = s_keys.intersection(r_keys)
        return {
            'added': {x: share_conf[x] for x in s_keys - r_keys},
            'removed': {x: reg_conf[x] for x in r_keys - s_keys},
            'modified': {x: (share_conf[x], reg_conf[x]) for x in intersect if share_conf[x] != reg_conf[x]},
        }
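
The added/removed/modified split is plain set arithmetic over the two parameter dicts. A standalone illustration with hypothetical share parameters:

share_conf = {'path': '/mnt/tank/share', 'read only': 'No', 'guest ok': 'Yes'}
reg_conf = {'path': '/mnt/tank/share', 'read only': 'Yes', 'browseable': 'Yes'}

s_keys, r_keys = set(share_conf), set(reg_conf)
diff = {
    'added': {k: share_conf[k] for k in s_keys - r_keys},
    'removed': {k: reg_conf[k] for k in r_keys - s_keys},
    'modified': {k: (share_conf[k], reg_conf[k])
                 for k in s_keys & r_keys if share_conf[k] != reg_conf[k]},
}
print(diff)
# {'added': {'guest ok': 'Yes'}, 'removed': {'browseable': 'Yes'},
#  'modified': {'read only': ('No', 'Yes')}}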
Example #23
    async def time_info(self, job):
        nodes = await self.middleware.call('ctdb.general.status')
        for node in nodes:
            if not node['flags_str'] == 'OK':
                raise CallError(
                    f'Cluster node {node["pnn"]} is unhealthy. Unable to retrieve time info.'
                )
            if node['this_node']:
                my_node = node['pnn']

        tz = (await self.middleware.call('datastore.config',
                                         'system.settings'))['stg_timezone']

        cl_job = await self.middleware.call('clusterjob.submit',
                                            'cluster.utils.time_callback',
                                            my_node)
        ntp_peer = await self.middleware.call('system.ntpserver.peers',
                                              [('status', '$', 'PEER')])
        my_time = time.clock_gettime(time.CLOCK_REALTIME)
        await cl_job.wait(raise_error=True)

        key_prefix = f'{my_node}_cluster_time_req_'
        responses = []
        for node in nodes:
            if node['this_node']:
                continue

            node_resp = await self.middleware.call(
                'clustercache.pop', f'{key_prefix}{node["pnn"]}')
            responses.append(node_resp)

        responses.append({
            "clock_realtime": my_time,
            "tz": tz,
            "node": my_node,
            "ntp_peer": ntp_peer[0] if ntp_peer else None
        })
        return responses
Example #24
    async def start(self):
        """
        Refuse to start the service if it is already in the process of starting or stopping.
        If the state is 'HEALTHY' or 'FAULTED', stop the service first before restarting it
        to ensure that it begins in a clean state.
        """
        ldap = await self.config()

        ldap_state = await self.middleware.call('ldap.get_state')
        if ldap_state in ['LEAVING', 'JOINING']:
            raise CallError(
                f'LDAP state is [{ldap_state}]. Please wait until directory service operation completes.',
                errno.EBUSY)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   ldap['id'], {'ldap_enable': True})
        if ldap['kerberos_realm']:
            await self.middleware.call('kerberos.start')

        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'nss')
        await self.middleware.call('etc.generate', 'ldap')
        await self.middleware.call('etc.generate', 'pam')
        has_samba_schema = True if (
            await self.middleware.call('ldap.get_workgroup')) else False

        if not await self.nslcd_status():
            await self.nslcd_cmd('onestart')
        else:
            await self.nslcd_cmd('onerestart')

        if has_samba_schema:
            await self.middleware.call('etc.generate', 'smb')
            await self.middleware.call('smb.store_ldap_admin_password')
            await self.middleware.call('service.restart', 'cifs')

        await self.set_state(DSStatus['HEALTHY'])
        await self.middleware.call('ldap.fill_cache')
Example #25
    def exec(self, jail, command, options):
        """Issues a command inside a jail."""
        _, _, iocage = self.check_jail_existence(jail, skip=False)

        host_user = options["host_user"]
        jail_user = options.get("jail_user", None)

        if isinstance(command[0], list):
            # iocage wants a flat list, not a list inside a list
            command = list(itertools.chain.from_iterable(command))

        # We may be getting ';', '&&' and so forth. Adding the shell for
        # safety.
        if len(command) == 1:
            command = ["/bin/sh", "-c"] + command

        host_user = "" if jail_user and host_user == "root" else host_user
        try:
            msg = iocage.exec(command, host_user, jail_user, msg_return=True)
        except RuntimeError as e:
            raise CallError(e)

        return '\n'.join(msg)
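
Two small preprocessing steps above are easy to check in isolation: flattening a nested argv list with itertools.chain.from_iterable, and wrapping a single-element command in a shell so operators like '&&' keep working. A quick illustration with hypothetical commands:

import itertools

nested = [['ls', '-l', '/root']]
print(list(itertools.chain.from_iterable(nested)))  # ['ls', '-l', '/root']

command = ['echo hello && id']  # one string containing shell operators
if len(command) == 1:
    command = ['/bin/sh', '-c'] + command
print(command)  # ['/bin/sh', '-c', 'echo hello && id']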
Example #26
    def set_threadpool_mode(self, pool_mode):
        """
        Control how the NFS server code allocates CPUs to
        service thread pools.  Depending on how many NICs
        you have and where their interrupts are bound, this
        option will affect which CPUs will do NFS serving.
        Note: this parameter cannot be changed while the
        NFS server is running.

        auto        the server chooses an appropriate mode
                    automatically using heuristics
        global      a single global pool contains all CPUs
        percpu      one pool for each CPU
        pernode     one pool for each NUMA node (equivalent
                    to global on non-NUMA machines)
        """
        try:
            with open("/sys/module/sunrpc/parameters/pool_mode", "w") as f:
                f.write(pool_mode.lower())
        except OSError as e:
            raise CallError(
                "NFS service must be stopped before threadpool mode changes",
                errno=e.errno)
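
As a companion to the setter above, the current mode can be read back from the same sysfs parameter; the helper itself is hypothetical, only the path is taken from the snippet:

def get_threadpool_mode():
    # Returns the current sunrpc pool mode, e.g. "auto", "global" or "percpu".
    with open("/sys/module/sunrpc/parameters/pool_mode") as f:
        return f.read().strip()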
Example #27
    def port_is_listening(self, host, port, timeout=1):
        ret = False

        try:
            ipaddress.IPv6Address(host)
            s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        except ipaddress.AddressValueError:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        if timeout:
            s.settimeout(timeout)

        try:
            s.connect((host, port))
            ret = True

        except Exception as e:
            raise CallError(e)

        finally:
            s.close()

        return ret
Example #28
    async def wait_for_method(self, job, method, percent):
        current_node_list = []
        prefix = f'CLJOB_{method}_'

        for i in range(10):
            nodes = []
            entries = await self.middleware.call('clustercache.query',
                                                 [('key', '^', prefix)])
            if not entries:
                return

            for entry in entries:
                nodes.append(int(entry['key'][len(prefix):]))

            current_node_list = nodes.copy()
            job.set_progress(
                percent,
                f'Waiting for nodes {current_node_list} to complete {method}.')
            await asyncio.sleep(5)

        raise CallError(
            f"Timed out waiting for nodes {current_node_list} to complete {method}.",
            errno.ETIMEDOUT)
Example #29
    async def get_workgroup(self, ldap=None):
        ret = None
        smb = await self.middleware.call('smb.config')
        if ldap is None:
            ldap = await self.config()

        try:
            ret = await asyncio.wait_for(self.middleware.call('ldap.get_samba_domains', ldap),
                                         timeout=ldap['timeout'])
        except asyncio.TimeoutError:
            raise CallError(f'ldap.get_workgroup timed out after {ldap["timeout"]} seconds.', errno.ETIMEDOUT)

        if len(ret) > 1:
            self.logger.warning('Multiple Samba domains detected in the LDAP environment; '
                                'auto-configuration of the workgroup map has failed: %s', ret)

        ret = ret[0]['data']['sambaDomainName'][0] if ret else []

        if ret and smb['workgroup'] != ret:
            self.logger.debug(f'Updating SMB workgroup to match the LDAP domain name [{ret}]')
            await self.middleware.call('datastore.update', 'services.cifs', smb['id'], {'cifs_srv_workgroup': ret})

        return ret
Example #30
    def create_job(self, job, options):
        iocage = ioc.IOCage(skip_jails=True)

        release = options["release"]
        template = options.get("template", False)
        pkglist = options.get("pkglist", None)
        uuid = options.get("uuid", None)
        basejail = options["basejail"]
        empty = options["empty"]
        short = options["short"]
        props = options["props"]
        pool = IOCJson().json_get_value("pool")
        iocroot = IOCJson(pool).json_get_value("iocroot")

        if template:
            release = template

        if not os.path.isdir(f"{iocroot}/releases/{release}") and not \
                template and not empty:
            self.middleware.call_sync('jail.fetch', {
                "release": release
            }).wait_sync()

        err, msg = iocage.create(release,
                                 props,
                                 0,
                                 pkglist,
                                 template=template,
                                 short=short,
                                 _uuid=uuid,
                                 basejail=basejail,
                                 empty=empty)

        if err:
            raise CallError(msg)

        return True