Example #1
class DiskService(Service):
    @private
    @lock('swaps_configure')
    @accepts(List('disks', items=[Str('disk')]),
             Dict('swap_removal_options',
                  Bool('configure_swap', default=True),
                  register=True))
    async def swaps_remove_disks(self, disks, options):
        """
        Remove the given disks (e.g. ["da0", "da1"]) from swap.
        Each disk is taken offline from swap, its encryption is removed and the mirror is destroyed (if it is part of one).
        """
        return await self.swaps_remove_disks_unlocked(disks, options)

    @private
    async def swaps_remove_disks_unlocked(self, disks, options=None):
        """
        We have a separate unlocked endpoint to ensure that no other swap-related operation removes swap devices
        while swap configuration is in progress - the swap configuration process itself, however, still needs to be
        able to remove swap devices and can call this endpoint directly for that purpose.
        """
        options = options or {}
        providers = {}
        for disk in disks:
            partitions = await self.middleware.call('disk.list_partitions',
                                                    disk)
            if not partitions:
                continue
            for p in partitions:
                if p['partition_type'] in await self.middleware.call(
                        'disk.get_valid_swap_partition_type_uuids'):
                    providers[p['id']] = p
                    break

        if not providers:
            return

        swap_devices = await self.middleware.call('disk.get_swap_devices')
        for mirror in await self.middleware.call('disk.get_swap_mirrors'):
            devname = mirror['encrypted_provider'] or mirror['real_path']
            if devname in swap_devices:
                await run('swapoff', devname)
            if mirror['encrypted_provider']:
                await self.middleware.call('disk.remove_encryption',
                                           mirror['encrypted_provider'])
            await self.middleware.call('disk.destroy_swap_mirror',
                                       mirror['name'])

        configure_swap = False
        for p in providers.values():
            devname = p['encrypted_provider'] or p['path']
            if devname in swap_devices:
                await run('swapoff', devname)
            if p['encrypted_provider']:
                await self.middleware.call('disk.remove_encryption',
                                           p['encrypted_provider'])
            if not IS_LINUX and os.path.realpath('/dev/dumpdev') == p['path']:
                configure_swap = True
                try:
                    os.unlink('/dev/dumpdev')
                except OSError:
                    pass

        # Let consumer explicitly deny swap configuration if desired
        if configure_swap and options.get('configure_swap', True):
            asyncio.ensure_future(self.middleware.call('disk.swaps_configure'))
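
A minimal usage sketch (not part of the original source): since swaps_remove_disks is a private endpoint, other middleware code reaches it through middleware.call, just as the snippet itself does for disk.list_partitions. The function name and disk names below are illustrative only.

async def remove_disks_from_swap_example(middleware):
    # Take both disks out of swap, but skip the automatic follow-up swap
    # reconfiguration (the 'configure_swap' option shown above).
    await middleware.call(
        'disk.swaps_remove_disks',
        ['da0', 'da1'],
        {'configure_swap': False},
    )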
Example #2
class ServiceService(CRUDService):

    SERVICE_DEFS = {
        's3': ServiceDefinition('minio', '/var/run/minio.pid'),
        'ssh': ServiceDefinition('sshd', '/var/run/sshd.pid'),
        'rsync': ServiceDefinition('rsync', '/var/run/rsyncd.pid'),
        'nfs': ServiceDefinition('nfsd', None),
        'afp': ServiceDefinition('netatalk', None),
        'cifs': ServiceDefinition('smbd', '/var/run/samba4/smbd.pid'),
        'dynamicdns': ServiceDefinition('inadyn', None),
        'snmp': ServiceDefinition('snmpd', '/var/run/net_snmpd.pid'),
        'ftp': ServiceDefinition('proftpd', '/var/run/proftpd.pid'),
        'tftp': ServiceDefinition('inetd', '/var/run/inetd.pid'),
        'iscsitarget': ServiceDefinition('ctld', '/var/run/ctld.pid'),
        'lldp': ServiceDefinition('ladvd', '/var/run/ladvd.pid'),
        'ups': ServiceDefinition('upsd', '/var/db/nut/upsd.pid'),
        'upsmon': ServiceDefinition('upsmon', '/var/db/nut/upsmon.pid'),
        'smartd': ServiceDefinition('smartd', 'smartd-daemon', '/var/run/smartd-daemon.pid'),
        'webshell': ServiceDefinition(None, '/var/run/webshell.pid'),
        'webdav': ServiceDefinition('httpd', '/var/run/httpd.pid'),
        'netdata': ServiceDefinition('netdata', '/var/db/netdata/netdata.pid')
    }
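
    # Not part of the original file - a hedged sketch of what ServiceDefinition
    # presumably looks like, inferred from how the entries above are built (two
    # or three positional arguments) and how they are read below via .procname,
    # .rc_script and .pidfile in _simplecmd() and _started():
    #
    #   class ServiceDefinition:
    #       def __init__(self, *args):
    #           if len(args) == 2:
    #               # (procname, pidfile); the rc.d script presumably shares
    #               # the process name
    #               self.procname, self.pidfile = args
    #               self.rc_script = self.procname
    #           else:
    #               # (procname, rc_script, pidfile), e.g. the 'smartd' entry
    #               self.procname, self.rc_script, self.pidfile = args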

    @filterable
    async def query(self, filters=None, options=None):
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query', 'services.services', filters, options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(self._get_status(entry)): entry
            for entry in services
        }
        await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Handle the result of each status task.
            If a task failed or timed out, report the service state as UNKNOWN.
            """
            try:
                result = task.result()
            except Exception:
                result = None
                self.logger.warn('Failed to get status', exc_info=True)
            if result is None:
                entry = jobs.get(task)
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
                return entry
            else:
                return result

        services = list(map(result, jobs))
        return filter_list(services, filters, options)

    @accepts(
        Int('id'),
        Dict(
            'service-update',
            Bool('enable', default=False),
        ),
    )
    async def do_update(self, id, data):
        """
        Update service entry of `id`.

        Currently it only accepts the `enable` option, which controls whether
        the service should start on boot.

        """
        return await self.middleware.call('datastore.update', 'services.services', id, {'srv_enable': data['enable']})

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime', default=True),
            register=True,
        ),
    )
    async def start(self, service, options=None):
        """ Start the service specified by `service`.

        The helper will use method self._start_[service]() to start the service.
        If the method does not exist, it falls back to using service(8)."""
        await self.middleware.call_hook('service.pre_start', service)
        sn = self._started_notify("start", service)
        await self._simplecmd("start", service, options)
        return await self.started(service, sn)

    async def started(self, service, sn=None):
        """
        Test if service specified by `service` has been started.
        """
        if sn:
            await self.middleware.run_in_thread(sn.join)

        try:
            svc = await self.query([('service', '=', service)], {'get': True})
            self.middleware.send_event('service.query', 'CHANGED', fields=svc)
            return svc['state'] == 'RUNNING'
        except IndexError:
            f = getattr(self, '_started_' + service, None)
            if callable(f):
                if inspect.iscoroutinefunction(f):
                    return (await f())[0]
                else:
                    return f()[0]
            else:
                return (await self._started(service))[0]

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def stop(self, service, options=None):
        """ Stop the service specified by `service`.

        The helper will use method self._stop_[service]() to stop the service.
        If the method does not exist, it falls back to using service(8)."""
        await self.middleware.call_hook('service.pre_stop', service)
        sn = self._started_notify("stop", service)
        await self._simplecmd("stop", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def restart(self, service, options=None):
        """
        Restart the service specified by `service`.

        The helper will use method self._restart_[service]() to restart the service.
        If the method does not exist, it falls back to using service(8)."""
        await self.middleware.call_hook('service.pre_restart', service)
        sn = self._started_notify("restart", service)
        await self._simplecmd("restart", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def reload(self, service, options=None):
        """
        Reload the service specified by `service`.

        The helper will use method self._reload_[service]() to reload the service.
        If the method does not exist, the helper falls back to calling
        self.restart on the service instead."""
        await self.middleware.call_hook('service.pre_reload', service)
        try:
            await self._simplecmd("reload", service, options)
        except Exception as e:
            await self.restart(service, options)
        return await self.started(service)

    async def _get_status(self, service):
        f = getattr(self, '_started_' + service['service'], None)
        if callable(f):
            if inspect.iscoroutinefunction(f):
                running, pids = await f()
            else:
                running, pids = f()
        else:
            running, pids = await self._started(service['service'])

        if running:
            state = 'RUNNING'
        else:
            if service['enable']:
                state = 'CRASHED'
            else:
                state = 'STOPPED'

        service['state'] = state
        service['pids'] = pids
        return service

    async def _simplecmd(self, action, what, options=None):
        self.logger.debug("Calling: %s(%s) ", action, what)
        f = getattr(self, '_' + action + '_' + what, None)
        if f is None:
            # Provide generic start/stop/restart verbs for rc.d scripts
            if what in self.SERVICE_DEFS:
                if self.SERVICE_DEFS[what].rc_script:
                    what = self.SERVICE_DEFS[what].rc_script
            if action in ("start", "stop", "restart", "reload"):
                if action == 'restart':
                    await self._system("/usr/sbin/service " + what + " forcestop ")
                await self._service(what, action, **(options or {}))
            else:
                raise ValueError("Internal error: Unknown command")
        else:
            call = f(**(options or {}))
            if inspect.iscoroutinefunction(f):
                await call

    async def _system(self, cmd, options=None):
        stdout = DEVNULL
        if options and 'stdout' in options:
            stdout = options['stdout']
        stderr = DEVNULL
        if options and 'stderr' in options:
            stderr = options['stderr']

        proc = await Popen(cmd, stdout=stdout, stderr=stderr, shell=True, close_fds=True)
        await proc.communicate()
        return proc.returncode

    async def _service(self, service, verb, **options):
        onetime = options.pop('onetime', None)
        force = options.pop('force', None)
        quiet = options.pop('quiet', None)
        extra = options.pop('extra', '')

        # The prefixes are mutually exclusive: force takes precedence over
        # onetime, which takes precedence over quiet.
        preverb = ''
        if force:
            preverb = 'force'
        elif onetime:
            preverb = 'one'
        elif quiet:
            preverb = 'quiet'
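        # e.g. verb="start" with onetime=True yields
        # "/usr/sbin/service <service> onestart"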

        return await self._system('/usr/sbin/service {} {}{} {}'.format(
            service,
            preverb,
            verb,
            extra,
        ), options)

    def _started_notify(self, verb, what):
        """
        The check for started (or stopped) processes is currently done in two steps.
        This is the first step: it spawns a StartNotify thread that watches for events
        before the rc.d scripts are actually started/stopped.

        Returns:
            a StartNotify object if the service is known, None otherwise
        """

        if what in self.SERVICE_DEFS:
            sn = StartNotify(verb=verb, pidfile=self.SERVICE_DEFS[what].pidfile)
            sn.start()
            return sn
        else:
            return None

    async def _started(self, what, notify=None):
        """
        This is the second step:
        wait for the StartNotify thread to finish and then check the status
        of the pidfile/procname using pgrep.

        Returns:
            a (running, pids) tuple - True plus the matching process ids if
            the service is alive, (False, []) otherwise
        """

        if what in self.SERVICE_DEFS:
            if notify:
                await self.middleware.run_in_thread(notify.join)

            if self.SERVICE_DEFS[what].pidfile:
                pgrep = "/bin/pgrep -F {}{}".format(
                    self.SERVICE_DEFS[what].pidfile,
                    ' ' + self.SERVICE_DEFS[what].procname if self.SERVICE_DEFS[what].procname else '',
                )
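                # e.g. "/bin/pgrep -F /var/run/sshd.pid sshd" for the 'ssh' entry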
            else:
                pgrep = "/bin/pgrep {}".format(self.SERVICE_DEFS[what].procname)
            proc = await Popen(pgrep, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
            data = (await proc.communicate())[0].decode()

            if proc.returncode == 0:
                return True, [
                    int(i)
                    for i in data.strip().split('\n') if i.isdigit()
                ]
        return False, []

    async def _start_webdav(self, **kwargs):
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "start", **kwargs)

    async def _stop_webdav(self, **kwargs):
        await self._service("apache24", "stop", **kwargs)

    async def _restart_webdav(self, **kwargs):
        await self._service("apache24", "stop", force=True, **kwargs)
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "restart", **kwargs)

    async def _reload_webdav(self, **kwargs):
        await self._service("ix-apache", "start", force=True, **kwargs)
        await self._service("apache24", "reload", **kwargs)

    async def _restart_django(self, **kwargs):
        await self._service("django", "restart", **kwargs)

    async def _start_webshell(self, **kwargs):
        await self._system("/usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_webshell(self, **kwargs):
        try:
            with open('/var/run/webshell.pid', 'r') as f:
                pid = f.read()
                os.kill(int(pid), signal.SIGTERM)
                time.sleep(0.2)
                os.kill(int(pid), signal.SIGKILL)
        except Exception:
            pass
        await self._system("ulimit -n 1024 && /usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "restart", **kwargs)

    async def _start_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "start", **kwargs)

    async def _stop_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "stop", force=True, **kwargs)
        await self._service("ctld", "stop", force=True, **kwargs)

    async def _reload_iscsitarget(self, **kwargs):
        await self._service("ix-ctld", "start", quiet=True, **kwargs)
        await self._service("ctld", "reload", **kwargs)

    async def _start_collectd(self, **kwargs):
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "restart", **kwargs)

    async def _restart_collectd(self, **kwargs):
        await self._service("collectd", "stop", **kwargs)
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "start", **kwargs)

    async def _start_sysctl(self, **kwargs):
        await self._service("sysctl", "start", **kwargs)
        await self._service("ix-sysctl", "start", quiet=True, **kwargs)

    async def _reload_sysctl(self, **kwargs):
        await self._service("sysctl", "start", **kwargs)
        await self._service("ix-sysctl", "reload", **kwargs)

    async def _start_network(self, **kwargs):
        await self.middleware.call('interfaces.sync')
        await self.middleware.call('routes.sync')

    async def _stop_jails(self, **kwargs):
        for jail in await self.middleware.call('datastore.query', 'jails.jails'):
            try:
                await self.middleware.call('notifier.warden', 'stop', [], {'jail': jail['jail_host']})
            except Exception as e:
                self.logger.debug(f'Failed to stop jail {jail["jail_host"]}', exc_info=True)

    async def _start_jails(self, **kwargs):
        await self._service("ix-warden", "start", **kwargs)
        for jail in await self.middleware.call('datastore.query', 'jails.jails'):
            if jail['jail_autostart']:
                try:
                    await self.middleware.call('notifier.warden', 'start', [], {'jail': jail['jail_host']})
                except Exception as e:
                    self.logger.debug(f'Failed to start jail {jail["jail_host"]}', exc_info=True)
        await self._service("ix-plugins", "start", **kwargs)
        await self.reload("http", kwargs)

    async def _restart_jails(self, **kwargs):
        await self._stop_jails()
        await self._start_jails()

    async def _stop_pbid(self, **kwargs):
        await self._service("pbid", "stop", **kwargs)

    async def _start_pbid(self, **kwargs):
        await self._service("pbid", "start", **kwargs)

    async def _restart_pbid(self, **kwargs):
        await self._service("pbid", "restart", **kwargs)

    async def _reload_named(self, **kwargs):
        await self._service("named", "reload", **kwargs)

    async def _reload_hostname(self, **kwargs):
        await self._system('/bin/hostname ""')
        await self._service("ix-hostname", "start", quiet=True, **kwargs)
        await self._service("hostname", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", quiet=True, **kwargs)
        await self._service("collectd", "stop", **kwargs)
        await self._service("ix-collectd", "start", quiet=True, **kwargs)
        await self._service("collectd", "start", **kwargs)

    async def _reload_resolvconf(self, **kwargs):
        await self._reload_hostname()
        await self._service("ix-resolv", "start", quiet=True, **kwargs)

    async def _reload_networkgeneral(self, **kwargs):
        await self._reload_resolvconf()
        await self._service("routing", "restart", **kwargs)

    async def _reload_timeservices(self, **kwargs):
        await self._service("ix-localtime", "start", quiet=True, **kwargs)
        await self._service("ix-ntpd", "start", quiet=True, **kwargs)
        await self._service("ntpd", "restart", **kwargs)
        os.environ['TZ'] = (await self.middleware.call('datastore.query', 'system.settings', [], {'order_by': ['-id'], 'get': True}))['stg_timezone']
        time.tzset()

    async def _restart_smartd(self, **kwargs):
        await self._service("ix-smartd", "start", quiet=True, **kwargs)
        await self._service("smartd-daemon", "stop", force=True, **kwargs)
        await self._service("smartd-daemon", "restart", **kwargs)

    async def _reload_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "reload", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "start", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _stop_ssh(self, **kwargs):
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)

    async def _restart_ssh(self, **kwargs):
        await self._service("ix-sshd", "start", quiet=True, **kwargs)
        await self._service("openssh", "stop", force=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("openssh", "restart", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssl(self, what=None):
        if what is not None:
            await self._service("ix-ssl", "start", quiet=True, extra=what)
        else:
            await self._service("ix-ssl", "start", quiet=True)

    async def _start_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio", "start", quiet=True, stdout=None, stderr=None, **kwargs)

    async def _reload_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio", "restart", quiet=True, stdout=None, stderr=None, **kwargs)

    async def _reload_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "restart", **kwargs)

    async def _restart_rsync(self, **kwargs):
        await self._stop_rsync()
        await self._start_rsync()

    async def _start_rsync(self, **kwargs):
        await self._service("ix-rsyncd", "start", quiet=True, **kwargs)
        await self._service("rsyncd", "start", **kwargs)

    async def _stop_rsync(self, **kwargs):
        await self._service("rsyncd", "stop", force=True, **kwargs)

    async def _started_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl status"):
            res = True
        return res, []

    async def _start_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl start"):
            res = True
        return res

    async def _restart_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl restart"):
            res = True
        return res

    async def _stop_nis(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/NIS/ctl stop"):
            res = True
        return res

    async def _started_ldap(self, **kwargs):
        if (await self._system('/usr/sbin/service ix-ldap status') != 0):
            return False, []
        return await self.middleware.call('notifier.ldap_status'), []

    async def _start_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl start"):
            res = True
        return res

    async def _stop_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl stop"):
            res = True
        return res

    async def _restart_ldap(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl restart"):
            res = True
        return res

    async def _start_lldp(self, **kwargs):
        await self._service("ladvd", "start", **kwargs)

    async def _stop_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)

    async def _restart_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)
        await self._service("ladvd", "restart", **kwargs)

    async def _clear_activedirectory_config(self):
        await self._system("/bin/rm -f /etc/directoryservice/ActiveDirectory/config")

    async def _started_activedirectory(self, **kwargs):
        for srv in ('kinit', 'activedirectory', ):
            if await self._system('/usr/sbin/service ix-%s status' % (srv, )) != 0:
                return False, []
        if await self._system('/usr/local/bin/wbinfo -p') != 0:
            return False, []
        if await self._system('/usr/local/bin/wbinfo -t') != 0:
            return False, []
        return True, []

    async def _start_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl start"):
            res = True
        return res

    async def _stop_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl stop"):
            res = True
        return res

    async def _restart_activedirectory(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/ActiveDirectory/ctl restart"):
            res = True
        return res

    async def _started_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl status"):
            res = True
        return res, []

    async def _start_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl start"):
            res = True
        return res

    async def _stop_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl stop"):
            res = True
        return res

    async def _restart_domaincontroller(self, **kwargs):
        res = False
        if not await self._system("/etc/directoryservice/DomainController/ctl restart"):
            res = True
        return res

    async def _restart_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng restart")

    async def _start_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng start")

    async def _stop_syslogd(self, **kwargs):
        await self._system("/etc/local/rc.d/syslog-ng stop")

    async def _reload_syslogd(self, **kwargs):
        await self._service("ix-syslogd", "start", quiet=True, **kwargs)
        await self._system("/etc/local/rc.d/syslog-ng reload")

    async def _start_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "start", **kwargs)

    async def _reload_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_tftp(self, **kwargs):
        await self._service("ix-inetd", "start", quiet=True, **kwargs)
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_cron(self, **kwargs):
        await self._service("ix-crontab", "start", quiet=True, **kwargs)

    async def _start_motd(self, **kwargs):
        await self._service("ix-motd", "start", quiet=True, **kwargs)
        await self._service("motd", "start", quiet=True, **kwargs)

    async def _start_ttys(self, **kwargs):
        await self._service("ix-ttys", "start", quiet=True, **kwargs)

    async def _reload_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "restart", **kwargs)

    async def _restart_ftp(self, **kwargs):
        await self._stop_ftp()
        await self._start_ftp()

    async def _start_ftp(self, **kwargs):
        await self._service("ix-proftpd", "start", quiet=True, **kwargs)
        await self._service("proftpd", "start", **kwargs)

    async def _stop_ftp(self, **kwargs):
        await self._service("proftpd", "stop", force=True, **kwargs)

    async def _start_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "start", **kwargs)
        await self._service("nut_upsmon", "start", **kwargs)
        await self._service("nut_upslog", "start", **kwargs)

    async def _stop_ups(self, **kwargs):
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)

    async def _restart_ups(self, **kwargs):
        await self._service("ix-ups", "start", quiet=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut", "restart", **kwargs)
        await self._service("nut_upsmon", "restart", **kwargs)
        await self._service("nut_upslog", "restart", **kwargs)

    async def _started_ups(self, **kwargs):
        mode = (await self.middleware.call('datastore.query', 'services.ups', [], {'order_by': ['-id'], 'get': True}))['ups_mode']
        if mode == "master":
            svc = "ups"
        else:
            svc = "upsmon"
        return await self._started(svc)

    async def _start_afp(self, **kwargs):
        await self._service("ix-afpd", "start", **kwargs)
        await self._service("netatalk", "start", **kwargs)

    async def _stop_afp(self, **kwargs):
        await self._service("netatalk", "stop", force=True, **kwargs)
        # When netatalk stops, if afpd or cnid_metad are stuck they get left
        # behind, which can cause issues when netatalk is restarted.
        await self._system("pkill -9 afpd")
        await self._system("pkill -9 cnid_metad")

    async def _restart_afp(self, **kwargs):
        await self._stop_afp()
        await self._start_afp()

    async def _reload_afp(self, **kwargs):
        await self._service("ix-afpd", "start", quiet=True, **kwargs)
        await self._system("killall -1 netatalk")

    async def _reload_nfs(self, **kwargs):
        await self._service("ix-nfsd", "start", quiet=True, **kwargs)

    async def _restart_nfs(self, **kwargs):
        await self._stop_nfs(**kwargs)
        await self._start_nfs(**kwargs)

    async def _stop_nfs(self, **kwargs):
        await self._service("lockd", "stop", force=True, **kwargs)
        await self._service("statd", "stop", force=True, **kwargs)
        await self._service("nfsd", "stop", force=True, **kwargs)
        await self._service("mountd", "stop", force=True, **kwargs)
        await self._service("nfsuserd", "stop", force=True, **kwargs)
        await self._service("gssd", "stop", force=True, **kwargs)
        await self._service("rpcbind", "stop", force=True, **kwargs)
        if not await self.middleware.call('system.is_freenas'):
            await self._service("vaaiserver", "stop", force=True, **kwargs)

    async def _start_nfs(self, **kwargs):
        nfs = await self.middleware.call('datastore.config', 'services.nfs')
        await self._service("ix-nfsd", "start", quiet=True, **kwargs)
        await self._service("rpcbind", "start", quiet=True, **kwargs)
        await self._service("gssd", "start", quiet=True, **kwargs)
        # Workaround to make "onetime" work, since the rc scripts depend on rc flags.
        if nfs['nfs_srv_v4']:
            sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 4
            if nfs['nfs_srv_v4_v3owner']:
                # Per RFC 7530, sending NFSv3 style UID/GIDs across the wire is now allowed.
                # Both of these sysctls must be set to enable the desired functionality.
                sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 1
                sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 1
                await self._service("nfsuserd", "stop", force=True, **kwargs)
            else:
                sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 0
                sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 0
                await self._service("nfsuserd", "start", quiet=True, **kwargs)
        else:
            sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 3
            if nfs['nfs_srv_16']:
                await self._service("nfsuserd", "start", quiet=True, **kwargs)
        await self._service("mountd", "start", quiet=True, **kwargs)
        await self._service("nfsd", "start", quiet=True, **kwargs)
        await self._service("statd", "start", quiet=True, **kwargs)
        await self._service("lockd", "start", quiet=True, **kwargs)
        if not await self.middleware.call('system.is_freenas'):
            await self._service("vaaiserver", "start", quiet=True, **kwargs)

    async def _force_stop_jail(self, **kwargs):
        await self._service("jail", "stop", force=True, **kwargs)

    async def _start_plugins(self, jail=None, plugin=None, **kwargs):
        if jail and plugin:
            await self._system("/usr/sbin/service ix-plugins forcestart %s:%s" % (jail, plugin))
        else:
            await self._service("ix-plugins", "start", force=True, **kwargs)

    async def _stop_plugins(self, jail=None, plugin=None, **kwargs):
        if jail and plugin:
            await self._system("/usr/sbin/service ix-plugins forcestop %s:%s" % (jail, plugin))
        else:
            await self._service("ix-plugins", "stop", force=True, **kwargs)

    async def _restart_plugins(self, jail=None, plugin=None):
        await self._stop_plugins(jail=jail, plugin=plugin)
        await self._start_plugins(jail=jail, plugin=plugin)

    async def _started_plugins(self, jail=None, plugin=None, **kwargs):
        res = False
        if jail and plugin:
            if await self._system("/usr/sbin/service ix-plugins status %s:%s" % (jail, plugin)) == 0:
                res = True
        else:
            if await self._service("ix-plugins", "status", **kwargs) == 0:
                res = True
        return res, []

    async def _restart_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _reload_dynamicdns(self, **kwargs):
        await self._service("ix-inadyn", "start", quiet=True, **kwargs)
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _restart_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -r now"))

    async def _stop_system(self, **kwargs):
        asyncio.ensure_future(self._system("/bin/sleep 3 && /sbin/shutdown -p now"))

    async def _reload_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "reload", force=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdns is restarted we need to reload netatalk so that it
        # re-registers with mdns. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _restart_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "restart", quiet=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdns is restarted we need to reload netatalk so that it
        # re-registers with mdns. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _start_cifs(self, **kwargs):
        await self._service("ix-pre-samba", "start", quiet=True, **kwargs)
        await self._service("samba_server", "start", quiet=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)

    async def _stop_cifs(self, **kwargs):
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("ix-post-samba", "start", quiet=True, **kwargs)

    async def _start_snmp(self, **kwargs):
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _stop_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", quiet=True, **kwargs)

    async def _restart_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _reload_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self._service("ix-snmpd", "start", quiet=True, **kwargs)
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _restart_http(self, **kwargs):
        await self._service("ix-nginx", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "restart", **kwargs)

    async def _reload_http(self, **kwargs):
        await self._service("ix-nginx", "start", quiet=True, **kwargs)
        await self._service("ix_register", "reload", **kwargs)
        await self._service("nginx", "reload", **kwargs)

    async def _reload_loader(self, **kwargs):
        await self._service("ix-loader", "reload", **kwargs)

    async def _start_loader(self, **kwargs):
        await self._service("ix-loader", "start", quiet=True, **kwargs)

    async def __saver_loaded(self):
        pipe = os.popen("kldstat|grep daemon_saver")
        out = pipe.read().strip('\n')
        pipe.close()
        return (len(out) > 0)

    async def _start_saver(self, **kwargs):
        if not await self.__saver_loaded():
            await self._system("kldload daemon_saver")

    async def _stop_saver(self, **kwargs):
        if await self.__saver_loaded():
            await self._system("kldunload daemon_saver")

    async def _restart_saver(self, **kwargs):
        await self._stop_saver()
        await self._start_saver()

    async def _reload_disk(self, **kwargs):
        await self._service("ix-fstab", "start", quiet=True, **kwargs)
        await self._service("ix-swap", "start", quiet=True, **kwargs)
        await self._service("swap", "start", quiet=True, **kwargs)
        await self._service("mountlate", "start", quiet=True, **kwargs)
        # Restarting collectd may take a long time and there is no
        # benefit in waiting for it since even if it fails it won't
        # tell the user anything useful.
        asyncio.ensure_future(self.restart("collectd", kwargs))

    async def _reload_user(self, **kwargs):
        await self._service("ix-passwd", "start", quiet=True, **kwargs)
        await self._service("ix-aliases", "start", quiet=True, **kwargs)
        await self._service("ix-sudoers", "start", quiet=True, **kwargs)
        await self.reload("cifs", kwargs)

    async def _restart_system_datasets(self, **kwargs):
        systemdataset = await self.middleware.call('systemdataset.setup')
        if not systemdataset:
            return None
        if systemdataset['syslog']:
            await self.restart("syslogd", kwargs)
        await self.restart("cifs", kwargs)
        if systemdataset['rrd']:
            # Restarting collectd may take a long time and there is no
            # benefit in waiting for it since even if it fails it won't
            # tell the user anything useful.
            asyncio.ensure_future(self.restart("collectd", kwargs))
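
A brief usage sketch, not part of the original source: other plugins drive these verbs through middleware.call, as the snippet itself does with self.reload("cifs", kwargs). It assumes the class is registered under the 'service' namespace; the service name and options below are illustrative only.

async def restart_smb_example(middleware):
    # 'onetime' maps to the rc.d "one" prefix handled by _service() above.
    running = await middleware.call('service.restart', 'cifs', {'onetime': True})
    if not running:
        raise RuntimeError('cifs did not come back up')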
Example #3
class AlertServiceService(CRUDService):
    class Config:
        datastore = "system.alertservice"
        datastore_extend = "alertservice._extend"
        datastore_order_by = ["name"]
        cli_namespace = "system.alert.service"

    @accepts()
    async def list_types(self):
        """
        List all types of supported Alert services which can be configured with the system.
        """
        return [{
            "name": name,
            "title": factory.title,
        } for name, factory in sorted(ALERT_SERVICES_FACTORIES.items(),
                                      key=lambda i: i[1].title.lower())]

    @private
    async def _extend(self, service):
        try:
            service["type__title"] = ALERT_SERVICES_FACTORIES[
                service["type"]].title
        except KeyError:
            service["type__title"] = "<Unknown>"

        return service

    @private
    async def _compress(self, service):
        service.pop("type__title")

        return service

    @private
    async def _validate(self, service, schema_name):
        verrors = ValidationErrors()

        factory = ALERT_SERVICES_FACTORIES.get(service["type"])
        if factory is None:
            verrors.add(f"{schema_name}.type", "This field has invalid value")
            raise verrors

        verrors.add_child(
            f"{schema_name}.attributes",
            validate_attributes(list(factory.schema.attrs.values()), service))

        if verrors:
            raise verrors

    @accepts(
        Dict(
            "alert_service_create",
            Str("name"),
            Str("type", required=True),
            Dict("attributes", additional_attrs=True),
            Str("level", enum=list(AlertLevel.__members__)),
            Bool("enabled"),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create an Alert Service of specified `type`.

        If `enabled`, it sends alerts to the configured `type` of Alert Service.

        .. examples(websocket)::

          Create an Alert Service of Mail `type`

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "alertservice.create",
                "params": [{
                    "name": "Test Email Alert",
                    "enabled": true,
                    "type": "Mail",
                    "attributes": {
                        "email": "*****@*****.**"
                    },
                    "settings": {
                        "VolumeVersion": "HOURLY"
                    }
                }]
            }
        """
        await self._validate(data, "alert_service_create")

        data["id"] = await self.middleware.call("datastore.insert",
                                                self._config.datastore, data)

        await self._extend(data)

        return data

    @accepts(Int("id"),
             Patch(
                 "alert_service_create",
                 "alert_service_update",
                 ("attr", {
                     "update": True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update Alert Service of `id`.
        """
        old = await self.middleware.call(
            "datastore.query", self._config.datastore, [("id", "=", id)], {
                "extend": self._config.datastore_extend,
                "get": True
            })

        new = old.copy()
        new.update(data)

        await self._validate(new, "alert_service_update")

        await self._compress(new)

        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new)

        await self._extend(new)

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete Alert Service of `id`.
        """
        return await self.middleware.call("datastore.delete",
                                          self._config.datastore, id)

    @accepts(Ref('alert_service_create'))
    async def test(self, data):
        """
        Send a test alert using `type` of Alert Service.

        .. examples(websocket)::

          Send a test alert using Alert Service of Mail `type`.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "alertservice.test",
                "params": [{
                    "name": "Test Email Alert",
                    "enabled": true,
                    "type": "Mail",
                    "attributes": {
                        "email": "*****@*****.**"
                    },
                    "settings": {}
                }]
            }
        """
        await self._validate(data, "alert_service_test")

        factory = ALERT_SERVICES_FACTORIES.get(data["type"])
        if factory is None:
            self.logger.error("Alert service %r does not exist", data["type"])
            return False

        try:
            alert_service = factory(self.middleware, data["attributes"])
        except Exception:
            self.logger.error(
                "Error creating alert service %r with parameters=%r",
                data["type"],
                data["attributes"],
                exc_info=True)
            return False

        master_node = "A"
        if await self.middleware.call("failover.licensed"):
            master_node = await self.middleware.call("failover.node")

        test_alert = Alert(
            TestAlertClass,
            node=master_node,
            datetime=datetime.utcnow(),
            last_occurrence=datetime.utcnow(),
            _uuid="test",
        )

        try:
            await alert_service.send([test_alert], [], [test_alert])
        except Exception:
            self.logger.error("Error in alert service %r",
                              data["type"],
                              exc_info=True)
            return False

        return True
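
The snippet never shows what the entries of ALERT_SERVICES_FACTORIES look like, but the accesses above (factory.title, factory.schema.attrs, factory(self.middleware, attributes) and alert_service.send(...)) constrain their shape. Below is a hedged sketch of a minimal factory under those assumptions; every name in it is invented for illustration.

class ExampleAlertService:
    title = "Example"
    schema = Dict("example_attributes", Str("email", required=True))

    def __init__(self, middleware, attributes):
        self.middleware = middleware
        self.attributes = attributes

    async def send(self, alerts, gone_alerts, new_alerts):
        # A real factory would deliver the alerts here (mail, webhook, ...).
        pass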
Example #4
class JailService(CRUDService):
    @filterable
    async def query(self, filters=None, options=None):
        options = options or {}
        jails = []
        try:
            jails = [
                list(jail.values())[0]
                for jail in ioc.IOCage().get("all", recursive=True)
            ]
        except BaseException:
            # Brandon is working on fixing this generic except, till then I
            # am not going to make the perfect the enemy of the good enough!
            self.logger.debug("iocage failed to fetch jails", exc_info=True)
            pass

        return filter_list(jails, filters, options)

    @accepts(
        Dict("options", Str("release"), Str("template"), Str("pkglist"),
             Str("uuid"), Bool("basejail", default=False),
             Bool("empty", default=False), Bool("short", default=False),
             List("props")))
    async def do_create(self, options):
        """Creates a jail."""
        # Typically one would return the created jail's id from this create
        # call, BUT jail creation may or may not involve fetching a release,
        # which in turn can be time consuming and block for a long time.
        # That calls for making it a job, yet turning a CRUD method into a
        # job would violate the (current) principle that CRUD methods are
        # not jobs, so this method is a thin wrapper that calls the main job
        # and returns that job's id instead of the created jail's id.

        return await self.middleware.call('jail.create_job', options)

    @private
    @accepts(
        Dict("options", Str("release"), Str("template"), Str("pkglist"),
             Str("uuid"), Bool("basejail", default=False),
             Bool("empty", default=False), Bool("short", default=False),
             List("props")))
    @job()
    def create_job(self, job, options):
        iocage = ioc.IOCage(skip_jails=True)

        release = options["release"]
        template = options.get("template", False)
        pkglist = options.get("pkglist", None)
        uuid = options.get("uuid", None)
        basejail = options["basejail"]
        empty = options["empty"]
        short = options["short"]
        props = options["props"]
        pool = IOCJson().json_get_value("pool")
        iocroot = IOCJson(pool).json_get_value("iocroot")

        if template:
            release = template

        if not os.path.isdir(f"{iocroot}/releases/{release}") and not \
                template and not empty:
            self.middleware.call_sync('jail.fetch', {
                "release": release
            }).wait_sync()

        err, msg = iocage.create(release,
                                 props,
                                 0,
                                 pkglist,
                                 template=template,
                                 short=short,
                                 _uuid=uuid,
                                 basejail=basejail,
                                 empty=empty)

        if err:
            raise CallError(msg)

        return True

    @accepts(Str("jail"),
             Dict(
                 "options",
                 Bool("plugin", default=False),
                 additional_attrs=True,
             ))
    def do_update(self, jail, options):
        """Sets a jail property."""
        plugin = options.pop("plugin")
        _, _, iocage = self.check_jail_existence(jail)

        name = options.pop("name", None)

        for prop, val in options.items():
            p = f"{prop}={val}"

            iocage.set(p, plugin)

        if name:
            iocage.rename(name)

        return True

    @accepts(Str("jail"))
    def do_delete(self, jail):
        """Takes a jail and destroys it."""
        _, _, iocage = self.check_jail_existence(jail)

        # TODO: Port children checking, release destroying.
        iocage.destroy_jail()

        return True

    @private
    def check_dataset_existence(self):
        IOCCheck()

    @private
    def check_jail_existence(self, jail, skip=True):
        """Wrapper for iocage's API, as a few commands aren't ported to it"""
        try:
            iocage = ioc.IOCage(skip_jails=skip, jail=jail)
            jail, path = iocage.__check_jail_existence__()
        except SystemExit:
            raise CallError(f"jail '{jail}' not found!")

        return jail, path, iocage

    @accepts()
    def get_activated_pool(self):
        """Returns the activated pool if there is one, or None"""
        try:
            pool = ioc.IOCage(skip_jails=True).get("", pool=True)
        except Exception:
            pool = None

        return pool

    @accepts(
        Dict(
            "options", Str("release"),
            Str("server", default="download.freebsd.org"),
            Str("user", default="anonymous"),
            Str("password", default="anonymous@"), Str("name", default=None),
            Bool("accept", default=False), List("props"),
            List("files",
                 default=["MANIFEST", "base.txz", "lib32.txz", "doc.txz"])))
    @job(lock=lambda args: f"jail_fetch:{args[-1]}")
    def fetch(self, job, options):
        """Fetches a release or plugin."""
        self.check_dataset_existence()  # Make sure our datasets exist.

        if options["name"] is not None:
            options["plugins"] = True

        iocage = ioc.IOCage(silent=True)

        iocage.fetch(**options)

        return True

    @accepts(Str("resource", enum=["RELEASE", "TEMPLATE", "PLUGIN"]),
             Bool("remote", default=False))
    def list_resource(self, resource, remote):
        """Returns a JSON list of the supplied resource on the host"""
        self.check_dataset_existence()  # Make sure our datasets exist.
        iocage = ioc.IOCage(skip_jails=True)
        resource = "base" if resource == "RELEASE" else resource.lower()

        if resource == "plugin":
            if remote:
                resource_list = iocage.fetch(list=True,
                                             plugins=True,
                                             header=False)
            else:
                resource_list = iocage.list("all", plugin=True)

            for plugin in resource_list:
                for i, elem in enumerate(plugin):
                    # iocage returns - for None
                    plugin[i] = elem if elem != "-" else None
        elif resource == "base":
            resource_list = iocage.fetch(list=True, remote=remote, http=True)
        else:
            resource_list = iocage.list(resource)

        return resource_list

    @accepts(Str("jail"))
    def start(self, jail):
        """Takes a jail and starts it."""
        _, _, iocage = self.check_jail_existence(jail)

        iocage.start()

        return True

    @accepts(Str("jail"))
    def stop(self, jail):
        """Takes a jail and stops it."""
        _, _, iocage = self.check_jail_existence(jail)

        iocage.stop()

        return True

    @accepts(Str("jail"),
             Dict(
                 "options",
                 Str("action",
                     enum=["ADD", "EDIT", "REMOVE", "REPLACE", "LIST"]),
                 Str("source"),
                 Str("destination"),
                 Str("fstype"),
                 Str("fsoptions"),
                 Str("dump"),
                 Str("pass"),
                 Int("index", default=None),
             ))
    def fstab(self, jail, options):
        """
        Adds an fstab mount to the jail, mounts if the jail is running.
        """
        _, _, iocage = self.check_jail_existence(jail, skip=False)

        action = options["action"].lower()
        source = options["source"]
        destination = options["destination"]
        fstype = options["fstype"]
        fsoptions = options["fsoptions"]
        dump = options["dump"]
        _pass = options["pass"]
        index = options["index"]

        if action == "replace" and index is None:
            raise ValueError(
                "index must not be None when replacing fstab entry")

        _list = iocage.fstab(action,
                             source,
                             destination,
                             fstype,
                             fsoptions,
                             dump,
                             _pass,
                             index=index)

        if action == "list":
            split_list = {}
            for i in _list:
                split_list[i[0]] = i[1].split()

            return split_list

        return True

    @accepts(Str("pool"))
    def activate(self, pool):
        """Activates a pool for iocage usage, and deactivates the rest."""
        zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")
        pools = zfs.pools
        prop = "org.freebsd.ioc:active"

        for _pool in pools:
            if _pool.name == pool:
                ds = zfs.get_dataset(_pool.name)
                ds.properties[prop] = libzfs.ZFSUserProperty("yes")
            else:
                ds = zfs.get_dataset(_pool.name)
                ds.properties[prop] = libzfs.ZFSUserProperty("no")

        return True

    @accepts(Str("ds_type", enum=["ALL", "JAIL", "TEMPLATE", "RELEASE"]))
    def clean(self, ds_type):
        """Cleans all iocage datasets of ds_type"""

        if ds_type == "JAIL":
            IOCClean().clean_jails()
        elif ds_type == "ALL":
            IOCClean().clean_all()
        elif ds_type == "RELEASE":
            pass
        elif ds_type == "TEMPLATE":
            IOCClean().clean_templates()

        return True

    @accepts(Str("jail"), List("command"),
             Dict("options", Str("host_user", default="root"),
                  Str("jail_user")))
    def exec(self, jail, command, options):
        """Issues a command inside a jail."""
        _, _, iocage = self.check_jail_existence(jail, skip=False)

        host_user = options["host_user"]
        jail_user = options.get("jail_user", None)

        # We may be getting ';', '&&' and so forth. Adding the shell for
        # safety.

        if len(command) == 1:
            command = ["/bin/sh", "-c"] + command

        host_user = "" if jail_user and host_user == "root" else host_user
        msg = iocage.exec(command, host_user, jail_user, return_msg=True)

        return msg.decode("utf-8")

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_update:{args[-1]}")
    def update_to_latest_patch(self, job, jail):
        """Updates specified jail to latest patch level."""

        uuid, path, _ = self.check_jail_existence(jail)
        status, jid = IOCList.list_get_jid(uuid)
        conf = IOCJson(path).json_load()

        # Sometimes if they don't have an existing patch level, this
        # becomes 11.1 instead of 11.1-RELEASE
        _release = conf["release"].rsplit("-", 1)[0]
        release = _release if "-RELEASE" in _release else conf["release"]

        started = False

        if conf["type"] == "jail":
            if not status:
                self.start(jail)
                started = True
        else:
            return False

        if conf["basejail"] != "yes":
            IOCFetch(release).fetch_update(True, uuid)
        else:
            # Basejails only need their base RELEASE updated
            IOCFetch(release).fetch_update()

        if started:
            self.stop(jail)

        return True

    @accepts(Str("jail"), Str("release"))
    @job(lock=lambda args: f"jail_upgrade:{args[-1]}")
    def upgrade(self, job, jail, release):
        """Upgrades specified jail to specified RELEASE."""

        uuid, path, _ = self.check_jail_existence(jail)
        status, jid = IOCList.list_get_jid(uuid)
        conf = IOCJson(path).json_load()
        root_path = f"{path}/root"
        started = False

        if conf["type"] == "jail":
            if not status:
                self.start(jail)
                started = True
        else:
            return False

        IOCUpgrade(conf, release, root_path).upgrade_jail()

        if started:
            self.stop(jail)

        return True

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_export:{args[-1]}")
    def export(self, job, jail):
        """Exports jail to zip file"""
        uuid, path, _ = self.check_jail_existence(jail)
        status, jid = IOCList.list_get_jid(uuid)
        started = False

        if status:
            self.stop(jail)
            started = True

        IOCImage().export_jail(uuid, path)

        if started:
            self.start(jail)

        return True

    @accepts(Str("jail"))
    @job(lock=lambda args: f"jail_import:{args[-1]}")
    def _import(self, job, jail):
        """Imports jail from zip file"""

        IOCImage().import_jail(jail)

        return True
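
# A minimal usage sketch, not part of the service above: assuming these methods are
# registered under the `jail` namespace and the middlewared websocket client is
# importable as middlewared.client.Client, the jail lifecycle could be driven like this.
from middlewared.client import Client

with Client() as c:
    c.call('jail.start', 'myjail')                         # start the jail by name
    out = c.call('jail.exec', 'myjail', ['uname -a'], {})  # run a command inside it
    print(out)
    c.call('jail.stop', 'myjail')
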
Example #5
class ZFSDatasetService(CRUDService):
    class Config:
        namespace = 'zfs.dataset'
        private = True
        process_pool = True

    def locked_datasets(self):
        try:
            about_to_lock_dataset = self.middleware.call_sync(
                'cache.get', 'about_to_lock_dataset')
        except KeyError:
            about_to_lock_dataset = None

        or_filters = [
            'OR', [['key_loaded', '=', False]] +
            ([['id', '=', about_to_lock_dataset],
              ['id', '^', f'{about_to_lock_dataset}/']]
             if about_to_lock_dataset else [])
        ]
        return self.query(
            [['encrypted', '=', True], or_filters], {
                'extra': {
                    'properties': ['encryption', 'keystatus', 'mountpoint']
                },
                'select': ['id', 'mountpoint']
            })

    def flatten_datasets(self, datasets):
        return sum([[deepcopy(ds)] + self.flatten_datasets(ds['children'])
                    for ds in datasets], [])

    @filterable
    def query(self, filters=None, options=None):
        """
        In `query-options` we can provide `extra` arguments which control which data should be retrieved
        for a dataset.

        `query-options.extra.top_level_properties` is a list of properties which we would like to include in the
        top level dict of a dataset. It defaults to adding only the mountpoint key, keeping legacy behavior. If no
        properties are desired in the top level dict, an empty list should be passed. If null is specified, the
        mountpoint key is added to the top level dict when it is present in `query-options.extra.properties` or
        when that list is null as well.

        `query-options.extra.properties` is a list of properties which should be retrieved. If null ( the default ),
        all properties are retrieved; if empty, no properties are retrieved ( `mountpoint` is special in this case
        and is controlled by the `query-options.extra.mountpoint` attribute ).

        We provide two ways in which zfs.dataset.query returns dataset data. The first is a flat structure ( the
        default ), which means that all the datasets in the system are returned as separate objects which also
        contain all the data there is for their children. This retrieval type is slightly slower because of the
        duplicates which exist in each object.
        The second type is hierarchical, where only top level datasets are returned in the list and they contain
        all of their children in the `children` key. This retrieval type is slightly faster.
        These options are controlled by the `query-options.extra.flat` attribute, which defaults to true.

        `query-options.extra.user_properties` controls whether user defined properties of datasets should be
        retrieved or not.

        While we provide ways to exclude individual properties from data retrieval, the single attribute
        `query-options.extra.retrieve_properties`, if set to false, ensures that no property is retrieved
        whatsoever and overrides any other property retrieval attribute.
        """
        options = options or {}
        extra = options.get('extra', {}).copy()
        top_level_props = None if extra.get(
            'top_level_properties'
        ) is None else extra['top_level_properties'].copy()
        props = extra.get('properties', None)
        flat = extra.get('flat', True)
        user_properties = extra.get('user_properties', True)
        retrieve_properties = extra.get('retrieve_properties', True)
        if not retrieve_properties:
            # This is a short hand version where consumer can specify that they don't want any property to
            # be retrieved
            user_properties = False
            props = []

        with libzfs.ZFS() as zfs:
            # Handle `id` filter specially to avoiding getting all datasets
            if filters and len(filters) == 1 and list(
                    filters[0][:2]) == ['id', '=']:
                state_options = {
                    'snapshots': extra.get('snapshots', False),
                    'recursive': extra.get('recursive', True),
                    'snapshots_recursive': extra.get('snapshots_recursive',
                                                     False)
                }
                try:
                    datasets = [
                        zfs.get_dataset(
                            filters[0][2]).__getstate__(**state_options)
                    ]
                except libzfs.ZFSException:
                    datasets = []
            else:
                datasets = zfs.datasets_serialized(
                    props=props,
                    top_level_props=top_level_props,
                    user_props=user_properties)
                if flat:
                    datasets = self.flatten_datasets(datasets)
                else:
                    datasets = list(datasets)

        return filter_list(datasets, filters, options)

    def query_for_quota_alert(self):
        return [{
            k: v
            for k, v in dataset['properties'].items() if k in [
                "name", "quota", "available", "refquota", "usedbydataset",
                "mounted", "mountpoint", "org.freenas:quota_warning",
                "org.freenas:quota_critical", "org.freenas:refquota_warning",
                "org.freenas:refquota_critical"
            ]
        } for dataset in self.query()]

    def common_load_dataset_checks(self, ds):
        self.common_encryption_checks(ds)
        if ds.key_loaded:
            raise CallError(f'{ds.name} key is already loaded')

    def common_encryption_checks(self, ds):
        if not ds.encrypted:
            raise CallError(f'{ds.name} is not encrypted')

    def path_to_dataset(self, path):
        with libzfs.ZFS() as zfs:
            try:
                zh = zfs.get_dataset_by_path(path)
                ds_name = zh.name
            except libzfs.ZFSException:
                ds_name = None

        return ds_name

    def get_quota(self, ds, quota_type):
        if quota_type == 'dataset':
            dataset = self.query([('id', '=', ds)], {'get': True})
            return [{
                'quota_type': 'DATASET',
                'id': ds,
                'name': ds,
                'quota': int(dataset['properties']['quota']['rawvalue']),
                'refquota': int(dataset['properties']['refquota']['rawvalue']),
                'used_bytes': int(dataset['properties']['used']['rawvalue']),
            }]

        quota_list = []
        quota_get = subprocess.run(
            [
                'zfs', f'{quota_type}space', '-H', '-n', '-p', '-o',
                'name,used,quota,objquota,objused', ds
            ],
            capture_output=True,
            check=False,
        )
        if quota_get.returncode != 0:
            raise CallError(
                f'Failed to get {quota_type} quota for {ds}: [{quota_get.stderr.decode()}]'
            )
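        # Each output line of `zfs {user,group}space -H -n -p` is tab separated in the
        # requested column order (name, used, quota, objquota, objused), e.g. with
        # illustrative values: "1001\t2147483648\t10737418240\t-\t-".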

        for quota in quota_get.stdout.decode().splitlines():
            m = quota.split('\t')
            if len(m) != 5:
                self.logger.debug('Invalid %s quota: %s', quota_type.lower(),
                                  quota)
                continue

            entry = {
                'quota_type': quota_type.upper(),
                'id': int(m[0]),
                'name': None,
                'quota': int(m[2]),
                'used_bytes': int(m[1]),
                'used_percent': 0,
                'obj_quota': int(m[3]) if m[3] != '-' else 0,
                'obj_used': int(m[4]) if m[4] != '-' else 0,
                'obj_used_percent': 0,
            }
            if entry['quota'] > 0:
                entry['used_percent'] = entry['used_bytes'] / entry[
                    'quota'] * 100

            if entry['obj_quota'] > 0:
                entry['obj_used_percent'] = entry['obj_used'] / entry[
                    'obj_quota'] * 100

            try:
                if entry['quota_type'] == 'USER':
                    entry['name'] = (self.middleware.call_sync(
                        'user.get_user_obj', {'uid': entry['id']}))['pw_name']
                else:
                    entry['name'] = (self.middleware.call_sync(
                        'group.get_group_obj',
                        {'gid': entry['id']}))['gr_name']

            except Exception:
                self.logger.debug('Unable to resolve %s id %d to name',
                                  quota_type.lower(), entry['id'])
                pass

            quota_list.append(entry)

        return quota_list

    def set_quota(self, ds, quota_list):
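        # `quota_list` is expected to contain ready-made `zfs set` property assignments,
        # e.g. (illustrative) ['userquota@1001=10G', 'groupquota@1001=none'].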
        cmd = ['zfs', 'set']
        cmd.extend(quota_list)
        cmd.append(ds)
        quota_set = subprocess.run(cmd, capture_output=True, check=False)
        if quota_set.returncode != 0:
            raise CallError(
                f'Failed to set userspace quota on {ds}: [{quota_set.stderr.decode()}]'
            )

    @accepts(
        Str('id'),
        Dict(
            'load_key_options',
            Bool('mount', default=True),
            Bool('recursive', default=False),
            Any('key', default=None, null=True),
            Str('key_location', default=None, null=True),
        ),
    )
    def load_key(self, id, options):
        mount_ds = options.pop('mount')
        recursive = options.pop('recursive')
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_load_dataset_checks(ds)
                ds.load_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to load key for {id}', exc_info=True)
            raise CallError(f'Failed to load key for {id}: {e}')
        else:
            if mount_ds:
                self.mount(id, {'recursive': recursive})

    @accepts(Str('name'), List('params', default=[], private=True))
    @job()
    def bulk_process(self, job, name, params):
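        # `params` is a list of positional-argument lists; each inner list is applied to
        # the named method, e.g. (illustrative) [['tank/a', {}], ['tank/b', {}]] when
        # `name` is 'unload_key'.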
        f = getattr(self, name, None)
        if not f:
            raise CallError(f'{name} method not found in zfs.dataset')

        statuses = []
        for i in params:
            result = error = None
            try:
                result = f(*i)
            except Exception as e:
                error = str(e)
            finally:
                statuses.append({'result': result, 'error': error})

        return statuses

    @accepts(Str('id'),
             Dict(
                 'check_key',
                 Any('key', default=None, null=True),
                 Str('key_location', default=None, null=True),
             ))
    def check_key(self, id, options):
        """
        Returns `true` if the `key` is valid, `false` otherwise.
        """
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                return ds.check_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to check key for {id}', exc_info=True)
            raise CallError(f'Failed to check key for {id}: {e}')

    @accepts(Str('id'),
             Dict(
                 'unload_key_options',
                 Bool('recursive', default=False),
                 Bool('force_umount', default=False),
                 Bool('umount', default=False),
             ))
    def unload_key(self, id, options):
        force = options.pop('force_umount')
        if options.pop('umount') and self.middleware.call_sync(
                'zfs.dataset.get_instance', id)['mountpoint']:
            self.umount(id, {'force': force})
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                if not ds.key_loaded:
                    raise CallError(f'{id}\'s key is not loaded')
                ds.unload_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to unload key for {id}', exc_info=True)
            raise CallError(f'Failed to unload key for {id}: {e}')

    @accepts(
        Str('id'),
        Dict(
            'change_key_options',
            Dict('encryption_properties', Str('keyformat'), Str('keylocation'),
                 Int('pbkdf2iters')),
            Bool('load_key', default=True),
            Any('key', default=None, null=True),
        ),
    )
    def change_key(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                ds.change_key(props=options['encryption_properties'],
                              load_key=options['load_key'],
                              key=options['key'])
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to change key for {id}', exc_info=True)
            raise CallError(f'Failed to change key for {id}: {e}')

    @accepts(Str('id'),
             Dict(
                 'change_encryption_root_options',
                 Bool('load_key', default=True),
             ))
    def change_encryption_root(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                ds.change_key(load_key=options['load_key'], inherit=True)
        except libzfs.ZFSException as e:
            raise CallError(f'Failed to change encryption root for {id}: {e}')

    @accepts(
        Dict(
            'dataset_create',
            Str('name', required=True),
            Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
            Dict(
                'properties',
                Bool('sparse'),
                additional_attrs=True,
            ),
        ))
    def do_create(self, data):
        """
        Creates a ZFS dataset.
        """

        verrors = ValidationErrors()

        if '/' not in data['name']:
            verrors.add('name', 'You need a full name, e.g. pool/newdataset')

        if verrors:
            raise verrors

        properties = data.get('properties') or {}
        sparse = properties.pop('sparse', False)
        params = {}

        for k, v in data['properties'].items():
            params[k] = v

        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(data['name'].split('/')[0])
                pool.create(data['name'],
                            params,
                            fstype=getattr(libzfs.DatasetType, data['type']),
                            sparse_vol=sparse)
        except libzfs.ZFSException as e:
            self.logger.error('Failed to create dataset', exc_info=True)
            raise CallError(f'Failed to create dataset: {e}')

    @accepts(
        Str('id'),
        Dict(
            'dataset_update',
            Dict(
                'properties',
                additional_attrs=True,
            ),
        ),
    )
    def do_update(self, id, data):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(id)

                if 'properties' in data:
                    properties = data['properties'].copy()
                    # Set these after reservations
                    for k in ['quota', 'refquota']:
                        if k in properties:
                            properties[k] = properties.pop(k)  # Set them last
                    for k, v in properties.items():

                        # If prop already exists we just update it,
                        # otherwise create a user property
                        prop = dataset.properties.get(k)
                        try:
                            if prop:
                                if v.get('source') == 'INHERIT':
                                    prop.inherit(
                                        recursive=v.get('recursive', False))
                                elif 'value' in v and (prop.value != v['value']
                                                       or prop.source.name
                                                       == 'INHERITED'):
                                    prop.value = v['value']
                                elif 'parsed' in v and (
                                        prop.parsed != v['parsed']
                                        or prop.source.name == 'INHERITED'):
                                    prop.parsed = v['parsed']
                            else:
                                if v.get('source') == 'INHERIT':
                                    pass
                                else:
                                    if 'value' not in v:
                                        raise ValidationError(
                                            'properties',
                                            f'properties.{k} needs a "value" attribute'
                                        )
                                    if ':' not in k:
                                        raise ValidationError(
                                            'properties',
                                            f'User property needs a colon (:) in its name'
                                        )
                                    prop = libzfs.ZFSUserProperty(v['value'])
                                    dataset.properties[k] = prop
                        except libzfs.ZFSException as e:
                            raise ZFSSetPropertyError(k, str(e))

        except libzfs.ZFSException as e:
            self.logger.error('Failed to update dataset', exc_info=True)
            raise CallError(f'Failed to update dataset: {e}')

    def do_delete(self, id, options=None):
        options = options or {}
        force = options.get('force', False)
        recursive = options.get('recursive', False)

        args = []
        if force:
            args += ['-f']
        if recursive:
            args += ['-r']

        # If dataset is mounted and has receive_resume_token, we should destroy it or ZFS will say
        # "cannot destroy 'pool/dataset': dataset already exists"
        recv_run = subprocess.run(['zfs', 'recv', '-A', id],
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
        # Destroying may take a long time, lets not use py-libzfs as it will block
        # other ZFS operations.
        try:
            subprocess.run(
                ['zfs', 'destroy'] + args + [id],
                text=True,
                capture_output=True,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            if recv_run.returncode == 0 and e.stderr.strip().endswith(
                    'dataset does not exist'):
                # The dataset might already have been destroyed by the `zfs recv -A` above
                # if it was created by a `zfs recv` operation
                return
            self.logger.error('Failed to delete dataset', exc_info=True)
            error = e.stderr.strip()
            errno_ = errno.EFAULT
            if "Device busy" in error:
                errno_ = errno.EBUSY
            raise CallError(f'Failed to delete dataset: {error}', errno_)

    @accepts(Str('name'), Dict('options', Bool('recursive', default=False)))
    def mount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                if options['recursive']:
                    dataset.mount_recursive()
                else:
                    dataset.mount()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to mount dataset', exc_info=True)
            raise CallError(f'Failed to mount dataset: {e}')

    @accepts(Str('name'), Dict('options', Bool('force', default=False)))
    def umount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.umount(force=options['force'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to umount dataset', exc_info=True)
            raise CallError(f'Failed to umount dataset: {e}')

    @accepts(Str('dataset'),
             Dict('options', Str('new_name', required=True, empty=False),
                  Bool('recursive', default=False)))
    def rename(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.rename(options['new_name'],
                               recursive=options['recursive'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to rename dataset', exc_info=True)
            raise CallError(f'Failed to rename dataset: {e}')

    def promote(self, name):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.promote()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to promote dataset', exc_info=True)
            raise CallError(f'Failed to promote dataset: {e}')

    def inherit(self, name, prop, recursive=False):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                zprop = dataset.properties.get(prop)
                if not zprop:
                    raise CallError(f'Property {prop!r} not found.',
                                    errno.ENOENT)
                zprop.inherit(recursive=recursive)
        except libzfs.ZFSException as e:
            raise CallError(str(e))
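
# A minimal usage sketch, not part of the service above: assuming the key handling
# endpoints are reachable through the middlewared client and 'tank/secret' is an
# encrypted dataset protected by a passphrase key, loading it could look like this.
from middlewared.client import Client

with Client() as c:
    if c.call('zfs.dataset.check_key', 'tank/secret', {'key': 'secret passphrase'}):
        c.call('zfs.dataset.load_key', 'tank/secret', {'key': 'secret passphrase'})
    else:
        raise ValueError('provided key is not valid for tank/secret')
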
Example #6
class PoolService(Service):
    @item_method
    @accepts(Int('id'),
             Dict(
                 'options',
                 Str('label', required=True),
                 Str('disk', required=True),
                 Bool('force', default=False),
                 Str('passphrase', private=True),
                 Bool('preserve_settings', default=True),
             ))
    @job(lock='pool_replace')
    async def replace(self, job, oid, options):
        """
        Replace a disk on a pool.

        `label` is the ZFS guid or a device name
        `disk` is the identifier of a disk
        `passphrase` is only valid on the TrueNAS Core/Enterprise platform where the pool is GELI encrypted
        If `preserve_settings` is true, then the settings (power management, S.M.A.R.T., etc.) of the disk being
        replaced will be applied to the new disk.

        .. examples(websocket)::

          Replace missing ZFS device with disk {serial}FOO.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.replace",
                "params": [1, {
                    "label": "80802394992848654",
                    "disk": "{serial}FOO"
                }]
            }
        """
        pool = await self.middleware.call('pool.get_instance', oid)

        verrors = ValidationErrors()

        unused_disks = await self.middleware.call('disk.get_unused')
        disk = list(
            filter(lambda x: x['identifier'] == options['disk'], unused_disks))
        if not disk:
            verrors.add('options.disk', 'Disk not found.', errno.ENOENT)
        else:
            disk = disk[0]

            if not options['force'] and not await self.middleware.call(
                    'disk.check_clean', disk['devname']):
                verrors.add('options.force',
                            'Disk is not clean, partitions were found.')

        if osc.IS_FREEBSD and pool['encrypt'] == 2:
            if not options.get('passphrase'):
                verrors.add('options.passphrase',
                            'Passphrase is required for encrypted pool.')
            elif not await self.middleware.call('disk.geli_testkey', pool,
                                                options['passphrase']):
                verrors.add('options.passphrase', 'Passphrase is not valid.')

        if osc.IS_LINUX and options.get('passphrase'):
            verrors.add('options.passphrase',
                        'This field is not valid on this platform.')

        found = await self.middleware.call('pool.find_disk_from_topology',
                                           options['label'], pool)

        if not found:
            verrors.add('options.label',
                        f'Label {options["label"]} not found.', errno.ENOENT)

        if verrors:
            raise verrors

        old_disk = None
        if options['preserve_settings']:
            try:
                old_disk = await self.middleware.call(
                    'disk.query', [['zfs_guid', '=', options['label']]], {
                        'extra': {
                            'include_expired': True
                        },
                        'get': True
                    })
            except MatchNotFound:
                pass

        create_swap = found[0] in ('data', 'spare')

        swap_disks = [disk['devname']]
        # If the disk we are replacing is still available, remove it from swap as well
        if found[1] and os.path.exists(found[1]['path']):
            from_disk = await self.middleware.call(
                'disk.label_to_disk', found[1]['path'].replace('/dev/', ''))
            if from_disk:
                swap_disks.append(from_disk)

        await self.middleware.call('disk.swaps_remove_disks', swap_disks)

        vdev = []
        enc_disks = await self.middleware.call(
            'pool.format_disks',
            job,
            {disk['devname']: {
                 'vdev': vdev,
                 'create_swap': create_swap
             }},
            {
                'enc_keypath': pool['encryptkey_path'],
                'passphrase': options.get('passphrase')
            },
        )

        new_devname = vdev[0].replace('/dev/', '')

        job.set_progress(30, 'Replacing disk')
        try:
            await self.middleware.call('zfs.pool.replace', pool['name'],
                                       options['label'], new_devname)
            # If we are replacing a faulted disk, kick it right after replace
            # is initiated.
            try:
                vdev = await self.middleware.call(
                    'zfs.pool.get_vdev',
                    pool['name'],
                    options['label'],
                )
                if vdev['status'] not in ('ONLINE', 'DEGRADED'):
                    await self.middleware.call('zfs.pool.detach', pool['name'],
                                               options['label'])
            except Exception:
                self.logger.warn('Failed to detach device', exc_info=True)
        except Exception as e:
            if osc.IS_FREEBSD:
                try:
                    # If replace has failed lets detach geli to not keep disk busy
                    await self.middleware.call('disk.geli_detach_single',
                                               new_devname)
                except Exception:
                    self.logger.warn(f'Failed to geli detach {new_devname}',
                                     exc_info=True)
            raise e
        finally:
            # Needs to happen even if replace failed to put back disk that had been
            # removed from swap prior to replacement
            asyncio.ensure_future(self.middleware.call('disk.swaps_configure'))

        if osc.IS_FREEBSD:
            await self.middleware.call('pool.save_encrypteddisks', oid,
                                       enc_disks, {disk['devname']: disk})

        if old_disk:
            await self.middleware.call('disk.copy_settings', old_disk, disk)

        return True
Example #7
class RsyncModService(SharingService):

    share_task_type = 'Rsync Module'

    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'
        datastore_extend = 'rsyncmod.rsync_mod_extend'
        cli_namespace = 'service.rsync_mod'

    ENTRY = Patch(
        'rsyncmod_create',
        'rsyncmod_entry',
        ('add', Bool('locked')),
        ('add', Int('id')),
    )

    @private
    async def rsync_mod_extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        data['mode'] = data['mode'].upper()
        return data

    @private
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        await self.validate_path_field(data, schema_name, verrors)

        for entity in ('user', 'group'):
            value = data.get(entity)
            try:
                await self.middleware.call(f'{entity}.get_{entity}_obj',
                                           {f'{entity}name': value})
            except Exception:
                verrors.add(f'{schema_name}.{entity}',
                            f'Please specify a valid {entity}')

        verrors.check()

        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data['mode'] = data['mode'].lower()

        return data

    @accepts(
        Dict(
            'rsyncmod_create',
            Bool('enabled', default=True),
            Str('name', validators=[Match(r'[^/\]]')]),
            Str('comment'),
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('mode', enum=['RO', 'RW', 'WO'], required=True),
            Int('maxconn'),
            Str('user', default='nobody'),
            Str('group', default='nobody'),
            List('hostsallow', items=[Str('hostsallow')]),
            List('hostsdeny', items=[Str('hostdeny')]),
            Str('auxiliary', max_length=None),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsyncmod module.

        `path` represents the path to a dataset. Path length is limited to a maximum of 1023 characters, as per the
        limit enforced by FreeBSD. It is possible that this maximum length is reached recursively while transferring
        data. In that case, the user must ensure that the resulting path will not be too long, or modify the recursed
        path so that it is shorter than the limit.

        `maxconn` is an integer value representing the maximum number of simultaneous connections. Zero represents
        unlimited.

        `hostsallow` is a list of patterns to match hostname/ip address of a connecting client. If list is empty,
        all hosts are allowed.

        `hostsdeny` is a list of patterns to match hostname/ip address of a connecting client. If the pattern is
        matched, access is denied to the client. If no client should be denied, this should be left empty.

        `auxiliary` attribute can be used to pass on any additional parameters from rsyncd.conf(5).
        """

        data = await self.common_validation(data, 'rsyncmod_create')

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self._get_instance(data['id'])

    @accepts(Int('id'),
             Patch('rsyncmod_create', 'rsyncmod_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsyncmod module of `id`.
        """
        module = await self.get_instance(id)
        module.update(data)

        module = await self.common_validation(module, 'rsyncmod_update')
        module.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, module,
                                   {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete Rsyncmod module of `id`.
        """
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)
Example #8
class NFSService(SystemServiceService):
    class Config:
        service = "nfs"
        service_verb = "restart"
        datastore_prefix = "nfs_srv_"
        datastore_extend = 'nfs.nfs_extend'

    @private
    async def nfs_extend(self, nfs):
        keytab_has_nfs = await self.middleware.call(
            "kerberos.keytab.has_nfs_principal")
        nfs["v4_krb_enabled"] = (nfs["v4_krb"] or keytab_has_nfs)
        nfs["userd_manage_gids"] = nfs.pop("16")
        return nfs

    @private
    async def nfs_compress(self, nfs):
        nfs.pop("v4_krb_enabled")
        nfs["16"] = nfs.pop("userd_manage_gids")
        return nfs

    @accepts()
    async def bindip_choices(self):
        """
        Returns IP choices for the NFS service to use
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use', {
                'static': True,
                'any': True
            })
        }

    @private
    async def bindip(self, config):
        bindip = config['bindip']
        if osc.IS_LINUX:
            bindip = bindip[:1]

        if bindip:
            found = False
            for iface in await self.middleware.call('interface.query'):
                for alias in iface['state']['aliases']:
                    if alias['address'] in bindip:
                        found = True
                        break
                if found:
                    break
        else:
            found = True

        if found:
            await self.middleware.call('alert.oneshot_delete',
                                       'NFSBindAddress', None)

            return bindip
        else:
            if await self.middleware.call('cache.has_key',
                                          'interfaces_are_set_up'):
                await self.middleware.call('alert.oneshot_create',
                                           'NFSBindAddress', None)

            return []

    @accepts(
        Dict('nfs_update',
             Int('servers', validators=[Range(min=1, max=256)]),
             Bool('udp'),
             Bool('allow_nonroot'),
             Bool('v4'),
             Bool('v4_v3owner'),
             Bool('v4_krb'),
             Str('v4_domain'),
             List('bindip', items=[IPAddr('ip')]),
             Int('mountd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpcstatd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Int('rpclockd_port',
                 null=True,
                 validators=[Range(min=1, max=65535)]),
             Bool('userd_manage_gids'),
             Bool('mountd_log'),
             Bool('statd_lockd_log'),
             update=True))
    async def do_update(self, data):
        """
        Update NFS Service Configuration.

        `servers` represents number of servers to create.

        When `allow_nonroot` is set, it allows non-root mount requests to be served.

        `bindip` is a list of IP's on which NFS will listen for requests. When it is unset/empty, NFS listens on
        all available addresses.

        `v4` when set means that we switch from NFSv3 to NFSv4.

        `v4_v3owner` when set means that system will use NFSv3 ownership model for NFSv4.

        `v4_krb` will force NFS shares to fail if the Kerberos ticket is unavailable.

        `v4_domain` overrides the default DNS domain name for NFSv4.

        `mountd_port` specifies the port mountd(8) binds to.

        `rpcstatd_port` specifies the port rpc.statd(8) binds to.

        `rpclockd_port` specifies the port rpclockd_port(8) binds to.

        .. examples(websocket)::

          Update NFS Service Configuration to listen on 192.168.0.10 and use NFSv4

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.resilver.update",
                "params": [{
                    "bindip": [
                        "192.168.0.10"
                    ],
                    "v4": true
                }]
            }
        """
        if data.get("v4") is False:
            data.setdefault("v4_v3owner", False)

        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        keytab_has_nfs = await self.middleware.call(
            "kerberos.keytab.has_nfs_principal")
        new_v4_krb_enabled = new["v4_krb"] or keytab_has_nfs

        if new["v4"] and new_v4_krb_enabled and not await self.middleware.call(
                "system.is_freenas"):
            if await self.middleware.call("failover.licensed"):
                gc = await self.middleware.call("datastore.config",
                                                "network.globalconfiguration")
                if not gc["gc_hostname_virtual"] or not gc["gc_domain"]:
                    verrors.add(
                        "nfs_update.v4",
                        "Enabling kerberos authentication on TrueNAS HA requires setting the virtual hostname and "
                        "domain")

        if osc.IS_LINUX:
            if len(new['bindip']) > 1:
                verrors.add(
                    'nfs_update.bindip',
                    'Listening on more than one address is not supported')
        bindip_choices = await self.bindip_choices()
        for i, bindip in enumerate(new['bindip']):
            if bindip not in bindip_choices:
                verrors.add(f'nfs_update.bindip.{i}',
                            'Please provide a valid ip address')

        if new["v4"] and new_v4_krb_enabled and await self.middleware.call(
                'activedirectory.get_state') != "DISABLED":
            """
            In environments with kerberized NFSv4 enabled, we need to tell winbindd to not prefix
            usernames with the short form of the AD domain. Directly update the db and regenerate
            the smb.conf to avoid having a service disruption due to restarting the samba server.
            """
            if await self.middleware.call('smb.get_smb_ha_mode') == 'LEGACY':
                raise ValidationError(
                    'nfs_update.v4',
                    'Enabling kerberos authentication on TrueNAS HA requires '
                    'the system dataset to be located on a data pool.')
            ad = await self.middleware.call('activedirectory.config')
            await self.middleware.call('datastore.update',
                                       'directoryservice.activedirectory',
                                       ad['id'],
                                       {'ad_use_default_domain': True})
            await self.middleware.call('etc.generate', 'smb')
            await self.middleware.call('service.reload', 'cifs')

        if not new["v4"] and new["v4_v3owner"]:
            verrors.add("nfs_update.v4_v3owner",
                        "This option requires enabling NFSv4")

        if new["v4_v3owner"] and new["userd_manage_gids"]:
            verrors.add(
                "nfs_update.userd_manage_gids",
                "This option is incompatible with NFSv3 ownership model for NFSv4"
            )

        if not new["v4"] and new["v4_domain"]:
            verrors.add("nfs_update.v4_domain",
                        "This option does not apply to NFSv3")

        if verrors:
            raise verrors

        await self.nfs_compress(new)

        await self._update_service(old, new)

        await self.nfs_extend(new)

        return new
Example #9
class DiskService(Service):

    RE_IDENTIFIER = re.compile(r'^\{(?P<type>.+?)\}(?P<value>.+)$')

    @private
    @accepts(Str('name'), Dict('disks', additional_attrs=True))
    async def device_to_identifier(self, name, disks):
        """
        Given a device `name` (e.g. sda) returns an unique identifier string
        for this device.
        This identifier is in the form of {type}string, "type" can be one of
        the following:
          - serial_lunid - for disk serial concatenated with the lunid
          - serial - disk serial
          - uuid - uuid of a ZFS GPT partition
          - label - label name from geom label
          - devicename - name of the device if any other could not be used/found

        `disks` is value returned by `device.get_disks`. This can be passed to avoid collecting system
        data again if the consumer already has it.
        Returns:
            str - identifier
        """
        disks = disks or await self.middleware.call('device.get_disks')
        if name not in disks:
            return ''
        else:
            block_device = disks[name]

        if block_device['serial_lunid']:
            return f'{{serial_lunid}}{block_device["serial_lunid"]}'
        elif block_device['serial']:
            return f'{{serial}}{block_device["serial"]}'

        dev = pyudev.Devices.from_name(pyudev.Context(), 'block', name)
        for partition in filter(
                lambda p: all(
                    p.get(k)
                    for k in ('ID_PART_ENTRY_TYPE', 'ID_PART_ENTRY_UUID')),
                dev.children):
            if partition[
                    'ID_PART_ENTRY_TYPE'] not in await self.middleware.call(
                        'disk.get_valid_zfs_partition_type_uuids'):
                continue
            return f'{{uuid}}{partition["ID_PART_ENTRY_UUID"]}'

        return f'{{devicename}}{name}'

    @private
    async def identifier_to_device(self, ident, disks):
        if not ident:
            return None

        search = self.RE_IDENTIFIER.search(ident)
        if not search:
            return None

        tp = search.group('type')
        value = search.group('value')
        mapping = {
            'uuid': 'uuid',
            'devicename': 'name',
            'serial_lunid': 'serial_lunid',
            'serial': 'serial'
        }
        if tp not in mapping:
            return None
        elif tp == 'uuid':
            partition = await self.middleware.call(
                'disk.list_all_partitions', [['partition_uuid', '=', value]])
            if partition:
                return partition[0]['disk']
        else:
            disk = next((b for b in (disks or await self.middleware.call(
                'device.get_disks')).values() if b[mapping[tp]] == value),
                        None)
            return disk['name'] if disk else None
Example #10
class ImageService(CRUDService):
    class Config:
        namespace = 'webui.image'
        datastore = 'system.filesystem'
        datastore_extend = 'webui.image.url_extend'
        cli_private = True

    @private
    async def url_extend(self, image):
        """
        Adds the URL field to the image, which is /images/{id}.png
        """
        image["url"] = f"/images/{image['id']}.png"

        return image

    @accepts(Dict("options", Str("identifier")))
    @job(pipes=["input"])
    async def do_create(self, job, options):
        """
        Create a new database entry with `identifier` as the tag; all entries are
        lowercased.

        Then puts the file in the /var/db/system/webui/images directory
        """
        identifier = options.get('identifier')
        self.__ensure_dir()

        try:
            id = await self.middleware.call('datastore.insert',
                                            'system.filesystem',
                                            {'identifier': identifier.lower()})
        except IntegrityError as e:
            # Likely a duplicate entry
            raise CallError(e)

        final_location = f"/var/db/system/webui/images/{id}.png"
        put_job = await self.middleware.call(
            'filesystem.put',
            final_location, {"mode": 0o755},
            pipes=Pipes(input=job.pipes.input))
        await put_job.wait()

        return id

    @accepts(Int("id"))
    def do_delete(self, id):
        """
        Remove the database entry, and then the item if it exists
        """
        self.__ensure_dir()
        item = f"/var/db/system/webui/images/{id}.png"

        self.middleware.call_sync('datastore.delete', 'system.filesystem', id)

        if os.path.exists(item):
            os.remove(item)

        return True

    def __ensure_dir(self):
        """
        Ensure that the images directory exists
        """
        dirname = "/var/db/system/webui/images"
        if not os.path.isdir(dirname):
            if os.path.exists(dirname):
                # This is an imposter! Nuke it.
                os.remove(dirname)

        if not os.path.exists(dirname):
            os.makedirs(dirname)
Example #11
class SharingNFSService(SharingService):

    path_field = 'paths'
    share_task_type = 'NFS'

    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"

    async def human_identifier(self, share_task):
        return ', '.join(share_task[self.path_field])

    @private
    async def sharing_task_determine_locked(self, data, locked_datasets):
        for path in data[self.path_field]:
            if await self.middleware.call(
                    'pool.dataset.path_in_locked_datasets', path,
                    locked_datasets):
                return True
        else:
            return False

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")], required=True, empty=False),
            List("aliases",
                 items=[Str("path", validators=[Match(r"^/.*")])],
                 default=[]),
            Str("comment", default=""),
            List("networks",
                 items=[IPAddr("network", network=True)],
                 default=[]),
            List("hosts", items=[Str("host")], default=[]),
            Bool("alldirs", default=False),
            Bool("ro", default=False),
            Bool("quiet", default=False),
            Str("maproot_user", required=False, default=None, null=True),
            Str("maproot_group", required=False, default=None, null=True),
            Str("mapall_user", required=False, default=None, null=True),
            Str("mapall_group", required=False, default=None, null=True),
            List(
                "security",
                default=[],
                items=[
                    Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
                ],
            ),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ))
    async def do_create(self, data):
        """
        Create a NFS Share.

        `paths` is a list of valid paths which are configured to be shared on this share.

        `aliases` is a list of aliases for each path (or an empty list if aliases are not used).

        `networks` is a list of authorized networks that are allowed to access the share having format
        "network/mask" CIDR notation. If empty, all networks are allowed.

        `hosts` is a list of IPs/hostnames which are allowed to access the share. If empty, all IPs/hostnames are
        allowed.

        `alldirs` is a boolean value which, when set, indicates that the client can mount any subdirectory of the
        selected pool or dataset.
        """
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        await self.compress(data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        await self.extend(data)

        await self._service_change("nfs", "reload")

        return await self.get_instance(data["id"])

    @accepts(Int("id"),
             Patch("sharingnfs_create", "sharingnfs_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update NFS Share of `id`.
        """
        verrors = ValidationErrors()
        old = await self.get_instance(id)

        new = old.copy()
        new.update(data)

        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {"prefix": self._config.datastore_prefix})

        await self._service_change("nfs", "reload")

        return await self.get_instance(id)

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Delete NFS Share of `id`.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
        await self._service_change("nfs", "reload")

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        if len(data["aliases"]):
            if not osc.IS_LINUX:
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field is only supported on SCALE",
                )

            if len(data["aliases"]) != len(data["paths"]):
                verrors.add(
                    f"{schema_name}.aliases",
                    "This field should be either empty of have the same number of elements as paths",
                )

        if data["alldirs"] and len(data["paths"]) > 1:
            verrors.add(
                f"{schema_name}.alldirs",
                "This option can only be used for shares that contain single path"
            )

        # need to make sure that the nfs share is within the zpool mountpoint
        for idx, i in enumerate(data["paths"]):
            await check_path_resides_within_volume(
                verrors, self.middleware, f'{schema_name}.paths.{idx}', i)

        await self.middleware.run_in_thread(self.validate_paths, data,
                                            schema_name, verrors)

        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"]
                 for share in other_shares], []) + data["hosts"])
        await self.middleware.run_in_thread(self.validate_hosts_and_networks,
                                            other_shares, data, schema_name,
                                            verrors, dns_cache)

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_user",
                    "This field is required when map group is specified")
            else:
                user = group = None
                with contextlib.suppress(KeyError):
                    user = await self.middleware.call(
                        'dscache.get_uncached_user', data[f'{k}_user'])

                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f'{k}_group']:
                    with contextlib.suppress(KeyError):
                        group = await self.middleware.call(
                            'dscache.get_uncached_group', data[f'{k}_group'])

                    if not group:
                        verrors.add(f"{schema_name}.{k}_group",
                                    "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user",
                        "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security",
                            "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        if osc.IS_LINUX:
            # Ganesha does not have such a restriction, each path is a different share
            return

        dev = None
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f'{schema_name}.paths.{i}',
                        'Paths for a NFS share must reside within the same filesystem'
                    )

    @private
    async def resolve_hostnames(self, hostnames):
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (await asyncio.wait_for(
                    self.middleware.run_in_thread(socket.getaddrinfo, hostname,
                                                  None), 5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname,
                                    e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)

        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name,
                                    verrors, dns_cache):
        dev = os.stat(data["paths"][0]).st_dev

        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                self.logger.warning("Failed to stat first path for %r",
                                    share,
                                    exc_info=True)
                continue

            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue

                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                if not share["hosts"] and not share["networks"]:
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

        for host in set(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                continue

            network = ipaddress.ip_network(host)
            if network in used_networks:
                verrors.add(
                    f"{schema_name}.hosts",
                    f"Another NFS share already exports this dataset for {host}"
                )

            used_networks.add(network)

        for network in set(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            if network in used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    f"Another NFS share already exports this dataset for {network}"
                )

            used_networks.add(network)

        if not data["hosts"] and not data["networks"]:
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    "Another NFS share already exports this dataset for some network"
                )

    @private
    async def extend(self, data):
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        data.pop(self.locked_field, None)
        return data
Example #12
0
class KubernetesSnapshotService(CRUDService):

    GROUP = 'snapshot.storage.k8s.io'
    PLURAL = 'volumesnapshots'
    VERSION = 'v1'

    class Config:
        namespace = 'k8s.volume.snapshot'
        private = True

    @filterable
    async def query(self, filters, options):
        async with api_client() as (api, context):
            return filter_list(
                list(
                    itertools.chain(*[[
                        d for d in (await context['custom_object_api'].
                                    list_namespaced_custom_object(
                                        group=self.GROUP,
                                        version=self.VERSION,
                                        plural=self.PLURAL,
                                        namespace=namespace))['items']
                    ] for namespace in await self.middleware.call(
                        'k8s.namespace.namespace_names')])), filters, options)

    @accepts(
        Dict(
            'zfs_snapshot_create',
            Str('namespace', required=True),
            Dict(
                'metadata',
                Str('name', required=True),
                additional_attrs=True,
            ),
            Dict(
                'spec',
                Str('volumeSnapshotClassName', required=True),
                Dict(
                    'source',
                    Str('persistentVolumeClaimName', required=True),
                ),
                additional_attrs=True,
            ),
            additional_attrs=True,
        ))
    async def do_create(self, data):
        data.update({
            'kind': 'VolumeSnapshot',
            'apiVersion': f'snapshot.storage.k8s.io/{self.VERSION}'
        })
        namespace = data.pop('namespace')
        verrors = ValidationErrors()

        if not await self.middleware.call(
                'k8s.zfs.snapshotclass.query',
            [['metadata.name', '=', data['spec']['volumeSnapshotClassName']]]):
            verrors.add('zfs_snapshot_create.spec.volumeSnapshotClassName',
                        'Specified volumeSnapshotClassName does not exist.')

        if not await self.middleware.call('k8s.pvc.query', [[
                'metadata.name', '=',
                data['spec']['source']['persistentVolumeClaimName']
        ], ['metadata.namespace', '=', namespace]]):
            verrors.add(
                'zfs_snapshot_create.spec.source.persistentVolumeClaimName',
                f'Specified persistentVolumeClaimName does not exist in {namespace}.'
            )

        verrors.check()

        async with api_client() as (api, context):
            await context['custom_object_api'].create_namespaced_custom_object(
                group=self.GROUP,
                version=self.VERSION,
                plural=self.PLURAL,
                namespace=namespace,
                body=data)

    @accepts(Str('snapshot_name'),
             Dict(
                 'zfs_snapshot_delete',
                 Str('namespace', required=True),
             ))
    async def do_delete(self, snapshot_name, options):
        async with api_client() as (api, context):
            await context['custom_object_api'].delete_namespaced_custom_object(
                group=self.GROUP,
                version=self.VERSION,
                plural=self.PLURAL,
                namespace=options['namespace'],
                name=snapshot_name,
            )
Example #13
0
class ACLTemplateService(CRUDService):

    class Config:
        cli_namespace = 'filesystem.acltemplate'
        datastore = 'filesystem.acltemplate'
        datastore_prefix = 'acltemplate_'
        namespace = 'filesystem.acltemplate'

    ENTRY = Patch(
        'acltemplate_create', 'acltemplate_entry',
        ('add', Int('id')),
        ('add', Bool('builtin')),
    )

    @private
    async def validate_acl(self, data, schema, verrors):
        acltype = ACLType[data['acltype']]
        aclcheck = acltype.validate({'dacl': data['acl']})
        if not aclcheck['is_valid']:
            for err in aclcheck['errors']:
                if err[2]:
                    v = f'{schema}.{err[0]}.{err[2]}'
                else:
                    v = f'{schema}.{err[0]}'

                verrors.add(v, err[1])

        if acltype is ACLType.POSIX1E:
            await self.middleware.call(
                "filesystem.gen_aclstring_posix1e",
                copy.deepcopy(data["acl"]), False, verrors
            )

        for idx, ace in enumerate(data['acl']):
            if ace['id'] is None:
                verrors.add(f'{schema}.{idx}.id', 'null id is not permitted.')

    @accepts(Dict(
        "acltemplate_create",
        Str("name", required=True),
        Str("acltype", required=True, enum=["NFS4", "POSIX1E"]),
        OROperator(Ref('nfs4_acl'), Ref('posix1e_acl'), name='acl', required=True),
        register=True
    ))
    async def do_create(self, data):
        """
        Create a new filesystem ACL template.
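
        .. examples(websocket)::

          Create an NFS4 template with a single ACL entry (field values are
          illustrative sketches, not defaults; the method name is assumed from
          the service namespace):

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "filesystem.acltemplate.create",
                "params": [{
                    "name": "MY_TEMPLATE",
                    "acltype": "NFS4",
                    "acl": [
                        {"tag": "GROUP", "id": 1001, "perms": {"BASIC": "MODIFY"}, "flags": {"BASIC": "INHERIT"}, "type": "ALLOW"}
                    ]
                }]
            }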
        """
        verrors = ValidationErrors()
        if len(data['acl']) == 0:
            verrors.add(
                "filesystem_acltemplate_create.acl",
                "At least one ACL entry must be specified."
            )
        await self.validate_acl(data, "filesystem_acltemplate_create.acl", verrors)
        verrors.check()
        data['builtin'] = False

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        return await self._get_instance(data['id'])

    @accepts(
        Int('id'),
        Patch(
            'acltemplate_create',
            'acltemplate_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id, data):
        """
        Update the filesystem ACL template with `id`.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        if old['builtin']:
            verrors.add("filesystem_acltemplate_update.builtin",
                        "built-in ACL templates may not be changed")

        if new['name'] != old['name']:
            name_exists = bool(await self.query([('name', '=', new['name'])]))
            if name_exists:
                verrors.add("filesystem_acltemplate_update.name",
                            f"{data['name']}: name is not unique")

        if len(new['acl']) == 0:
            verrors.add(
                "filesystem_acltemplate_update.acl",
                "At least one ACL entry must be specified."
            )
        await self.validate_acl(new, "filesystem_acltemplate_update.acl", verrors)
        verrors.check()

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id,
            new,
            {'prefix': self._config.datastore_prefix}
        )
        return await self.get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        entry = await self.get_instance(id)
        if entry['builtin']:
            raise CallError("Deletion of builtin templates is not permitted",
                            errno.EPERM)

        return await self.middleware.call(
            'datastore.delete', self._config.datastore, id
        )

    @private
    async def append_builtins(self, data):
        """
        This method ensures that the ACL grants some minimum level of permissions
        to our builtin_users and builtin_administrators groups.
        """
        bu_id = int(SMBBuiltin.USERS.value[1][9:])
        ba_id = int(SMBBuiltin.ADMINISTRATORS.value[1][9:])
        has_builtins = any(filter(lambda x: x["id"] in [bu_id, ba_id], data['acl']))
        if has_builtins:
            return

        if data['acltype'] == ACLType.NFS4.name:
            data['acl'].extend([
                {"tag": "GROUP", "id": bu_id, "perms": {"BASIC": "MODIFY"}, "flags": {"BASIC": "INHERIT"}, "type": "ALLOW"},
                {"tag": "GROUP", "id": ba_id, "perms": {"BASIC": "FULL_CONTROL"}, "flags": {"BASIC": "INHERIT"}, "type": "ALLOW"},
            ])
            return

        has_default_mask = any(filter(lambda x: x["tag"] == "MASK" and x["default"], data['acl']))
        has_access_mask = any(filter(lambda x: x["tag"] == "MASK" and not x["default"], data['acl']))
        all_perms = {"READ": True, "WRITE": True, "EXECUTE": True}
        data['acl'].extend([
            {"tag": "GROUP", "id": bu_id, "perms": all_perms, "default": False},
            {"tag": "GROUP", "id": bu_id, "perms": all_perms, "default": True},
            {"tag": "GROUP", "id": ba_id, "perms": all_perms, "default": False},
            {"tag": "GROUP", "id": ba_id, "perms": all_perms, "default": True},
        ])

        if not has_default_mask:
            data['acl'].append({"tag": "MASK", "id": -1, "perms": all_perms, "default": True})

        if not has_access_mask:
            data['acl'].append({"tag": "MASK", "id": -1, "perms": all_perms, "default": False})

        return

    @private
    async def resolve_names(self, uid, gid, data):
        for ace in data['acl']:
            if ace['id'] != -1:
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', ace['id'], ace['tag']
                )
            elif ace['tag'] in ('group@', 'GROUP_OBJ'):
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', gid, 'GROUP'
                )
            elif ace['tag'] in ('owner@', 'USER_OBJ'):
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', uid, 'USER'
                )
            else:
                ace['who'] = None

        return

    @accepts(Dict(
        "acltemplate_by_path",
        Str("path", default=""),
        Ref('query-filters'),
        Ref('query-options'),
        Dict(
            "format-options",
            Bool("canonicalize", default=False),
            Bool("ensure_builtins", default=False),
            Bool("resolve_names", default=False),
        )
    ))
    @returns(List(
        'templates',
        items=[Ref('acltemplate_entry')]
    ))
    async def by_path(self, data):
        """
        Retrieve list of available ACL templates for a given `path`.

        Supports `query-filters` and `query-options`.
        `format-options` gives additional options to alter the results of
        the template query:

        `canonicalize` - place ACL entries for NFSv4 ACLs in Microsoft canonical order.
        `ensure_builtins` - ensure all results contain entries for `builtin_users` and `builtin_administrators`
        groups.
        `resolve_names` - convert ids in ACL entries into names.
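
        .. examples(websocket)::

          Query templates for a path and resolve ids into names (the path and
          option values are illustrative; the method name is assumed from the
          service namespace):

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "filesystem.acltemplate.by_path",
                "params": [{
                    "path": "/mnt/tank/share",
                    "format-options": {"canonicalize": true, "resolve_names": true}
                }]
            }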
        """
        verrors = ValidationErrors()
        filters = data.get('query-filters')
        if data['path']:
            path = await self.middleware.call(
                "filesystem.resolve_cluster_path", data['path']
            )
            acltype = await self.middleware.call(
                'filesystem.path_get_acltype', path
            )
            if acltype == ACLType.DISABLED.name:
                return []

            if acltype == ACLType.POSIX1E.name and data['format-options']['canonicalize']:
                verrors.add(
                    "filesystem.acltemplate_by_path.format-options.canonicalize",
                    "POSIX1E ACLs may not be sorted into Windows canonical order."
                )
            filters.append(("acltype", "=", acltype))

        if not data['path'] and data['format-options']['resolve_names']:
            verrors.add(
                "filesystem.acltemplate_by_path.format-options.canonicalize",
                "ACL entry ids may not be resolved into names unless path is provided."
            )

        verrors.check()

        templates = await self.query(filters, data['query-options'])
        for t in templates:
            if data['format-options']['ensure_builtins']:
                await self.append_builtins(t)

            if data['format-options']['resolve_names']:
                st = await self.middleware.run_in_thread(os.stat, path)
                await self.resolve_names(st.st_uid, st.st_gid, t)

            if data['format-options']['canonicalize'] and t['acltype'] == ACLType.NFS4.name:
                canonicalized = ACLType[t['acltype']].canonicalize(t['acl'])
                t['acl'] = canonicalized

        return templates
Example #14
0
class UpdateService(Service):
    @accepts()
    def get_trains(self):
        """
        Returns a dict of available trains, the currently configured train, and
        the train of the currently booted environment.
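
        The returned structure looks roughly like the following sketch (train
        names and values are purely illustrative):

            {
                "trains": {"SOME-TRAIN": {"description": "...", "sequence": "..."}},
                "current": "SOME-TRAIN",
                "selected": "SOME-TRAIN"
            }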
        """
        data = self.middleware.call_sync('datastore.config', 'system.update')
        conf = Configuration.Configuration()
        conf.LoadTrainsConfig()

        selected = None
        trains = {}
        for name, descr in (conf.AvailableTrains() or {}).items():
            train = conf._trains.get(name)
            if train is None:
                train = Train.Train(name, descr)
            if not selected and data['upd_train'] == train.Name():
                selected = data['upd_train']
            trains[train.Name()] = {
                'description': train.Description(),
                'sequence': train.LastSequence(),
            }
        if not data['upd_train'] or not selected:
            selected = conf.CurrentTrain()
        return {
            'trains': trains,
            'current': conf.CurrentTrain(),
            'selected': selected,
        }

    @accepts(
        Dict(
            'update-check-available',
            Str('train', required=False),
            required=False,
        ))
    def check_available(self, attrs=None):
        """
        Checks if there is an update available from the update server.

        status:
          - REBOOT_REQUIRED: an update has already been applied
          - AVAILABLE: an update is available
          - UNAVAILABLE: no update available

        .. examples(websocket)::

          Check available update using default train:

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "update.check_available"
            }
        """

        try:
            applied = self.middleware.call_sync('cache.get', 'update.applied')
        except Exception:
            applied = False
        if applied is True:
            return {'status': 'REBOOT_REQUIRED'}

        train = (attrs or {}).get('train') or self.middleware.call_sync(
            'update.get_trains')['selected']

        handler = CheckUpdateHandler()
        manifest = CheckForUpdates(
            diff_handler=handler.diff_call,
            handler=handler.call,
            train=train,
        )

        if not manifest:
            return {'status': 'UNAVAILABLE'}

        data = {
            'status': 'AVAILABLE',
            'changes': handler.changes,
            'notice': manifest.Notice(),
            'notes': manifest.Notes(),
        }

        conf = Configuration.Configuration()
        sys_mani = conf.SystemManifest()
        if sys_mani:
            sequence = sys_mani.Sequence()
        else:
            sequence = ''
        data['changelog'] = get_changelog(train,
                                          start=sequence,
                                          end=manifest.Sequence())

        data['version'] = manifest.Version()
        return data

    @accepts(Str('path'))
    async def get_pending(self, path=None):
        """
        Gets a list of packages already downloaded and ready to be applied.
        Each entry in the list consists of the operation type and its name, e.g.

          {
            "operation": "upgrade",
            "name": "baseos-11.0 -> baseos-11.1"
          }
        """
        if path is None:
            path = await self.middleware.call('notifier.get_update_location')
        data = []
        try:
            changes = await self.middleware.run_in_thread(
                Update.PendingUpdatesChanges, path)
        except (
                UpdateIncompleteCacheException,
                UpdateInvalidCacheException,
                UpdateBusyCacheException,
        ):
            changes = []
        if changes:
            if changes.get("Reboot", True) is False:
                for svc in changes.get("Restart", []):
                    data.append({
                        'operation': svc,
                        'name': Update.GetServiceDescription(svc),
                    })
            for new, op, old in changes['Packages']:
                if op == 'upgrade':
                    name = '%s-%s -> %s-%s' % (
                        old.Name(),
                        old.Version(),
                        new.Name(),
                        new.Version(),
                    )
                elif op == 'install':
                    name = '%s-%s' % (new.Name(), new.Version())
                else:
                    # It's unclear why "delete" would fill in new
                    # instead of old; sounds like a pkgtools bug?
                    if old:
                        name = '%s-%s' % (old.Name(), old.Version())
                    else:
                        name = '%s-%s' % (new.Name(), new.Version())

                data.append({
                    'operation': op,
                    'name': name,
                })
        return data

    @accepts(
        Dict(
            'update',
            Str('train', required=False),
            Bool('reboot', default=False),
            required=False,
        ))
    @job(lock='update', process=True)
    async def update(self, job, attrs=None):
        """
        Downloads (if not already in cache) and applies an update.
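
        .. examples(websocket)::

          Apply an update from a specific train and reboot afterwards (the train
          name is illustrative; the method name is assumed from the service
          namespace):

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "update.update",
                "params": [{
                    "train": "SOME-TRAIN",
                    "reboot": true
                }]
            }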
        """
        attrs = attrs or {}
        train = attrs.get('train') or (
            await self.middleware.call('update.get_trains'))['selected']
        location = await self.middleware.call('notifier.get_update_location')

        job.set_progress(0, 'Retrieving update manifest')

        handler = UpdateHandler(self, job)

        update = Update.DownloadUpdate(
            train,
            location,
            check_handler=handler.check_handler,
            get_handler=handler.get_handler,
        )
        if update is False:
            raise ValueError('No update available')

        new_manifest = Manifest.Manifest(require_signature=True)
        new_manifest.LoadPath('{}/MANIFEST'.format(location))

        Update.ApplyUpdate(
            location,
            install_handler=handler.install_handler,
        )
        await self.middleware.call('cache.put', 'update.applied', True)

        if attrs.get('reboot'):
            await self.middleware.call('system.reboot', {'delay': 10})
        return True

    @accepts()
    @job(lock='updatedownload')
    def download(self, job):
        train = self.middleware.call_sync('update.get_trains')['selected']
        location = self.middleware.call_sync('notifier.get_update_location')

        job.set_progress(0, 'Retrieving update manifest')

        handler = UpdateHandler(self, job, 100)

        Update.DownloadUpdate(
            train,
            location,
            check_handler=handler.check_handler,
            get_handler=handler.get_handler,
        )
        update = Update.CheckForUpdates(train=train, cache_dir=location)

        if not update:
            return False

        notified = False
        try:
            if self.middleware.call_sync('cache.has_key', 'update.notified'):
                notified = self.middleware.call_sync('cache.get',
                                                     'update.notified')
        except Exception:
            pass

        if not notified:
            self.middleware.call_sync('cache.put', 'update.notified', True)
            conf = Configuration.Configuration()
            sys_mani = conf.SystemManifest()
            if sys_mani:
                sequence = sys_mani.Sequence()
            else:
                sequence = ''

            changelog = get_changelog(train,
                                      start=sequence,
                                      end=update.Sequence())
            hostname = socket.gethostname()

            try:
                # FIXME: Translation
                self.middleware.call_sync(
                    'mail.send', {
                        'subject': '{}: {}'.format(hostname,
                                                   'Update Available'),
                        'text':
                        '''A new update is available for the %(train)s train.
Version: %(version)s
Changelog:
%(changelog)s
''' % {
                            'train': train,
                            'version': update.Version(),
                            'changelog': changelog,
                        },
                    }).wait_sync()
            except Exception:
                self.logger.warn('Failed to send email about new update',
                                 exc_info=True)
        return True

    @accepts(Str('path'))
    @job(lock='updatemanual', process=True)
    async def manual(self, job, path):
        """
        Apply manual update of file `path`.
        """
        rv = await self.middleware.call('notifier.validate_update', path)
        if not rv:
            raise CallError('Invalid update file', errno.EINVAL)
        await self.middleware.call('notifier.apply_update', path, timeout=None)
        try:
            await self.middleware.call('notifier.destroy_upload_location')
        except Exception:
            self.logger.warn('Failed to destroy upload location',
                             exc_info=True)

    @accepts(Dict(
        'updatefile',
        Str('destination'),
    ))
    @job(lock='updatemanual', pipes=['input'])
    async def file(self, job, options):
        """
        Updates the system using the uploaded .tar file.

        Use null `destination` to create a temporary location.
        """

        dest = options.get('destination')

        if not dest:
            try:
                await self.middleware.call('notifier.create_upload_location')
                dest = '/var/tmp/firmware'
            except Exception as e:
                raise CallError(str(e))
        elif not dest.startswith('/mnt/'):
            raise CallError('Destination must reside within a pool')

        if not os.path.isdir(dest):
            raise CallError('Destination is not a directory')

        destfile = os.path.join(dest, 'manualupdate.tar')
        dest_extracted = os.path.join(dest, '.update')

        try:
            job.set_progress(10, 'Writing uploaded file to disk')
            with open(destfile, 'wb') as f:
                await self.middleware.run_in_io_thread(
                    shutil.copyfileobj,
                    job.pipes.input.r,
                    f,
                    1048576,
                )

            def do_update():
                try:
                    job.set_progress(30, 'Extracting uploaded file')
                    ExtractFrozenUpdate(destfile, dest_extracted, verbose=True)
                    job.set_progress(50, 'Applying update')
                    ApplyUpdate(dest_extracted)
                except Exception as e:
                    raise CallError(str(e))

            await self.middleware.run_in_io_thread(do_update)

            job.set_progress(95, 'Cleaning up')

        finally:
            if os.path.exists(destfile):
                os.unlink(destfile)

            if os.path.exists(dest_extracted):
                shutil.rmtree(dest_extracted, ignore_errors=True)

        if dest == '/var/tmp/firmware':
            await self.middleware.call('notifier.destroy_upload_location')

        job.set_progress(100, 'Update completed')
Example #15
0
class RsyncTaskService(CRUDService):
    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'

    @private
    async def rsync_task_extend(self, data):
        data['extra'] = list(filter(None, re.split(r"\s+", data["extra"])))
        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = None
        with contextlib.suppress(KeyError):
            user = await self.middleware.call('dscache.get_uncached_user',
                                              username)

        if not user:
            verrors.add(f'{schema}.user',
                        f'Provided user "{username}" does not exist')
            raise verrors

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        if data.get('extra'):
            data['extra'] = ' '.join(data['extra'])
        else:
            data['extra'] = ''

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user['pw_dir'], '.ssh',
                                               'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(
                glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in home dir.')
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}')

            if (data.get('validate_rpath') and remote_path and remote_host
                    and remote_port):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    with (await asyncio.wait_for(asyncssh.connect(
                            remote_host,
                            port=remote_port,
                            username=remote_username,
                            client_keys=key_files,
                            known_hosts=None),
                                                 timeout=5)) as conn:

                        await conn.run(f'test -d {shlex.quote(remote_path)}',
                                       check=True)

                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(f'{schema}.remotehost', e.__str__())

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error[ error code {e.code} ] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field')
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}')

                except asyncssh.Error as e:

                    if e.__class__.__name__ in str(e):
                        exception_reason = str(e)
                    else:
                        exception_reason = f'{e.__class__.__name__} {e}'
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data.get('validate_rpath'):
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(
        Dict(
            'rsync_task_create',
            Str('path', required=True),
            Str('user', required=True),
            Str('remotehost'),
            Int('remoteport'),
            Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
            Str('remotemodule'),
            Str('remotepath'),
            Bool('validate_rpath'),
            Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
            Str('desc'),
            Cron(
                'schedule',
                defaults={'minute': '00'},
            ),
            Bool('recursive'),
            Bool('times'),
            Bool('compress'),
            Bool('archive'),
            Bool('delete'),
            Bool('quiet'),
            Bool('preserveperm'),
            Bool('preserveattr'),
            Bool('delayupdates'),
            List('extra', items=[Str('extra')]),
            Bool('enabled'),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        `path` represents the path to pool/dataset.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` represents the different operating mechanisms for rsync, i.e. Rsync Module mode / Rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute should be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data,
                                                       'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self._get_instance(data['id'])

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('rsync_task_create', 'rsync_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new,
                                                       'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete',
                                         self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command, avoiding code duplication.
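
        For a MODULE-mode PUSH task, the assembled command would look roughly
        like the following (path, user, host and module name are illustrative):

            /usr/bin/lockf -s -t 0 -k /mnt/tank/src /usr/local/bin/rsync -a -z /mnt/tank/src "backup"@192.0.2.10::"module1"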
        """
        rsync = await self._get_instance(id)
        path = shlex.quote(rsync['path'])

        line = [
            '/usr/bin/lockf', '-s', '-t', '0', '-k', path,
            '/usr/local/bin/rsync'
        ]
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-z'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [
                path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"'
            ]
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)

    @item_method
    @accepts(Int('id'))
    @job(lock=lambda args: args[-1], logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt as well as syslog.
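
        .. examples(websocket)::

          Run the rsync task with id 1 (the id is illustrative):

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.run",
                "params": [1]
            }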
        """
        rsync = self.middleware.call_sync('rsynctask._get_instance', id)
        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(commandline, rsync['user'],
                                           lambda v: job.logs_fd.write(v))

        for klass in ('RsyncSuccess',
                      'RsyncFailed') if not rsync['quiet'] else ():
            self.middleware.call_sync('alert.oneshot_delete', klass,
                                      rsync['id'])

        if cp.returncode != 0:
            if not rsync['quiet']:
                self.middleware.call_sync('alert.oneshot_create',
                                          'RsyncFailed', rsync)

            raise CallError(
                f'rsync command returned {cp.returncode}. Check logs for further information.'
            )
        elif not rsync['quiet']:
            self.middleware.call_sync('alert.oneshot_create', 'RsyncSuccess',
                                      rsync)
Example #16
0
class SharingAFPService(CRUDService):
    class Config:
        namespace = 'sharing.afp'
        datastore = 'sharing.afp_share'
        datastore_prefix = 'afp_'
        datastore_extend = 'sharing.afp.extend'

    @accepts(Dict(
        'sharingafp_create',
        Str('path', required=True),
        Bool('home', default=False),
        Str('name'),
        Str('comment'),
        List('allow', default=[]),
        List('deny', default=[]),
        List('ro', default=[]),
        List('rw', default=[]),
        Bool('timemachine', default=False),
        Int('timemachine_quota', default=0),
        Bool('nodev', default=False),
        Bool('nostat', default=False),
        Bool('upriv', default=True),
        UnixPerm('fperm', default='644'),
        UnixPerm('dperm', default='755'),
        UnixPerm('umask', default='000'),
        List('hostsallow', items=[], default=[]),
        List('hostsdeny', items=[], default=[]),
        Str('vuid', null=True, default=''),
        Str('auxparams', max_length=None),
        Bool('enabled', default=True),
        register=True
    ))
    async def do_create(self, data):
        """
        Create AFP share.

        `allow`, `deny`, `ro`, and `rw` are lists of users and groups. Groups are designated by
        an @ prefix.

        `hostsallow` and `hostsdeny` are lists of hosts and/or networks.
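
        .. examples(websocket)::

          Create an AFP share with Time Machine enabled (path, name and network
          are illustrative):

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "sharing.afp.create",
                "params": [{
                    "path": "/mnt/tank/afp_share",
                    "name": "afp_share",
                    "timemachine": true,
                    "hostsallow": ["192.0.2.0/24"],
                    "enabled": true
                }]
            }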
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingafp_create', verrors)
        await self.validate(data, 'sharingafp_create', verrors)

        await check_path_resides_within_volume(
            verrors, self.middleware, 'sharingafp_create.path', path)

        verrors.check()

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)

        await self._service_change('afp', 'reload')

        return data

    @accepts(
        Int('id'),
        Patch(
            'sharingafp_create',
            'sharingafp_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id, data):
        """
        Update AFP share `id`.
        """
        verrors = ValidationErrors()
        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)],
            {'extend': self._config.datastore_extend,
             'prefix': self._config.datastore_prefix,
             'get': True})
        path = data.get('path')

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingafp_update', verrors, id=id)
        await self.validate(new, 'sharingafp_update', verrors, old=old)

        if path:
            await check_path_resides_within_volume(
                verrors, self.middleware, 'sharingafp_create.path', path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call(
            'datastore.update', self._config.datastore, id, new,
            {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self._service_change('afp', 'reload')

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete AFP share `id`.
        """
        result = await self.middleware.call('datastore.delete', self._config.datastore, id)
        await self._service_change('afp', 'reload')
        return result

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        await self.home_exists(data['home'], schema_name, verrors, old)
        if data['vuid']:
            try:
                uuid.UUID(data['vuid'], version=4)
            except ValueError:
                verrors.add(f'{schema_name}.vuid', 'vuid must be a valid UUID.')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # The user already had this set as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters, {'prefix': self._config.datastore_prefix})

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']
        home = data['home']
        name_filters = [('name', '=', name)]
        path_filters = [('path', '=', path)]

        if not name:
            if home:
                name = 'Homes'
            else:
                name = path.rsplit('/', 1)[-1]

        if id is not None:
            name_filters.append(('id', '!=', id))
            path_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore,
            name_filters,
            {'prefix': self._config.datastore_prefix})
        path_result = await self.middleware.call(
            'datastore.query', self._config.datastore,
            path_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        if path_result:
            verrors.add(f'{schema_name}.path',
                        'A share with this path already exists.')

        return name

    @private
    async def extend(self, data):
        data['allow'] = data['allow'].split()
        data['deny'] = data['deny'].split()
        data['ro'] = data['ro'].split()
        data['rw'] = data['rw'].split()
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()

        return data

    @private
    async def compress(self, data):
        data['allow'] = ' '.join(data['allow'])
        data['deny'] = ' '.join(data['deny'])
        data['ro'] = ' '.join(data['ro'])
        data['rw'] = ' '.join(data['rw'])
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        if not data['vuid'] and data['timemachine']:
            data['vuid'] = str(uuid.uuid4())
        return data
Example #17
0
class RsyncModService(CRUDService):
    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'
        datastore_extend = 'rsyncmod.rsync_mod_extend'

    @private
    async def rsync_mod_extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        data['mode'] = data['mode'].upper()
        return data

    @private
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        await check_path_resides_within_volume(verrors, self.middleware,
                                               f'{schema_name}.path',
                                               data.get('path'))

        for entity in ('user', 'group'):
            value = data.get(entity)
            if value not in map(
                    lambda e: e[entity if entity == 'group' else 'username'],
                    await self.middleware.call(f'{entity}.query')):
                verrors.add(f'{schema_name}.{entity}',
                            f'Please specify a valid {entity}')

        verrors.check()

        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data['mode'] = data['mode'].lower()

        return data

    @accepts(
        Dict(
            'rsyncmod_create',
            Str('name', validators=[Match(r'[^/\]]')]),
            Str('comment'),
            Str('path', required=True),
            Str('mode', enum=['RO', 'RW', 'WO']),
            Int('maxconn'),
            Str('user', default='nobody'),
            Str('group', default='nobody'),
            List('hostsallow', items=[Str('hostsallow')], default=[]),
            List('hostsdeny', items=[Str('hostdeny')], default=[]),
            Str('auxiliary', max_length=None),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsyncmod module.

        `path` represents the path to pool/dataset.

        `maxconn` is an integer value representing the maximum number of simultaneous connections. Zero represents
        unlimited.

        `hostsallow` is a list of patterns to match hostname/ip address of a connecting client. If list is empty,
        all hosts are allowed.

        `hostsdeny` is a list of patterns to match hostname/ip address of a connecting client. If the pattern is
        matched, access is denied to the client. If no client should be denied, this should be left empty.

        `auxiliary` attribute can be used to pass on any additional parameters from rsyncd.conf(5).
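
        .. examples(websocket)::

          Create a read-only rsync module (name, path and network are
          illustrative):

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsyncmod.create",
                "params": [{
                    "name": "backups",
                    "path": "/mnt/tank/backups",
                    "mode": "RO",
                    "hostsallow": ["192.0.2.0/24"]
                }]
            }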
        """

        data = await self.common_validation(data, 'rsyncmod_create')

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self._get_instance(data['id'])

    @accepts(Int('id'),
             Patch('rsyncmod_create', 'rsyncmod_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsyncmod module of `id`.
        """
        module = await self._get_instance(id)
        module.update(data)

        module = await self.common_validation(module, 'rsyncmod_update')

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, module,
                                   {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsyncmod module of `id`.
        """
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)
Example #18
0
class AFPService(SystemServiceService):

    class Config:
        service = 'afp'
        datastore_extend = 'afp.extend'
        datastore_prefix = 'afp_srv_'

    @private
    async def extend(self, afp):
        for i in ('map_acls', 'chmod_request'):
            afp[i] = afp[i].upper()
        return afp

    @private
    async def compress(self, afp):
        for i in ('map_acls', 'chmod_request'):
            value = afp.get(i)
            if value:
                afp[i] = value.lower()
        return afp

    @accepts(Dict(
        'afp_update',
        Bool('guest'),
        Str('guest_user'),
        List('bindip', items=[Str('ip', validators=[IpAddress()])]),
        Int('connections_limit', validators=[Range(min=1, max=65535)]),
        Dir('dbpath'),
        Str('global_aux', max_length=None),
        Str('map_acls', enum=['RIGHTS', 'MODE', 'NONE']),
        Str('chmod_request', enum=['PRESERVE', 'SIMPLE', 'IGNORE']),
        Str('loglevel', enum=[x.name for x in AFPLogLevel]),
        update=True
    ))
    async def do_update(self, data):
        """
        Update AFP service settings.

        `bindip` is a list of IPs to bind AFP to. Leave blank (empty list) to bind to all
        available IPs.

        `map_acls` defines how to map the effective permissions of authenticated users.
        RIGHTS - Unix-style permissions
        MODE - ACLs
        NONE - Do not map

        `chmod_request` defines advanced permission control that deals with ACLs.
        PRESERVE - Preserve ZFS ACEs for named users and groups or POSIX ACL group mask
        SIMPLE - Change permission as requested without any extra steps
        IGNORE - Permission change requests are ignored
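
        .. examples(websocket)::

          Update AFP service settings (values are illustrative only):

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "afp.update",
                "params": [{
                    "guest": false,
                    "connections_limit": 100,
                    "map_acls": "NONE"
                }]
            }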
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new['dbpath']:
            await check_path_resides_within_volume(
                verrors, self.middleware, 'afp_update.dbpath', new['dbpath'],
            )

        verrors.check()

        new = await self.compress(new)
        await self._update_service(old, new)

        return await self.config()

    @accepts()
    async def bindip_choices(self):
        """
        List of valid choices for IP addresses to which to bind the AFP service.
        """
        return {
            d['address']: d['address'] for d in await self.middleware.call('interface.ip_in_use')
        }
Example #19
0
class CronJobService(CRUDService):
    class Config:
        datastore = 'tasks.cronjob'
        datastore_prefix = 'cron_'
        datastore_extend = 'cronjob.cron_extend'
        namespace = 'cronjob'

    @private
    def cron_extend(self, data):
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        user = data.get('user')
        if user:
            # Windows users can have spaces in their usernames
            # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808
            if ' ' in user:
                verrors.add(f'{schema}.user', 'Usernames cannot have spaces')

            elif not (await self.middleware.call('notifier.get_user_object',
                                                 user)):
                verrors.add(f'{schema}.user', 'Specified user does not exist')

        return verrors, data

    @accepts(
        Dict('cron_job_create',
             Bool('enabled'),
             Bool('stderr'),
             Bool('stdout'),
             Cron('schedule'),
             Str('command', required=True),
             Str('description'),
             Str('user', required=True),
             register=True))
    async def do_create(self, data):
        """
        Create a new cron job.

        `stderr` and `stdout` are boolean values which, when `true`, suppress standard error /
        standard output of the job respectively.

        .. examples(websocket)::

          Create a cron job which executes `touch /tmp/testfile` every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "cronjob.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "command": "touch /tmp/testfile",
                    "description": "Test command",
                    "user": "******",
                    "stderr": true,
                    "stdout": true
                }]
            }
        """
        verrors, data = await self.validate_data(data, 'cron_job_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('service.restart', 'cron')

        return data

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('cron_job_create', 'cron_job_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update cronjob of `id`.
        """
        task_data = await self.query(filters=[('id', '=', id)],
                                     options={'get': True})
        original_data = task_data.copy()
        task_data.update(data)
        verrors, task_data = await self.validate_data(task_data,
                                                      'cron_job_update')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(task_data)
        Cron.convert_schedule_to_db_format(original_data)

        if len(set(task_data.items()) ^ set(original_data.items())) > 0:

            await self.middleware.call(
                'datastore.update', self._config.datastore, id, task_data,
                {'prefix': self._config.datastore_prefix})

            await self.middleware.call('service.restart', 'cron')

        return await self.query(filters=[('id', '=', id)],
                                options={'get': True})

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete cronjob of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('service.restart', 'cron')

        return response
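
For illustration only, the websocket example above maps directly onto a middlewared client call (assuming the standard client library); the username and command are placeholders, and the minute value `*/5` schedules the job every five minutes.

# Hypothetical usage sketch: create a cron job via the middlewared client.
from middlewared.client import Client

with Client() as c:
    job = c.call('cronjob.create', {
        'enabled': True,
        'schedule': {'minute': '*/5', 'hour': '*', 'dom': '*', 'month': '*', 'dow': '*'},
        'command': 'touch /tmp/testfile',
        'description': 'Test command',
        'user': 'root',
        'stderr': True,   # suppress standard error
        'stdout': True,   # suppress standard output
    })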
Beispiel #20
0
class SSHService(SystemServiceService):
    class Config:
        service = "ssh"
        service_model = "ssh"
        datastore_prefix = "ssh_"

    @accepts()
    def bindiface_choices(self):
        """
        Available choices for the bindiface attribute of SSH service.
        """
        return self.middleware.call_sync('interface.choices')

    @accepts(
        Dict('ssh_update',
             List('bindiface', items=[Str('iface')]),
             Int('tcpport', validators=[Range(min=1, max=65535)]),
             Bool('rootlogin'),
             Bool('passwordauth'),
             Bool('kerberosauth'),
             Bool('tcpfwd'),
             Bool('compression'),
             Str('sftp_log_level',
                 enum=[
                     "", "QUIET", "FATAL", "ERROR", "INFO", "VERBOSE", "DEBUG",
                     "DEBUG2", "DEBUG3"
                 ]),
             Str('sftp_log_facility',
                 enum=[
                     "", "DAEMON", "USER", "AUTH", "LOCAL0", "LOCAL1",
                     "LOCAL2", "LOCAL3", "LOCAL4", "LOCAL5", "LOCAL6", "LOCAL7"
                 ]),
             List('weak_ciphers',
                  items=[Str('cipher', enum=['AES128-CBC', 'NONE'])]),
             Str('options', max_length=None),
             update=True))
    async def do_update(self, data):
        """
        Update settings of SSH daemon service.

        If `bindiface` is empty it will listen on all available addresses.

        .. examples(websocket)::

          Make sshd listen only on the igb0 interface.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "ssh.update",
                "params": [{
                    "bindiface": ["igb0"]
                }]
            }

        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        if new['bindiface']:
            verrors = ValidationErrors()
            iface_choices = await self.middleware.call('ssh.bindiface_choices')
            invalid_ifaces = list(
                filter(lambda x: x not in iface_choices, new['bindiface']))
            if invalid_ifaces:
                verrors.add(
                    'ssh_update.bindiface',
                    f'The following interfaces are not valid: {", ".join(invalid_ifaces)}',
                )
            verrors.check()

        await self._update_service(old, new)

        keyfile = "/usr/local/etc/ssh/ssh_host_ecdsa_key.pub"
        if os.path.exists(keyfile):
            with open(keyfile, "rb") as f:
                pubkey = f.read().strip().split(None, 3)[1]
            decoded_key = base64.b64decode(pubkey)
            key_digest = hashlib.sha256(decoded_key).digest()
            ssh_fingerprint = (b"SHA256:" +
                               base64.b64encode(key_digest).replace(
                                   b"=", b"")).decode("utf-8")

            syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
            syslog.syslog(
                syslog.LOG_ERR,
                'ECDSA Fingerprint of the SSH KEY: ' + ssh_fingerprint)
            syslog.closelog()

        return new

    @private
    def save_keys(self):
        update = {}
        for i in [
                "ssh_host_key",
                "ssh_host_key.pub",
                "ssh_host_dsa_key",
                "ssh_host_dsa_key.pub",
                "ssh_host_dsa_key-cert.pub",
                "ssh_host_ecdsa_key",
                "ssh_host_ecdsa_key.pub",
                "ssh_host_ecdsa_key-cert.pub",
                "ssh_host_rsa_key",
                "ssh_host_rsa_key.pub",
                "ssh_host_rsa_key-cert.pub",
                "ssh_host_ed25519_key",
                "ssh_host_ed25519_key.pub",
                "ssh_host_ed25519_key-cert.pub",
        ]:
            path = os.path.join("/usr/local/etc/ssh", i)
            if os.path.exists(path):
                with open(path, "rb") as f:
                    data = base64.b64encode(f.read()).decode("ascii")

                column = i.replace(
                    ".",
                    "_",
                ).replace("-", "_")

                update[column] = data

        old = self.middleware.call_sync('ssh.config')
        self.middleware.call_sync('datastore.update', 'services.ssh',
                                  old['id'], update)
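
The host key fingerprint logged by `ssh.do_update` above can be reproduced standalone; this minimal sketch restates that computation and assumes a readable OpenSSH public key file at the hypothetical default path shown.

# Standalone sketch of the SHA256 host key fingerprint computed in ssh.do_update.
import base64
import hashlib


def host_key_fingerprint(pubkey_path='/usr/local/etc/ssh/ssh_host_ecdsa_key.pub'):
    with open(pubkey_path, 'rb') as f:
        # Public key files look like: "ecdsa-sha2-nistp256 AAAA... comment"
        pubkey = f.read().strip().split(None, 3)[1]
    digest = hashlib.sha256(base64.b64decode(pubkey)).digest()
    # OpenSSH-style fingerprint: unpadded base64 prefixed with "SHA256:"
    return 'SHA256:' + base64.b64encode(digest).decode().rstrip('=')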
Beispiel #21
0
class RsyncTaskService(TaskPathService):

    share_task_type = 'Rsync'

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'
        datastore_extend_context = 'rsynctask.rsync_task_extend_context'
        cli_namespace = 'task.rsync'

    ENTRY = Patch(
        'rsync_task_create',
        'rsync_task_entry',
        ('rm', {
            'name': 'validate_rpath'
        }),
        ('add', Int('id')),
        ('add', Bool('locked')),
        ('add', Dict('job', null=True, additional_attrs=True)),
    )

    @private
    async def rsync_task_extend(self, data, context):
        try:
            data['extra'] = shlex.split(data['extra'].replace('"',
                                                              r'"\"').replace(
                                                                  "'", r'"\"'))
        except ValueError:
            # Handle the case where the extra value was misconfigured by older versions.
            # Going forward, we verify that it can be split successfully using shlex.
            data['extra'] = data['extra'].split()

        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        data['job'] = context['jobs'].get(data['id'])
        return data

    @private
    async def rsync_task_extend_context(self, rows, extra):
        jobs = {}
        for j in await self.middleware.call("core.get_jobs",
                                            [("method", "=", "rsynctask.run")],
                                            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        return {
            "jobs": jobs,
        }

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = None
        with contextlib.suppress(KeyError):
            user = await self.middleware.call('dscache.get_uncached_user',
                                              username)

        if not user:
            verrors.add(f'{schema}.user',
                        f'Provided user "{username}" does not exist')
            raise verrors

        await self.validate_path_field(data, schema, verrors)

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        data['extra'] = ' '.join(data['extra'])
        try:
            shlex.split(data['extra'].replace('"',
                                              r'"\"').replace("'", r'"\"'))
        except ValueError as e:
            verrors.add(f'{schema}.extra', f'Please specify valid value: {e}')

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user['pw_dir'], '.ssh',
                                               'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(
                glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in home dir.')
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # this file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}')

            if (data['enabled'] and data['validate_rpath'] and remote_path
                    and remote_host and remote_port):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    async with await asyncio.wait_for(
                            asyncssh.connect(remote_host,
                                             port=remote_port,
                                             username=remote_username,
                                             client_keys=key_files,
                                             known_hosts=None),
                            timeout=5,
                    ) as conn:
                        await conn.run(f'test -d {shlex.quote(remote_path)}',
                                       check=True)
                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(f'{schema}.remotehost', e.__str__())

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error [error code {e.code}] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field')
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}')

                except asyncssh.Error as e:

                    if e.__class__.__name__ in e.__str__():
                        exception_reason = e.__str__()
                    else:
                        exception_reason = e.__class__.__name__ + ' ' + e.__str__(
                        )
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data['enabled'] and data['validate_rpath']:
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(
        Dict(
            'rsync_task_create',
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('user', required=True),
            Str('remotehost'),
            Int('remoteport'),
            Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
            Str('remotemodule'),
            Str('remotepath'),
            Bool('validate_rpath', default=True),
            Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
            Str('desc'),
            Cron(
                'schedule',
                defaults={'minute': '00'},
            ),
            Bool('recursive'),
            Bool('times'),
            Bool('compress'),
            Bool('archive'),
            Bool('delete'),
            Bool('quiet'),
            Bool('preserveperm'),
            Bool('preserveattr'),
            Bool('delayupdates'),
            List('extra', items=[Str('extra')]),
            Bool('enabled', default=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        See the comment in Rsyncmod about `path` length limits.

        `remotehost` is the IP address or hostname of the remote system. If username differs on the remote host,
        "username@remote_host" format should be used.

        `mode` represents the operating mechanism for Rsync, i.e. Rsync Module mode / Rsync SSH mode.

        `remotemodule` is the name of the remote module; it must be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data,
                                                       'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(data['id'])

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('rsync_task_create', 'rsync_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        old.pop('job')

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new,
                                                       'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)
        new.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete',
                                         self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command avoiding code duplication.
        """
        rsync = await self._get_instance(id)
        path = shlex.quote(rsync['path'])

        line = ['rsync']
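        # Map boolean task fields to their corresponding rsync command-line flags.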
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-zz'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [
                path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"'
            ]
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)

    @item_method
    @accepts(Int('id'))
    @returns()
    @job(lock=lambda args: args[-1], lock_queue_size=1, logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt (not syslog).
        """
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'rsync')

        rsync = self.middleware.call_sync('rsynctask.get_instance', id)
        if rsync['locked']:
            self.middleware.call_sync('rsynctask.generate_locked_alert', id)
            return

        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(commandline, rsync['user'],
                                           lambda v: job.logs_fd.write(v))

        for klass in ('RsyncSuccess',
                      'RsyncFailed') if not rsync['quiet'] else ():
            self.middleware.call_sync('alert.oneshot_delete', klass,
                                      rsync['id'])

        if cp.returncode not in RsyncReturnCode.nonfatals():
            err = None
            if cp.returncode == RsyncReturnCode.STREAMIO and rsync['compress']:
                err = (
                    "rsync command with compression enabled failed with STREAMIO error. "
                    "This may indicate that remote server lacks support for the new-style "
                    "compression used by TrueNAS.")

            if not rsync['quiet']:
                self.middleware.call_sync(
                    'alert.oneshot_create', 'RsyncFailed', {
                        'id': rsync['id'],
                        'direction': rsync['direction'],
                        'path': rsync['path'],
                    })

            if err:
                msg = f'{err} Check logs for further information'
            else:
                try:
                    rc_name = RsyncReturnCode(cp.returncode).name
                except ValueError:
                    rc_name = 'UNKNOWN'

                msg = (f'rsync command returned {cp.returncode} - {rc_name}. '
                       'Check logs for further information.')
            raise CallError(msg)

        elif not rsync['quiet']:
            self.middleware.call_sync(
                'alert.oneshot_create', 'RsyncSuccess', {
                    'id': rsync['id'],
                    'direction': rsync['direction'],
                    'path': rsync['path'],
                })
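
A hypothetical client-side counterpart to the `rsynctask.create` websocket example above (assuming the standard middlewared client); the remote host, module, user and dataset path are placeholders.

# Hypothetical usage sketch: create a pull-mode rsync task via the middlewared client.
from middlewared.client import Client

with Client() as c:
    task = c.call('rsynctask.create', {
        'enabled': True,
        'schedule': {'minute': '*/5', 'hour': '*', 'dom': '*', 'month': '*', 'dow': '*'},
        'desc': 'Test rsync task',
        'user': 'backup',
        'mode': 'MODULE',
        'remotehost': 'rsyncuser@remote.example',
        'remotemodule': 'remote_module1',
        'direction': 'PULL',
        'compress': True,
        'archive': True,
        'path': '/mnt/vol1/rsync_dataset',
    })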
Beispiel #22
0
class PoolService(CRUDService):

    GELI_KEYPATH = '/data/geli'

    @filterable
    async def query(self, filters=None, options=None):
        filters = filters or []
        options = options or {}
        options['extend'] = 'pool.pool_extend'
        options['prefix'] = 'vol_'
        return await self.middleware.call('datastore.query', 'storage.volume',
                                          filters, options)

    @private
    async def pool_extend(self, pool):
        """
        If pool is encrypted we need to check if the pool is imported
        or if all geli providers exist.
        """
        pool.pop('fstype', None)
        try:
            zpool = (await
                     self.middleware.call('zfs.pool.query',
                                          [('id', '=', pool['name'])]))[0]
        except Exception:
            zpool = None

        if zpool:
            pool['status'] = zpool['status']
            pool['scan'] = zpool['scan']
        else:
            pool.update({
                'status': 'OFFLINE',
                'scan': None,
            })

        if pool['encrypt'] > 0:
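            # A non-zero 'encrypt' value means the volume is GELI-encrypted; if the
            # zpool is not imported, every .eli provider must exist for the pool to
            # be considered decrypted.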
            if zpool:
                pool['is_decrypted'] = True
            else:
                decrypted = True
                for ed in await self.middleware.call(
                        'datastore.query', 'storage.encrypteddisk',
                    [('encrypted_volume', '=', pool['id'])]):
                    if not os.path.exists(
                            f'/dev/{ed["encrypted_provider"]}.eli'):
                        decrypted = False
                        break
                pool['is_decrypted'] = decrypted
        else:
            pool['is_decrypted'] = True
        return pool

    @item_method
    @accepts(Int('id', required=False))
    async def get_disks(self, oid=None):
        """
        Get all disks in use by pools.
        If `id` is provided only the disks from the given pool `id` will be returned.
        """
        filters = []
        if oid:
            filters.append(('id', '=', oid))
        for pool in await self.query(filters):
            if pool['is_decrypted']:
                async for i in await self.middleware.call(
                        'zfs.pool.get_disks', pool['name']):
                    yield i
            else:
                for encrypted_disk in await self.middleware.call(
                        'datastore.query', 'storage.encrypteddisk',
                    [('encrypted_volume', '=', pool['id'])]):
                    disk = {
                        k[len("disk_"):]: v
                        for k, v in encrypted_disk["encrypted_disk"].items()
                    }
                    name = await self.middleware.call("disk.get_name", disk)
                    if os.path.exists(os.path.join("/dev", name)):
                        yield name

    @item_method
    @accepts(Int('id'))
    async def download_encryption_key(self, oid):
        """
        Download encryption key for a given pool `id`.
        """
        pool = await self.query([('id', '=', oid)], {'get': True})
        if not pool['encryptkey']:
            return None

        job_id, url = await self.middleware.call(
            'core.download', 'filesystem.get',
            [os.path.join(self.GELI_KEYPATH, f"{pool['encryptkey']}.key")],
            'geli.key')
        return url

    @private
    def configure_resilver_priority(self):
        """
        Configure resilver priority based on user selected off-peak hours.
        """
        resilver = self.middleware.call_sync('datastore.config',
                                             'storage.resilver')

        if not resilver['enabled'] or not resilver['weekday']:
            return

        higher_prio = False
        weekdays = map(lambda x: int(x), resilver['weekday'].split(','))
        now = datetime.now()
        now_t = now.time()
        # end overlaps the day
        if resilver['begin'] > resilver['end']:
            if now.isoweekday() in weekdays and now_t >= resilver['begin']:
                higher_prio = True
            else:
                lastweekday = now.isoweekday() - 1
                if lastweekday == 0:
                    lastweekday = 7
                if lastweekday in weekdays and now_t < resilver['end']:
                    higher_prio = True
        # end does not overlap the day
        else:
            if now.isoweekday() in weekdays and now_t >= resilver[
                    'begin'] and now_t < resilver['end']:
                higher_prio = True

        if higher_prio:
            resilver_delay = 0
            resilver_min_time_ms = 9000
            scan_idle = 0
        else:
            resilver_delay = 2
            resilver_min_time_ms = 3000
            scan_idle = 50

        sysctl.filter('vfs.zfs.resilver_delay')[0].value = resilver_delay
        sysctl.filter(
            'vfs.zfs.resilver_min_time_ms')[0].value = resilver_min_time_ms
        sysctl.filter('vfs.zfs.scan_idle')[0].value = scan_idle

    @accepts()
    async def import_find(self):
        """
        Get a list of pools available for import with the following details:
        name, guid, status, hostname.
        """

        existing_guids = [
            i['guid'] for i in await self.middleware.call('pool.query')
        ]

        for pool in await self.middleware.call('zfs.pool.find_import'):
            if pool['status'] == 'UNAVAIL':
                continue
            # Exclude pools with same guid as existing pools (in database)
            # It could be the pool is in the database but was exported/detached for some reason
            # See #6808
            if pool['guid'] in existing_guids:
                continue
            entry = {}
            for i in ('name', 'guid', 'status', 'hostname'):
                entry[i] = pool[i]
            yield entry

    @accepts(
        Dict(
            'pool_import',
            Str('guid', required=True),
            Str('name'),
            Str('passphrase', private=True),
            List('devices', items=[Str('device')]),
        ))
    @job(lock='import_pool', pipes=['input'], check_pipes=False)
    async def import_pool(self, job, data):
        """
        Import a pool.

        Errors:
            ENOENT - Pool not found
        """

        pool = None
        for p in await self.middleware.call('zfs.pool.find_import'):
            if p['guid'] == data['guid']:
                pool = p
                break
        if pool is None:
            raise CallError(f'Pool with guid "{data["guid"]}" not found',
                            errno.ENOENT)

        if data['devices']:
            job.check_pipe("input")
            args = [job.pipes.input.r, data['passphrase'], data['devices']]
        else:
            args = []

        await self.middleware.call('notifier.volume_import',
                                   data.get('name') or pool['name'],
                                   data['guid'], *args)
        return True

    @accepts(Str('volume'), Str('fs_type'),
             Dict('fs_options', additional_attrs=True), Str('dst_path'))
    @job(lock=lambda args: 'volume_import', logs=True)
    async def import_disk(self, job, volume, fs_type, fs_options, dst_path):
        job.set_progress(None, description="Mounting")

        src = os.path.join('/var/run/importcopy/tmpdir',
                           os.path.relpath(volume, '/'))

        if os.path.exists(src):
            os.rmdir(src)

        try:
            os.makedirs(src)

            async with KernelModuleContextManager({
                    "msdosfs": "msdosfs_iconv",
                    "ntfs": "fuse"
            }.get(fs_type)):
                async with MountFsContextManager(self.middleware, volume, src,
                                                 fs_type, fs_options, ["ro"]):
                    job.set_progress(None, description="Importing")

                    line = [
                        '/usr/local/bin/rsync', '--info=progress2',
                        '--modify-window=1', '-rltvh', '--no-perms', src + '/',
                        dst_path
                    ]
                    rsync_proc = await Popen(
                        line,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        bufsize=0,
                        preexec_fn=os.setsid,
                    )
                    try:
                        progress_buffer = JobProgressBuffer(job)
                        while True:
                            line = await rsync_proc.stdout.readline()
                            job.logs_fd.write(line)
                            if line:
                                try:
                                    line = line.decode("utf-8",
                                                       "ignore").strip()
                                    bits = re.split(r"\s+", line)
                                    if len(bits) == 6 and bits[1].endswith(
                                            "%") and bits[1][:-1].isdigit():
                                        progress_buffer.set_progress(
                                            int(bits[1][:-1]))
                                    elif not line.endswith('/'):
                                        if (line not in [
                                                'sending incremental file list'
                                        ] and 'xfr#' not in line):
                                            progress_buffer.set_progress(
                                                None, extra=line)
                                except Exception:
                                    logger.warning(
                                        'Parsing error in rsync task',
                                        exc_info=True)
                            else:
                                break

                        progress_buffer.flush()
                        await rsync_proc.wait()
                        if rsync_proc.returncode != 0:
                            raise Exception("rsync failed with exit code %r" %
                                            rsync_proc.returncode)
                    except asyncio.CancelledError:
                        rsync_proc.kill()
                        raise

                    job.set_progress(100, description="Done", extra="")
        finally:
            os.rmdir(src)

    @accepts()
    def import_disk_msdosfs_locales(self):
        return [
            locale.strip()
            for locale in subprocess.check_output(["locale", "-a"],
                                                  encoding="utf-8").split("\n")
            if locale.strip()
        ]

    """
    These methods are hacks for the old UI, which supports only one volume import at a time
    """

    dismissed_import_disk_jobs = set()

    @private
    async def get_current_import_disk_job(self):
        import_jobs = await self.middleware.call(
            'core.get_jobs', [('method', '=', 'pool.import_disk')])
        not_dismissed_import_jobs = [
            job for job in import_jobs
            if job["id"] not in self.dismissed_import_disk_jobs
        ]
        if not_dismissed_import_jobs:
            return not_dismissed_import_jobs[0]

    @private
    async def dismiss_current_import_disk_job(self):
        current_import_job = await self.get_current_import_disk_job()
        if current_import_job:
            self.dismissed_import_disk_jobs.add(current_import_job["id"])
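
The off-peak check in `configure_resilver_priority` above must handle a window that wraps past midnight (begin > end); the helper below is a hypothetical restatement of that logic, not part of the original service.

# Hypothetical helper mirroring the off-peak window test in configure_resilver_priority.
from datetime import datetime, time


def in_offpeak_window(now: datetime, begin: time, end: time, weekdays: set) -> bool:
    t = now.time()
    if begin > end:
        # The window wraps past midnight: it starts on one weekday and ends on the next.
        if now.isoweekday() in weekdays and t >= begin:
            return True
        yesterday = now.isoweekday() - 1 or 7
        return yesterday in weekdays and t < end
    return now.isoweekday() in weekdays and begin <= t < end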
Beispiel #23
0
class ZFSSnapshot(CRUDService):
    class Config:
        namespace = 'zfs.snapshot'
        process_pool = True

    @filterable
    def query(self, filters=None, options=None):
        """
        Query all ZFS Snapshots with `query-filters` and `query-options`.
        """
        # Special case for faster listing of snapshot names (#53149)
        if (options and options.get('select') == ['name']
                and (not filters
                     or filter_getattrs(filters).issubset({'name', 'pool'}))):
            # Using zfs list -o name is dozens of times faster than py-libzfs
            cmd = ['zfs', 'list', '-H', '-o', 'name', '-t', 'snapshot']
            order_by = options.get('order_by')
            # -s name makes it even faster
            if not order_by or order_by == ['name']:
                cmd += ['-s', 'name']
            cp = subprocess.run(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=True,
            )
            if cp.returncode != 0:
                raise CallError(f'Failed to retrieve snapshots: {cp.stderr}')
            stdout = cp.stdout.strip()
            if not stdout:
                return []
            snaps = [{
                'name': i,
                'pool': i.split('/', 1)[0]
            } for i in stdout.split('\n')]
            if filters:
                return filter_list(snaps, filters, options)
            return snaps
        with libzfs.ZFS() as zfs:
            # Handle `id` filter to avoid getting all snapshots first
            snapshots = []
            if filters and len(filters) == 1 and list(
                    filters[0][:2]) == ['id', '=']:
                try:
                    snapshots.append(
                        zfs.get_snapshot(filters[0][2]).__getstate__())
                except libzfs.ZFSException as e:
                    if e.code != libzfs.Error.NOENT:
                        raise
            else:
                for i in zfs.snapshots:
                    try:
                        snapshots.append(i.__getstate__())
                    except libzfs.ZFSException as e:
                        # snapshot may have been deleted while this is running
                        if e.code != libzfs.Error.NOENT:
                            raise
        # FIXME: awful performance with hundreds/thousands of snapshots
        return filter_list(snapshots, filters, options)

    @accepts(
        Dict(
            'snapshot_create',
            Str('dataset', required=True, empty=False),
            Str('name', empty=False),
            Str('naming_schema',
                empty=False,
                validators=[ReplicationSnapshotNamingSchema()]),
            Bool('recursive', default=False),
            Bool('vmware_sync', default=False),
            Dict('properties', additional_attrs=True),
        ))
    def do_create(self, data):
        """
        Take a snapshot from a given dataset.
        """

        dataset = data['dataset']
        recursive = data['recursive']
        properties = data['properties']

        verrors = ValidationErrors()

        if 'name' in data and 'naming_schema' in data:
            verrors.add(
                'snapshot_create.naming_schema',
                'You can\'t specify name and naming schema at the same time')
        elif 'name' in data:
            name = data['name']
        elif 'naming_schema' in data:
            # We can't do `strftime` here because we are in the process pool and `TZ` environment variable update
            # is not propagated here.
            name = self.middleware.call_sync('replication.new_snapshot_name',
                                             data['naming_schema'])
        else:
            verrors.add('snapshot_create.naming_schema',
                        'You must specify either name or naming schema')

        if verrors:
            raise verrors

        vmware_context = None
        if data['vmware_sync']:
            vmware_context = self.middleware.call_sync('vmware.snapshot_begin',
                                                       dataset, recursive)

        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(dataset)
                ds.snapshot(f'{dataset}@{name}',
                            recursive=recursive,
                            fsopts=properties)

                if vmware_context and vmware_context['vmsynced']:
                    ds.properties['freenas:vmsynced'] = libzfs.ZFSUserProperty(
                        'Y')

            self.logger.info(f"Snapshot taken: {dataset}@{name}")
        except libzfs.ZFSException as err:
            self.logger.error(f'Failed to snapshot {dataset}@{name}: {err}')
            raise CallError(f'Failed to snapshot {dataset}@{name}: {err}')
        else:
            return self.middleware.call_sync('zfs.snapshot.get_instance',
                                             f'{dataset}@{name}')
        finally:
            if vmware_context:
                self.middleware.call_sync('vmware.snapshot_end',
                                          vmware_context)

    @accepts(
        Dict('snapshot_remove', Str('dataset', required=True),
             Str('name', required=True), Bool('defer_delete')))
    def remove(self, data):
        """
        Remove a snapshot from a given dataset.

        Returns:
            bool: True on success, otherwise False.
        """
        self.logger.debug(
            'zfs.snapshot.remove is deprecated, use zfs.snapshot.delete')
        snapshot_name = data['dataset'] + '@' + data['name']
        try:
            self.do_delete(snapshot_name,
                           {'defer': data.get('defer_delete') or False})
        except Exception:
            return False
        return True

    @accepts(
        Str('id'),
        Dict('options', Bool('defer', default=False)),
    )
    def do_delete(self, id, options):
        """
        Delete snapshot of name `id`.

        `options.defer` will defer the deletion of snapshot.
        """
        try:
            with libzfs.ZFS() as zfs:
                snap = zfs.get_snapshot(id)
                snap.delete(defer=options['defer'])
        except libzfs.ZFSException as e:
            raise CallError(str(e))
        else:
            return True

    @accepts(Dict(
        'snapshot_clone',
        Str('snapshot'),
        Str('dataset_dst'),
    ))
    def clone(self, data):
        """
        Clone a given snapshot to a new dataset.

        Returns:
            bool: True on success, otherwise False.
        """

        snapshot = data.get('snapshot', '')
        dataset_dst = data.get('dataset_dst', '')

        if not snapshot or not dataset_dst:
            return False

        try:
            with libzfs.ZFS() as zfs:
                snp = zfs.get_snapshot(snapshot)
                snp.clone(dataset_dst)
                dataset = zfs.get_dataset(dataset_dst)
                if dataset.type.name == 'FILESYSTEM':
                    dataset.mount_recursive()
            self.logger.info("Cloned snapshot {0} to dataset {1}".format(
                snapshot, dataset_dst))
            return True
        except libzfs.ZFSException as err:
            self.logger.error("{0}".format(err))
            return False

    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('recursive', default=False),
            Bool('recursive_clones', default=False),
            Bool('force', default=False),
        ),
    )
    def rollback(self, id, options):
        """
        Rollback to a given snapshot `id`.

        `options.recursive` will destroy any snapshots and bookmarks more recent than the one
        specified.

        `options.recursive_clones` is just like `recursive` but will also destroy any clones.

        `options.force` will force unmount of any clones.
        """
        args = []
        if options['force']:
            args += ['-f']
        if options['recursive']:
            args += ['-r']
        if options['recursive_clones']:
            args += ['-R']

        try:
            subprocess.run(
                ['zfs', 'rollback'] + args + [id],
                text=True,
                capture_output=True,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            raise CallError(f'Failed to rollback snapshot: {e.stderr.strip()}')
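
The name-only fast path in `zfs.snapshot.query` shells out to `zfs list` instead of going through py-libzfs; a minimal standalone sketch of that listing (without the filter handling) could look like this.

# Minimal sketch of the name-only fast path: list snapshot names with `zfs list`.
import subprocess


def list_snapshot_names():
    cp = subprocess.run(
        ['zfs', 'list', '-H', '-o', 'name', '-s', 'name', '-t', 'snapshot'],
        capture_output=True,
        text=True,
    )
    if cp.returncode != 0:
        raise RuntimeError(f'Failed to retrieve snapshots: {cp.stderr}')
    return [
        {'name': name, 'pool': name.split('/', 1)[0]}
        for name in cp.stdout.strip().split('\n') if name
    ]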
Beispiel #24
0
class PoolDatasetService(CRUDService):
    class Config:
        namespace = 'pool.dataset'

    @filterable
    def query(self, filters, options):
        # Optimization for cases in which filters can be applied at zfs.dataset.query
        zfsfilters = []
        for f in filters:
            if len(f) == 3:
                if f[0] in ('id', 'name', 'pool', 'type'):
                    zfsfilters.append(f)
        datasets = self.middleware.call_sync('zfs.dataset.query', zfsfilters,
                                             None)
        return filter_list(self.__transform(datasets), filters, options)

    def __transform(self, datasets):
        """
        We need to transform the data zfs gives us to make it consistent/user-friendly,
        making it match whatever pool.dataset.{create,update} uses as input.
        """
        def transform(dataset):
            for orig_name, new_name, method in (
                ('org.freenas:description', 'comments', None),
                ('dedup', 'deduplication', str.upper),
                ('atime', None, str.upper),
                ('casesensitivity', None, str.upper),
                ('exec', None, str.upper),
                ('sync', None, str.upper),
                ('compression', None, str.upper),
                ('origin', None, None),
                ('quota', None, _null),
                ('refquota', None, _null),
                ('reservation', None, _null),
                ('refreservation', None, _null),
                ('copies', None, None),
                ('snapdir', None, str.upper),
                ('readonly', None, str.upper),
                ('recordsize', None, None),
                ('sparse', None, None),
                ('volsize', None, None),
                ('volblocksize', None, None),
            ):
                if orig_name not in dataset['properties']:
                    continue
                i = new_name or orig_name
                dataset[i] = dataset['properties'][orig_name]
                if method:
                    dataset[i]['value'] = method(dataset[i]['value'])
            del dataset['properties']

            rv = []
            for child in dataset['children']:
                rv.append(transform(child))
            dataset['children'] = rv
            return dataset

        rv = []
        for dataset in datasets:
            rv.append(transform(dataset))
        return rv

    @accepts(
        Dict(
            'pool_dataset_create',
            Str('name', required=True),
            Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
            Int('volsize'),
            Str('volblocksize',
                enum=[
                    '512',
                    '1K',
                    '2K',
                    '4K',
                    '8K',
                    '16K',
                    '32K',
                    '64K',
                    '128K',
                ]),
            Bool('sparse'),
            Str('comments'),
            Str('sync', enum=[
                'STANDARD',
                'ALWAYS',
                'DISABLED',
            ]),
            Str('compression',
                enum=[
                    'OFF',
                    'LZ4',
                    'GZIP-1',
                    'GZIP-6',
                    'GZIP-9',
                    'ZLE',
                    'LZJB',
                ]),
            Str('atime', enum=['ON', 'OFF']),
            Str('exec', enum=['ON', 'OFF']),
            Int('quota'),
            Int('refquota'),
            Int('reservation'),
            Int('refreservation'),
            Int('copies'),
            Str('snapdir', enum=['VISIBLE', 'HIDDEN']),
            Str('deduplication', enum=['ON', 'VERIFY', 'OFF']),
            Str('readonly', enum=['ON', 'OFF']),
            Str('recordsize',
                enum=[
                    '512',
                    '1K',
                    '2K',
                    '4K',
                    '8K',
                    '16K',
                    '32K',
                    '64K',
                    '128K',
                    '256K',
                    '512K',
                    '1024K',
                ]),
            Str('casesensitivity', enum=['SENSITIVE', 'INSENSITIVE', 'MIXED']),
            register=True,
        ))
    async def do_create(self, data):
        """
        Creates a dataset/zvol.

        `volsize` is required for type=VOLUME and is supposed to be a multiple of the block size.
        """

        verrors = ValidationErrors()
        await self.__common_validation(verrors, 'pool_dataset_create', data,
                                       'CREATE')
        if verrors:
            raise verrors

        props = {}
        for i, real_name, transform in (
            ('atime', None, str.lower),
            ('casesensitivity', None, str.lower),
            ('comments', 'org.freenas:description', None),
            ('compression', None, str.lower),
            ('copies', None, lambda x: str(x)),
            ('deduplication', 'dedup', str.lower),
            ('exec', None, str.lower),
            ('quota', None, _none),
            ('readonly', None, str.lower),
            ('recordsize', None, None),
            ('refquota', None, _none),
            ('refreservation', None, _none),
            ('reservation', None, _none),
            ('snapdir', None, str.lower),
            ('sparse', None, None),
            ('sync', None, str.lower),
            ('volblocksize', None, None),
            ('volsize', None, lambda x: str(x)),
        ):
            if i not in data:
                continue
            name = real_name or i
            props[name] = data[i] if not transform else transform(data[i])

        await self.middleware.call('zfs.dataset.create', {
            'name': data['name'],
            'type': data['type'],
            'properties': props,
        })

        await self.middleware.call('zfs.dataset.mount', data['name'])

    def _add_inherit(name):
        def add(attr):
            attr.enum.append('INHERIT')

        return {'name': name, 'method': add}

    @accepts(
        Str('id'),
        Patch(
            'pool_dataset_create',
            'pool_dataset_update',
            ('rm', {
                'name': 'name'
            }),
            ('rm', {
                'name': 'type'
            }),
            ('rm', {
                'name': 'casesensitivity'
            }),  # It's a read-only attribute
            ('rm', {
                'name': 'sparse'
            }),  # Create time only attribute
            ('rm', {
                'name': 'volblocksize'
            }),  # Create time only attribute
            ('edit', _add_inherit('atime')),
            ('edit', _add_inherit('exec')),
            ('edit', _add_inherit('sync')),
            ('edit', _add_inherit('compression')),
            ('edit', _add_inherit('deduplication')),
            ('edit', _add_inherit('readonly')),
            ('edit', _add_inherit('recordsize')),
            ('edit', _add_inherit('snapdir')),
        ))
    async def do_update(self, id, data):
        """
        Updates a dataset/zvol `id`.
        """

        verrors = ValidationErrors()

        dataset = await self.middleware.call('pool.dataset.query',
                                             [('id', '=', id)])
        if not dataset:
            verrors.add('id', f'{id} does not exist', errno.ENOENT)
        else:
            data['type'] = dataset[0]['type']
            await self.__common_validation(verrors, 'pool_dataset_update',
                                           data, 'UPDATE')
        if verrors:
            raise verrors

        props = {}
        for i, real_name, transform, inheritable in (
            ('atime', None, str.lower, True),
            ('comments', 'org.freenas:description', None, False),
            ('sync', None, str.lower, True),
            ('compression', None, str.lower, True),
            ('deduplication', 'dedup', str.lower, True),
            ('exec', None, str.lower, True),
            ('quota', None, _none, False),
            ('refquota', None, _none, False),
            ('reservation', None, _none, False),
            ('refreservation', None, _none, False),
            ('copies', None, None, False),
            ('snapdir', None, str.lower, True),
            ('readonly', None, str.lower, True),
            ('recordsize', None, None, True),
            ('volsize', None, lambda x: str(x), False),
        ):
            if i not in data:
                continue
            name = real_name or i
            if inheritable and data[i] == 'INHERIT':
                props[name] = {'source': 'INHERIT'}
            else:
                props[name] = {
                    'value': data[i] if not transform else transform(data[i])
                }

        return await self.middleware.call('zfs.dataset.update', id,
                                          {'properties': props})

    async def __common_validation(self, verrors, schema, data, mode):
        assert mode in ('CREATE', 'UPDATE')

        if data['type'] == 'FILESYSTEM':
            for i in ('sparse', 'volsize', 'volblocksize'):
                if i in data:
                    verrors.add(f'{schema}.{i}',
                                'This field is not valid for FILESYSTEM')
        elif data['type'] == 'VOLUME':
            if mode == 'CREATE' and 'volsize' not in data:
                verrors.add(f'{schema}.volsize',
                            'This field is required for VOLUME')

            for i in (
                    'atime',
                    'casesensitivity',
                    'quota',
                    'refquota',
                    'recordsize',
            ):
                if i in data:
                    verrors.add(f'{schema}.{i}',
                                'This field is not valid for VOLUME')

    @accepts(Str('id'))
    async def do_delete(self, id):
        return await self.middleware.call('zfs.dataset.delete', id)

    @item_method
    @accepts(Str('id'))
    async def promote(self, id):
        """
        Promote the cloned dataset `id`
        """
        dataset = await self.middleware.call('zfs.dataset.query',
                                             [('id', '=', id)])
        if not dataset:
            raise CallError(f'Dataset "{id}" does not exist.', errno.ENOENT)
        if not dataset[0]['properties']['origin']['value']:
            raise CallError('Only cloned datasets can be promoted.',
                            errno.EBADMSG)
        return await self.middleware.call('zfs.dataset.promote', id)
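
`pool.dataset.do_update` above translates user-facing fields into ZFS property payloads, sending {'source': 'INHERIT'} for inheritable fields; the hypothetical helper below restates that mapping for a few fields only.

# Hypothetical sketch of the field-to-property mapping used by pool.dataset.update.
def build_props(data):
    props = {}
    for field, real_name, transform, inheritable in (
        ('comments', 'org.freenas:description', None, False),
        ('compression', None, str.lower, True),
        ('deduplication', 'dedup', str.lower, True),
    ):
        if field not in data:
            continue
        name = real_name or field
        if inheritable and data[field] == 'INHERIT':
            props[name] = {'source': 'INHERIT'}
        else:
            value = data[field] if transform is None else transform(data[field])
            props[name] = {'value': value}
    return props


# build_props({'compression': 'LZ4', 'deduplication': 'INHERIT'})
# -> {'compression': {'value': 'lz4'}, 'dedup': {'source': 'INHERIT'}}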
Beispiel #25
0
class ZFSPoolService(CRUDService):
    class Config:
        namespace = 'zfs.pool'
        private = True
        process_pool = True

    @filterable
    def query(self, filters, options):
        # We should not get datasets; there is zfs.dataset.query for that
        state_kwargs = {'datasets_recursive': False}
        with libzfs.ZFS() as zfs:
            # Handle `id` filter specially to avoid getting all pools
            if filters and len(filters) == 1 and list(
                    filters[0][:2]) == ['id', '=']:
                try:
                    pools = [
                        zfs.get(filters[0][2]).__getstate__(**state_kwargs)
                    ]
                except libzfs.ZFSException:
                    pools = []
            else:
                pools = [i.__getstate__(**state_kwargs) for i in zfs.pools]
        return filter_list(pools, filters, options)

    @accepts(
        Dict(
            'zfspool_create',
            Str('name', required=True),
            List('vdevs',
                 items=[
                     Dict(
                         'vdev',
                         Str('root',
                             enum=[
                                 'DATA', 'CACHE', 'LOG', 'SPARE', 'SPECIAL',
                                 'DEDUP'
                             ],
                             required=True),
                         Str('type',
                             enum=[
                                 'RAIDZ1', 'RAIDZ2', 'RAIDZ3', 'MIRROR',
                                 'STRIPE'
                             ],
                             required=True),
                         List('devices', items=[Str('disk')], required=True),
                     ),
                 ],
                 required=True),
            Dict('options', additional_attrs=True),
            Dict('fsoptions', additional_attrs=True),
        ), )
    def do_create(self, data):
        with libzfs.ZFS() as zfs:
            topology = convert_topology(zfs, data['vdevs'])
            zfs.create(data['name'], topology, data['options'],
                       data['fsoptions'])

        return self.middleware.call_sync('zfs.pool.get_instance', data['name'])

    @accepts(Str('pool'),
             Dict(
                 'options',
                 Dict('properties', additional_attrs=True),
             ))
    def do_update(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                for k, v in options['properties'].items():
                    prop = pool.properties[k]
                    if 'value' in v:
                        prop.value = v['value']
                    elif 'parsed' in v:
                        prop.parsed = v['parsed']
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Dict(
        'options',
        Bool('force', default=False),
    ))
    def do_delete(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                zfs.destroy(name, force=options['force'])
        except libzfs.ZFSException as e:
            errno_ = errno.EFAULT
            if e.code == libzfs.Error.UMOUNTFAILED:
                errno_ = errno.EBUSY
            raise CallError(str(e), errno_)

    @accepts(Str('pool', required=True))
    def upgrade(self, pool):
        try:
            with libzfs.ZFS() as zfs:
                zfs.get(pool).upgrade()
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Dict(
        'options',
        Bool('force', default=False),
    ))
    def export(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                # FIXME: force not yet implemented
                pool = zfs.get(name)
                zfs.export_pool(pool)
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'))
    def get_devices(self, name):
        try:
            with libzfs.ZFS() as zfs:
                return [i.replace('/dev/', '') for i in zfs.get(name).disks]
        except libzfs.ZFSException as e:
            raise CallError(str(e), errno.ENOENT)

    @accepts(
        Str('name'),
        List('new', default=None, null=True),
        List('existing',
             items=[
                 Dict(
                     'attachvdev',
                     Str('target'),
                     Str('type', enum=['DISK']),
                     Str('path'),
                 ),
             ],
             null=True,
             default=None),
    )
    @job()
    def extend(self, job, name, new=None, existing=None):
        """
        Extend a zfs pool `name` with `new` vdevs or attach to `existing` vdevs.
        """

        if new is None and existing is None:
            raise CallError('New or existing vdevs must be provided',
                            errno.EINVAL)

        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)

                if new:
                    topology = convert_topology(zfs, new)
                    pool.attach_vdevs(topology)

                # Make sure we can find all target vdevs
                for i in (existing or []):
                    target = find_vdev(pool, i['target'])
                    if target is None:
                        raise CallError(
                            f"Failed to find vdev for {i['target']}",
                            errno.EINVAL)
                    i['target'] = target

                for i in (existing or []):
                    newvdev = libzfs.ZFSVdev(zfs, i['type'].lower())
                    newvdev.path = i['path']
                    i['target'].attach(newvdev)

        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    def __zfs_vdev_operation(self, name, label, op, *args):
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label}',
                                    errno.EINVAL)
                op(target, *args)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('pool'), Str('label'),
             Dict('options', Bool('clear_label', default=False)))
    def detach(self, name, label, options):
        """
        Detach device `label` from the pool `pool`.
        """
        self.detach_remove_impl('detach', name, label, options)

    def detach_remove_impl(self, op, name, label, options):
        def impl(target):
            getattr(target, op)()
            if options['clear_label']:
                self.clear_label(target.path)

        self.__zfs_vdev_operation(name, label, impl)

    @accepts(Str('device'))
    def clear_label(self, device):
        """
        Clear label from `device`.
        """
        try:
            libzfs.clear_label(device)
        except (libzfs.ZFSException, OSError) as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Str('label'))
    def offline(self, name, label):
        """
        Offline device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label, lambda target: target.offline())

    @accepts(Str('pool'), Str('label'), Bool('expand', default=False))
    def online(self, name, label, expand=False):
        """
        Online device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label,
                                  lambda target, *args: target.online(*args),
                                  expand)

    @accepts(Str('pool'), Str('label'),
             Dict('options', Bool('clear_label', default=False)))
    def remove(self, name, label, options):
        """
        Remove device `label` from the pool `pool`.
        """
        self.detach_remove_impl('remove', name, label, options)

    @accepts(Str('pool'), Str('label'), Str('dev'))
    def replace(self, name, label, dev):
        """
        Replace device `label` with `dev` in pool `name`.
        """
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label}',
                                    errno.EINVAL)

                newvdev = libzfs.ZFSVdev(zfs, 'disk')
                newvdev.path = f'/dev/{dev}'
                # FIXME: Replacing by the old path does not work for some reason,
                # so use the guid for now.
                target.path = str(target.guid)
                target.replace(newvdev)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('name', required=True),
             Str('action', enum=['START', 'STOP', 'PAUSE'], default='START'))
    @job(lock=lambda i: f'{i[0]}-{i[1] if len(i) >= 2 else "START"}')
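    # The job lock key combines the pool name with the requested action
    # (defaulting to START), so identical scrub requests for the same pool are
    # serialized against each other.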
    def scrub(self, job, name, action=None):
        """
        Start/Stop/Pause a scrub on pool `name`.
        """
        if action != 'PAUSE':
            try:
                with libzfs.ZFS() as zfs:
                    pool = zfs.get(name)

                    if action == 'START':
                        pool.start_scrub()
                    else:
                        pool.stop_scrub()
            except libzfs.ZFSException as e:
                raise CallError(str(e), e.code)
        else:
            proc = subprocess.Popen(f'zpool scrub -p {name}'.split(' '),
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.communicate()

            if proc.returncode != 0:
                raise CallError('Unable to pause scrubbing')

        def watch():
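            # Poll the scrub state once per second and update job progress until the
            # scrub pauses, finishes, is canceled, or is no longer a scrub operation.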
            while True:
                with libzfs.ZFS() as zfs:
                    scrub = zfs.get(name).scrub.__getstate__()

                if scrub['pause']:
                    job.set_progress(100, 'Scrub paused')
                    break

                if scrub['function'] != 'SCRUB':
                    break

                if scrub['state'] == 'FINISHED':
                    job.set_progress(100, 'Scrub finished')
                    break

                if scrub['state'] == 'CANCELED':
                    break

                if scrub['state'] == 'SCANNING':
                    job.set_progress(scrub['percentage'], 'Scrubbing')
                time.sleep(1)

        if action == 'START':
            t = threading.Thread(target=watch, daemon=True)
            t.start()
            t.join()

    @accepts()
    def find_import(self):
        with libzfs.ZFS() as zfs:
            return [i.__getstate__() for i in zfs.find_import()]

    @accepts(
        Str('name_or_guid'),
        Dict('options', additional_attrs=True),
        Bool('any_host', default=True),
        Str('cachefile', null=True, default=None),
        Str('new_name', null=True, default=None),
    )
    def import_pool(self, name_or_guid, options, any_host, cachefile,
                    new_name):
        found = False
        with libzfs.ZFS() as zfs:
            for pool in zfs.find_import(cachefile=cachefile,
                                        search_paths=['/dev/disk/by-partuuid']
                                        if osc.IS_LINUX else None):
                if pool.name == name_or_guid or str(pool.guid) == name_or_guid:
                    found = pool
                    break

            if not found:
                raise CallError(f'Pool {name_or_guid} not found.',
                                errno.ENOENT)

            try:
                zfs.import_pool(found,
                                new_name or found.name,
                                options,
                                any_host=any_host)
            except libzfs.ZFSException as e:
                # We only log if some datasets failed to mount after pool import
                if e.code != libzfs.Error.MOUNTFAILED:
                    raise
                else:
                    self.logger.error(
                        'Failed to mount datasets after importing "%s" pool: %s',
                        name_or_guid,
                        str(e),
                        exc_info=True)

    @accepts(Str('pool'))
    def find_not_online(self, pool):
        pool = self.middleware.call_sync('zfs.pool.query', [['id', '=', pool]],
                                         {'get': True})

        unavails = []
        for nodes in pool['groups'].values():
            for node in nodes:
                unavails.extend(self.__find_not_online(node))
        return unavails

    def __find_not_online(self, node):
        if len(node['children']) == 0 and node['status'] not in ('ONLINE',
                                                                 'AVAIL'):
            return [node]

        unavails = []
        for child in node['children']:
            unavails.extend(self.__find_not_online(child))
        return unavails

    def get_vdev(self, name, vname):
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                vdev = find_vdev(pool, vname)
                if not vdev:
                    raise CallError(f'{vname} not found in {name}',
                                    errno.ENOENT)
                return vdev.__getstate__()
        except libzfs.ZFSException as e:
            raise CallError(str(e))
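
A sketch of the payload shape accepted by `do_create` above, derived from the `zfspool_create` schema. The service is marked private, so in practice it is reached from other middleware plugins via `self.middleware.call`; the device paths and property values below are placeholders.

payload = {
    'name': 'tank',
    'vdevs': [
        {'root': 'DATA', 'type': 'MIRROR', 'devices': ['/dev/da1', '/dev/da2']},
        {'root': 'LOG', 'type': 'STRIPE', 'devices': ['/dev/da3']},
    ],
    'options': {'ashift': '12'},
    'fsoptions': {'compression': 'lz4'},
}
# From another plugin, for example:
#     await self.middleware.call('zfs.pool.create', payload)
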
Beispiel #26
0
class ChartReleaseService(Service):
    class Config:
        namespace = 'chart.release'

    @accepts(Str('chart_release_name'))
    @returns(
        Dict(example={'minio2': ['minio/minio:RELEASE.2022-03-05T06-32-39Z']}))
    async def get_chart_releases_using_chart_release_images(
            self, chart_release_name):
        """
        Retrieve chart releases which are consuming any images in use by `chart_release_name`.
        """
        chart_releases = await self.middleware.call(
            'chart.release.query', [], {'extra': {
                'retrieve_resources': True
            }})
        idx = next((idx for (idx, d) in enumerate(chart_releases)
                    if d['name'] == chart_release_name), None)
        if idx is None:
            raise CallError(f'{chart_release_name!r} not found',
                            errno=errno.ENOENT)

        chart_release = chart_releases.pop(idx)
        return get_chart_releases_consuming_image(
            chart_release['resources']['container_images'], chart_releases,
            True)

    @private
    async def delete_unused_app_images(self, chart_release):
        failed_to_delete = {}
        to_delete_tags = await self.get_to_delete_unused_app_images(
            chart_release)
        images = []
        if to_delete_tags:
            images = await self.middleware.call(
                'container.image.query',
                [['OR', [['complete_tags', 'rin', tag] for tag in to_delete_tags]]],
                {'extra': {'complete_tags': True}},
            )
        for image in images:
            try:
                await self.middleware.call('container.image.delete',
                                           image['id'])
            except Exception as e:
                failed_to_delete[', '.join(image['complete_tags'])] = str(e)
        return failed_to_delete

    @private
    async def get_to_delete_unused_app_images(self, chart_release):
        to_delete = {
            normalize_reference(i)['complete_tag']
            for i in chart_release['resources']['container_images']
        }
        in_use = get_chart_releases_consuming_image(
            to_delete,
            await self.middleware.call(
                'chart.release.query',
                [['id', '!=', chart_release['name']]],
                {'extra': {'retrieve_resources': True}},
            ),
            True,
        )
        for image_list in in_use.values():
            for image in filter(lambda i: i['complete_tag'] in to_delete,
                                map(normalize_reference, image_list)):
                to_delete.remove(image['complete_tag'])
        return list(to_delete)
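
A brief usage sketch for the public endpoint above, assuming the `middlewared.client.Client` websocket client; the release name passed in is a placeholder.

from middlewared.client import Client

with Client() as c:
    # Returns a mapping of consuming chart release names to the shared image tags,
    # e.g. {'minio2': ['minio/minio:RELEASE.2022-03-05T06-32-39Z']} as in the
    # schema example above.
    consumers = c.call(
        'chart.release.get_chart_releases_using_chart_release_images', 'minio')
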
Beispiel #27
0
class FilesystemService(Service):
    @accepts(Str('path', required=True), Ref('query-filters'),
             Ref('query-options'))
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_symlink():
                etype = 'SYMLINK'
            elif entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path)
                if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'acl': False if self.acl_is_trivial(data["realpath"]) else True,
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({
                    'size': None,
                    'mode': None,
                    'acl': None,
                    'uid': None,
                    'gid': None
                })
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})

    @accepts(Str('path'))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.
        """
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        stat['acl'] = False if self.acl_is_trivial(path) else True

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content', max_length=2048000),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options=None):
        """
        Simplified file receiving method for small files.

        `content` must be the base64-encoded file content.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options=None):
        """
        Get the contents of file `path`, base64-encoded.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(
                options.get('maxlen'))).decode().strip()
        return data

    @accepts(Str('path'))
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f,
                                                job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @job(pipes=["input"])
    async def put(self, job, path, options=None):
        """
        Job to put contents to `path`.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj,
                                                job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        try:
            statfs = bsd.statfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)
        return {
            **statfs.__getstate__(),
            'total_bytes': statfs.total_blocks * statfs.blocksize,
            'free_bytes': statfs.free_blocks * statfs.blocksize,
            'avail_bytes': statfs.avail_blocks * statfs.blocksize,
        }

    def __convert_to_basic_permset(self, permset):
        """
        Convert "advanced" ACL permset format to basic format using
        bitwise operation and constants defined in py-bsd/bsd/acl.pyx,
        py-bsd/defs.pxd and acl.h.

        If the advanced ACL can't be converted without losing
        information, we return 'OTHER'.

        The reverse process converts the constant's value back to a dictionary
        using a bitwise operation.
        """
        perm = 0
        for k, v in permset.items():
            if v:
                perm |= acl.NFS4Perm[k]

        try:
            SimplePerm = (acl.NFS4BasicPermset(perm)).name
        except Exception:
            SimplePerm = 'OTHER'

        return SimplePerm

    def __convert_to_basic_flagset(self, flagset):
        flags = 0
        for k, v in flagset.items():
            if k == "INHERITED":
                continue
            if v:
                flags |= acl.NFS4Flag[k]

        try:
            SimpleFlag = (acl.NFS4BasicFlagset(flags)).name
        except Exception:
            SimpleFlag = 'OTHER'

        return SimpleFlag

    def __convert_to_adv_permset(self, basic_perm):
        permset = {}
        perm_mask = acl.NFS4BasicPermset[basic_perm].value
        for name, member in acl.NFS4Perm.__members__.items():
            if perm_mask & member.value:
                permset.update({name: True})
            else:
                permset.update({name: False})

        return permset

    def __convert_to_adv_flagset(self, basic_flag):
        flagset = {}
        flag_mask = acl.NFS4BasicFlagset[basic_flag].value
        for name, member in acl.NFS4Flag.__members__.items():
            if flag_mask & member.value:
                flagset.update({name: True})
            else:
                flagset.update({name: False})

        return flagset

    def _winacl(self, path, action, uid, gid, options):
        chroot_dir = os.path.dirname(path)
        target = os.path.basename(path)
        winacl = subprocess.run([
            '/usr/local/bin/winacl', '-a', action, '-O',
            str(uid), '-G',
            str(gid), '-rx' if options['traverse'] else '-r', '-c', chroot_dir,
            '-p', target
        ],
                                check=False,
                                capture_output=True)
        if winacl.returncode != 0:
            raise CallError(
                f"Winacl {action} on path {path} failed with error: [{winacl.stderr.decode().strip()}]"
            )

    def _common_perm_path_validate(self, path):
        if not os.path.exists(path):
            raise CallError(f"Path not found: {path}", errno.ENOENT)

        if not os.path.realpath(path).startswith('/mnt/'):
            raise CallError(
                f"Changing permissions on paths outside of /mnt is not permitted: {path}",
                errno.EPERM)

        if os.path.realpath(path) in [
                x['path'] for x in self.middleware.call_sync('pool.query')
        ]:
            raise CallError(
                f"Changing permissions of root level dataset is not permitted: {path}",
                errno.EPERM)

    @accepts(Str('path'))
    def acl_is_trivial(self, path):
        """
        Returns True if the ACL can be fully expressed as a file mode without losing
        any access rules, or if the path does not support NFSv4 ACLs (for example
        a path on a tmpfs filesystem).
        """
        if not os.path.exists(path):
            raise CallError(f'Path not found [{path}].', errno.ENOENT)

        if osc.IS_LINUX:
            posix1e_acl = self.getacl_posix1e(path, True)
            return True if len(posix1e_acl['acl']) == 3 else False

        if not os.pathconf(path, 64):
            return True

        return acl.ACL(file=path).is_trivial

    @accepts(
        Dict(
            'filesystem_ownership', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict('options', Bool('recursive', default=False),
                 Bool('traverse', default=False))))
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """
        job.set_progress(0, 'Preparing to change owner.')

        self._common_perm_path_validate(data['path'])

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']
        options = data['options']

        if not options['recursive']:
            job.set_progress(100, 'Finished changing owner.')
            os.chown(data['path'], uid, gid)
        else:
            job.set_progress(10,
                             f'Recursively changing owner of {data["path"]}.')
            self._winacl(data['path'], 'chown', uid, gid, options)
            job.set_progress(100, 'Finished changing owner.')

    @accepts(
        Dict(
            'filesystem_permission', Str('path', required=True),
            UnixPerm('mode', null=True), Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )))
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Remove extended ACL from specified path.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """
        job.set_progress(0, 'Preparing to set permissions.')
        options = data['options']
        mode = data.get('mode', None)

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']

        self._common_perm_path_validate(data['path'])

        acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial',
                                                   data['path'])
        if not acl_is_trivial and not options['stripacl']:
            raise CallError(
                f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.',
                errno.EINVAL)

        if mode is not None:
            mode = int(mode, 8)

        a = acl.ACL(file=data['path'])
        a.strip()
        a.apply(data['path'])

        if mode:
            os.chmod(data['path'], mode)

        if uid != -1 or gid != -1:
            os.chown(data['path'], uid, gid)

        if not options['recursive']:
            job.set_progress(100, 'Finished setting permissions.')
            return

        action = 'clone' if mode else 'strip'
        job.set_progress(
            10, f'Recursively setting permissions on {data["path"]}.')
        self._winacl(data['path'], action, uid, gid, options)
        job.set_progress(100, 'Finished setting permissions.')

    @accepts()
    async def default_acl_choices(self):
        """
        Get list of default ACL types.
        """
        acl_choices = []
        for x in ACLDefault:
            if x.value['visible']:
                acl_choices.append(x.name)

        return acl_choices

    @accepts(
        Str('acl_type', default='OPEN', enum=[x.name for x in ACLDefault]),
        Str('share_type', default='NONE', enum=['NONE', 'AFP', 'SMB', 'NFS']),
    )
    async def get_default_acl(self, acl_type, share_type):
        """
        Returns a default ACL depending on the usage specified by `acl_type`.
        If an admin group is defined, then an entry granting it full control will
        be placed at the top of the ACL. Optionally, `share_type` may be passed
        to get a share-specific template ACL.
        """
        acl = []
        admin_group = (await self.middleware.call('smb.config'))['admin_group']
        if acl_type == 'HOME' and (await self.middleware.call(
                'activedirectory.get_state')) == 'HEALTHY':
            acl_type = 'DOMAIN_HOME'
        if admin_group:
            acl.append({
                'tag': 'GROUP',
                'id': (await self.middleware.call('dscache.get_uncached_group',
                                                  admin_group))['gr_gid'],
                'perms': {'BASIC': 'FULL_CONTROL'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW',
            })
        if share_type == 'SMB':
            acl.append({
                'tag': 'GROUP',
                'id': int(SMBBuiltin['USERS'].value[1][9:]),
                'perms': {'BASIC': 'MODIFY'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW',
            })
        acl.extend((ACLDefault[acl_type].value)['acl'])

        return acl

    def _is_inheritable(self, flags):
        """
        Take ACE flags and return True if any inheritance bits are set.
        """
        inheritance_flags = [
            'FILE_INHERIT', 'DIRECTORY_INHERIT', 'NO_PROPAGATE_INHERIT',
            'INHERIT_ONLY'
        ]
        for i in inheritance_flags:
            if flags.get(i):
                return True

        return False

    @private
    def canonicalize_acl_order(self, acl):
        """
        Convert flags to the advanced format, then separate the ACL into two lists: one for ACEs that
        have been inherited and one for ACEs that have not been inherited. Non-inherited ACEs take
        precedence, so they are placed first in the finalized combined list. Within each list, the
        ACEs are ordered according to the following:

        1) Deny ACEs that apply to the object itself (NOINHERIT)

        2) Deny ACEs that apply to a subobject of the object (INHERIT)

        3) Allow ACEs that apply to the object itself (NOINHERIT)

        4) Allow ACEs that apply to a subobject of the object (INHERIT)

        See http://docs.microsoft.com/en-us/windows/desktop/secauthz/order-of-aces-in-a-dacl

        The "INHERITED" bit is stripped in filesystem.getacl when generating a BASIC flag type.
        It is best practice to use a non-simplified ACL for canonicalization.
        """
        inherited_aces = []
        final_acl = []
        non_inherited_aces = []
        for entry in acl:
            entry['flags'] = (
                self.__convert_to_adv_flagset(entry['flags']['BASIC'])
                if 'BASIC' in entry['flags'] else entry['flags']
            )
            if entry['flags'].get('INHERITED'):
                inherited_aces.append(entry)
            else:
                non_inherited_aces.append(entry)

        if inherited_aces:
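            # sorted() is stable and False orders before True, so DENY entries come
            # before ALLOW entries and, within each type, non-inheritable
            # (object-only) entries come before inheritable ones.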
            inherited_aces = sorted(
                inherited_aces,
                key=lambda x:
                (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        if non_inherited_aces:
            non_inherited_aces = sorted(
                non_inherited_aces,
                key=lambda x:
                (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        final_acl = non_inherited_aces + inherited_aces
        return final_acl

    @private
    def getacl_posix1e(self, path, simplified):
        st = os.stat(path)
        ret = {
            'uid': st.st_uid,
            'gid': st.st_gid,
            'acl': [],
            'flags': {
                'setuid': bool(st.st_mode & pystat.S_ISUID),
                'setgid': bool(st.st_mode & pystat.S_ISGID),
                'sticky': bool(st.st_mode & pystat.S_ISVTX),
            },
            'acltype': ACLType.POSIX1E.name
        }

        ret['uid'] = st.st_uid
        ret['gid'] = st.st_gid

        gfacl = subprocess.run(
            ['getfacl', '-c' if osc.IS_LINUX else '-q', '-n', path],
            check=False,
            capture_output=True)
        if gfacl.returncode != 0:
            raise CallError(
                f"Failed to get POSIX1e ACL on path [{path}]: {gfacl.stderr.decode()}"
            )

        # On Linux, getfacl adds an extra line to the output when the path is
        # absolute, plus an extra newline at the end.
        entries = gfacl.stdout.decode().splitlines()
        if osc.IS_LINUX:
            entries = entries[:-1]

        for entry in entries:
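            # Each entry has the form "tag:qualifier:perms", e.g. "user:1000:rwx" or
            # "default:group::r-x"; rsplit keeps the optional "default:" prefix
            # attached to the tag so it can be detected and stripped below.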
            if entry.startswith("#"):
                continue
            ace = {
                "default": False,
                "tag": None,
                "id": -1,
                "perms": {
                    "READ": False,
                    "WRITE": False,
                    "EXECUTE": False,
                }
            }

            tag, id, perms = entry.rsplit(":", 2)
            ace['perms'].update({
                "READ": perms[0].casefold() == "r",
                "WRITE": perms[1].casefold() == "w",
                "EXECUTE": perms[2].casefold() == "x",
            })
            if tag.startswith('default'):
                ace['default'] = True
                tag = tag[8:]

            ace['tag'] = tag.upper()
            if id.isdigit():
                ace['id'] = int(id)
            ret['acl'].append(ace)

        return ret

    @private
    def getacl_nfs4(self, path, simplified):
        stat = os.stat(path)

        a = acl.ACL(file=path)
        fs_acl = a.__getstate__()

        if not simplified:
            advanced_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': entry['perms'],
                    'flags': entry['flags'],
                }
                if ace['tag'] == 'everyone@' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    continue

                advanced_acl.append(ace)

            return {
                'uid': stat.st_uid,
                'gid': stat.st_gid,
                'acl': advanced_acl,
                'acltype': ACLType.NFS4.name
            }

        simple_acl = []
        for entry in fs_acl:
            ace = {
                'tag': (acl.ACLWho[entry['tag']]).value,
                'id': entry['id'],
                'type': entry['type'],
                'perms': {
                    'BASIC': self.__convert_to_basic_permset(entry['perms'])
                },
                'flags': {
                    'BASIC': self.__convert_to_basic_flagset(entry['flags'])
                },
            }
            if ace['tag'] == 'everyone@' and ace['perms']['BASIC'] == 'NOPERMS':
                continue

            for key in ['perms', 'flags']:
                if ace[key]['BASIC'] == 'OTHER':
                    ace[key] = entry[key]

            simple_acl.append(ace)

        return {
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'acl': simple_acl,
            'acltype': ACLType.NFS4.name
        }

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return the ACL of a given path. This may return a POSIX1e ACL or an NFSv4 ACL. The ACL type is
        indicated by the `acltype` key.

        Errata about ACLType NFSv4:

        `simplified` returns a shortened form of the ACL permset and flags.

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the pre-defined simplified permissions types, then
        the full ACL entry will be returned.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        if osc.IS_LINUX or not os.pathconf(path, 64):
            return self.getacl_posix1e(path, simplified)

        return self.getacl_nfs4(path, simplified)

    @private
    def setacl_posix1e(self, job, data):
        job.set_progress(0, 'Preparing to set acl.')
        if osc.IS_FREEBSD:
            raise CallError(
                "POSIX1e brand ACLs not supported on the FreeBSD-based TrueNAS platform",
                errno.EOPNOTSUPP)

        options = data['options']
        recursive = options.get('recursive')
        dacl = data.get('dacl', [])
        path = data['path']

        aclcheck = ACLType.POSIX1E.validate(data)

        if not aclcheck['is_valid']:
            raise CallError(
                f"POSIX1e ACL is invalid: {' '.join(aclcheck['errors'])}")

        stripacl = subprocess.run(
            ['setfacl', '-bR' if recursive else '-b', path],
            check=False,
            capture_output=True)
        if stripacl.returncode != 0:
            raise CallError(f"Failed to remove POSIX1e ACL from [{path}]: "
                            f"{stripacl.stderr.decode()}")

        if options['stripacl']:
            job.set_progress(100, "Finished removing POSIX1e ACL")
            return

        job.set_progress(50, 'Reticulating splines.')

        for idx, ace in enumerate(dacl):
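            # Build a comma-separated setfacl(1) spec such as
            # "user:1000:rw-,default:group::r-x", applied below with -m (or -mR).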
            if idx == 0:
                aclstring = ""
            else:
                aclstring += ","

            if ace['id'] == -1:
                ace['id'] = ''

            ace['tag'] = ace['tag'].replace('_OBJ', '').lower()

            if ace['default']:
                aclstring += "default:"

            aclstring += f"{ace['tag']}:{ace['id']}:"
            aclstring += 'r' if ace['perms']['READ'] else '-'
            aclstring += 'w' if ace['perms']['WRITE'] else '-'
            aclstring += 'x' if ace['perms']['EXECUTE'] else '-'

        self.logger.debug("acl string: %s", aclstring)
        setacl = subprocess.run(
            ['setfacl', '-mR' if recursive else '-m', aclstring, path],
            check=False,
            capture_output=True)
        if setacl.returncode != 0:
            raise CallError(f'Failed to set ACL on path [{path}]: '
                            f'{setacl.stderr.decode()}')

        job.set_progress(100, 'Finished setting POSIX1e ACL.')

    @private
    def setacl_nfs4(self, job, data):
        job.set_progress(0, 'Preparing to set acl.')
        options = data['options']
        dacl = data.get('dacl', [])

        if osc.IS_LINUX or not os.pathconf(data['path'], 64):
            raise CallError(
                f"NFSv4 ACLS are not supported on path {data['path']}",
                errno.EOPNOTSUPP)

        self._common_perm_path_validate(data['path'])

        if dacl and options['stripacl']:
            raise CallError(
                'Setting ACL and stripping ACL are not permitted simultaneously.',
                errno.EINVAL)

        uid = -1 if data.get('uid', None) is None else data['uid']
        gid = -1 if data.get('gid', None) is None else data['gid']
        if options['stripacl']:
            a = acl.ACL(file=data['path'])
            a.strip()
            a.apply(data['path'])
        else:
            inheritable_is_present = False
            cleaned_acl = []
            lockace_is_present = False
            for entry in dacl:
                ace = {
                    'tag': acl.ACLWho(entry['tag']).name,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': self.__convert_to_adv_permset(entry['perms']['BASIC'])
                    if 'BASIC' in entry['perms'] else entry['perms'],
                    'flags': self.__convert_to_adv_flagset(entry['flags']['BASIC'])
                    if 'BASIC' in entry['flags'] else entry['flags'],
                }
                if ace['flags'].get('INHERIT_ONLY') and not ace['flags'].get(
                        'DIRECTORY_INHERIT', False) and not ace['flags'].get(
                            'FILE_INHERIT', False):
                    raise CallError(
                        'Invalid flag combination. DIRECTORY_INHERIT or FILE_INHERIT must be set if INHERIT_ONLY is set.',
                        errno.EINVAL)
                if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    lockace_is_present = True
                elif ace['flags'].get('DIRECTORY_INHERIT') or ace['flags'].get(
                        'FILE_INHERIT'):
                    inheritable_is_present = True

                cleaned_acl.append(ace)

            if not inheritable_is_present:
                raise CallError(
                    'At least one inheritable ACL entry is required',
                    errno.EINVAL)

            if options['canonicalize']:
                cleaned_acl = self.canonicalize_acl_order(cleaned_acl)

            if not lockace_is_present:
                locking_ace = {
                    'tag': 'EVERYONE',
                    'id': None,
                    'type': 'ALLOW',
                    'perms': self.__convert_to_adv_permset('NOPERMS'),
                    'flags': self.__convert_to_adv_flagset('INHERIT')
                }
                cleaned_acl.append(locking_ace)

            a = acl.ACL()
            a.__setstate__(cleaned_acl)
            a.apply(data['path'])

        if not options['recursive']:
            os.chown(data['path'], uid, gid)
            job.set_progress(100, 'Finished setting NFS4 ACL.')
            return

        job.set_progress(10, f'Recursively setting ACL on {data["path"]}.')
        self._winacl(data['path'], 'clone', uid, gid, options)
        job.set_progress(100, 'Finished setting NFS4 ACL.')

    @accepts(
        Dict(
            'filesystem_acl', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            List('dacl',
                 items=[
                     Dict(
                         'aclentry',
                         Str('tag',
                             enum=[
                                 'owner@', 'group@', 'everyone@', 'USER',
                                 'GROUP'
                             ]),
                         Int('id', null=True),
                         Str('type', enum=['ALLOW', 'DENY']),
                         Dict(
                             'perms',
                             Bool('READ_DATA'),
                             Bool('WRITE_DATA'),
                             Bool('APPEND_DATA'),
                             Bool('READ_NAMED_ATTRS'),
                             Bool('WRITE_NAMED_ATTRS'),
                             Bool('EXECUTE'),
                             Bool('DELETE_CHILD'),
                             Bool('READ_ATTRIBUTES'),
                             Bool('WRITE_ATTRIBUTES'),
                             Bool('DELETE'),
                             Bool('READ_ACL'),
                             Bool('WRITE_ACL'),
                             Bool('WRITE_OWNER'),
                             Bool('SYNCHRONIZE'),
                             Str('BASIC',
                                 enum=[
                                     'FULL_CONTROL', 'MODIFY', 'READ',
                                     'TRAVERSE'
                                 ]),
                         ),
                         Dict(
                             'flags',
                             Bool('FILE_INHERIT'),
                             Bool('DIRECTORY_INHERIT'),
                             Bool('NO_PROPAGATE_INHERIT'),
                             Bool('INHERIT_ONLY'),
                             Bool('INHERITED'),
                             Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                         ),
                     ),
                     Dict(
                         'posix1e_ace',
                         Bool('default', default=False),
                         Str('tag',
                             enum=[
                                 'USER_OBJ', 'GROUP_OBJ', 'USER', 'GROUP',
                                 'OTHER', 'MASK'
                             ]),
                         Int('id', default=-1),
                         Dict(
                             'perms',
                             Bool('READ', default=False),
                             Bool('WRITE', default=False),
                             Bool('EXECUTE', default=False),
                         ),
                     )
                 ],
                 default=[]),
            Dict(
                'nfs41_flags',
                Bool('autoinherit', default=False),
                Bool('protected', default=False),
            ),
            Str('acltype',
                enum=[x.name for x in ACLType],
                default=ACLType.NFS4.name),
            Dict('options', Bool('stripacl', default=False),
                 Bool('recursive', default=False),
                 Bool('traverse', default=False),
                 Bool('canonicalize', default=True))))
    @job(lock="perm_change")
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        `dacl` "simplified" ACL here or a full ACL.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `recursive` apply the ACL recursively

        `traverse` traverse filesystem boundaries (ZFS datasets)

        `stripacl` convert the ACL to a trivial one. An ACL is trivial if it can be expressed as a file
        mode without losing any access rules.

        `canonicalize` reorder ACL entries so that they are in canonical form as described
        in the Microsoft documentation MS-DTYP 2.4.5 (ACL)

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """
        acltype = ACLType[data['acltype']]
        if acltype == ACLType.NFS4:
            return self.setacl_nfs4(job, data)
        else:
            return self.setacl_posix1e(job, data)

    @private
    async def children_are_locked(self, path, child):
        if child["locked"] and path.startswith(child["mountpoint"]):
            return True

        if not path.startswith(child["mountpoint"]):
            return False

        if child.get("children"):
            for c in child["children"]:
                is_locked = await self.children_are_locked(path, c)

                if is_locked:
                    return True

        return False

    @private
    async def path_is_encrypted(self, path):
        ds = await self.middleware.call("pool.dataset.from_path", path, True)

        if ds["locked"]:
            return True

        if not ds["children"]:
            return False

        return await self.children_are_locked(path, ds)
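
A sketch of a `filesystem.setperm` call built from the `filesystem_permission` schema above. It runs as a job; the `job=True` keyword waiting for completion, the path, and the ids are assumptions used for illustration.

from middlewared.client import Client

with Client() as c:
    c.call('filesystem.setperm', {
        'path': '/mnt/tank/share',
        'mode': '770',  # octal permission bits as a string (UnixPerm)
        'uid': 1000,
        'gid': 1000,
        'options': {'stripacl': True, 'recursive': True, 'traverse': False},
    }, job=True)
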
Beispiel #28
0
class ServiceService(CRUDService):

    SERVICE_DEFS = {
        's3': ServiceDefinition('minio', '/var/run/minio.pid'),
        'ssh': ServiceDefinition('sshd', '/var/run/sshd.pid'),
        'rsync': ServiceDefinition('rsync', '/var/run/rsyncd.pid'),
        'nfs': ServiceDefinition('nfsd', None),
        'afp': ServiceDefinition('netatalk', None),
        'cifs': ServiceDefinition('smbd', '/var/run/samba4/smbd.pid'),
        'dynamicdns': ServiceDefinition('inadyn', None),
        'snmp': ServiceDefinition('snmpd', '/var/run/net_snmpd.pid'),
        'ftp': ServiceDefinition('proftpd', '/var/run/proftpd.pid'),
        'tftp': ServiceDefinition('inetd', '/var/run/inetd.pid'),
        'iscsitarget': ServiceDefinition('ctld', '/var/run/ctld.pid'),
        'lldp': ServiceDefinition('ladvd', '/var/run/ladvd.pid'),
        'ups': ServiceDefinition('upsd', '/var/db/nut/upsd.pid'),
        'upsmon': ServiceDefinition('upsmon', '/var/db/nut/upsmon.pid'),
        'smartd': ServiceDefinition('smartd', 'smartd-daemon',
                                    '/var/run/smartd-daemon.pid'),
        'webshell': ServiceDefinition(None, '/var/run/webshell.pid'),
        'webdav': ServiceDefinition('httpd', '/var/run/httpd.pid'),
        'netdata': ServiceDefinition('netdata', '/var/db/netdata/netdata.pid'),
        'asigra': ServiceDefinition('asigra', '/var/run/dssystem.pid'),
    }

    @filterable
    async def query(self, filters=None, options=None):
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query',
                                              'services.services', filters,
                                              options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(self._get_status(entry)): entry
            for entry in services
        }
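        # Map each status future back to its service row so failures and timeouts
        # can still be attributed to a specific service in result() below.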
        if jobs:
            done, pending = await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Method to handle results of the coroutines.
            In case of error or timeout, provide UNKNOWN state.
            """
            result = None
            try:
                if task in done:
                    result = task.result()
            except Exception:
                pass
            if result is None:
                entry = jobs.get(task)
                self.logger.warning('Failed to get status for %s',
                                    entry['service'])
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
                return entry
            else:
                return result

        services = list(map(result, jobs))
        return filter_list(services, filters, options)

    @accepts(
        Str('id_or_name'),
        Dict(
            'service-update',
            Bool('enable', default=False),
        ),
    )
    async def do_update(self, id_or_name, data):
        """
        Update service entry of `id_or_name`.

        Currently it only accepts the `enable` option, which controls whether the
        service should start on boot.

        """
        if not id_or_name.isdigit():
            svc = await self.middleware.call(
                'datastore.query', 'services.services',
                [('srv_service', '=', id_or_name)])
            if not svc:
                raise CallError(f'Service {id_or_name} not found.',
                                errno.ENOENT)
            id_or_name = svc[0]['id']

        rv = await self.middleware.call('datastore.update',
                                        'services.services', id_or_name,
                                        {'srv_enable': data['enable']})
        await self.middleware.call('etc.generate', 'rc')
        return rv

    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('onetime', default=True),
            Bool('wait', default=None, null=True),
            Bool('sync', default=None, null=True),
            register=True,
        ),
    )
    async def start(self, service, options=None):
        """ Start the service specified by `service`.

        The helper will use the method self._start_[service]() to start the service.
        If the method does not exist, it falls back to using service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'start',
                                        options)
        sn = self._started_notify("start", service)
        await self._simplecmd("start", service, options)
        return await self.started(service, sn)

    async def started(self, service, sn=None):
        """
        Test if service specified by `service` has been started.
        """
        if sn:
            await self.middleware.run_in_thread(sn.join)

        try:
            svc = await self.query([('service', '=', service)], {'get': True})
            self.middleware.send_event('service.query', 'CHANGED', fields=svc)
            return svc['state'] == 'RUNNING'
        except IndexError:
            f = getattr(self, '_started_' + service, None)
            if callable(f):
                if inspect.iscoroutinefunction(f):
                    return (await f())[0]
                else:
                    return f()[0]
            else:
                return (await self._started(service))[0]

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def stop(self, service, options=None):
        """ Stop the service specified by `service`.

        The helper will use method self._stop_[service]() to stop the service.
        If the method does not exist, it would fallback using service(8)."""
        await self.middleware.call_hook('service.pre_action', service, 'stop',
                                        options)
        sn = self._started_notify("stop", service)
        await self._simplecmd("stop", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def restart(self, service, options=None):
        """
        Restart the service specified by `service`.

        The helper will use the method self._restart_[service]() to restart the service.
        If no such method exists, it falls back to service(8)."""
        await self.middleware.call_hook('service.pre_action', service,
                                        'restart', options)
        sn = self._started_notify("restart", service)
        await self._simplecmd("restart", service, options)
        return await self.started(service, sn)

    @accepts(
        Str('service'),
        Ref('service-control'),
    )
    async def reload(self, service, options=None):
        """
        Reload the service specified by `service`.

        The helper will use the method self._reload_[service]() to reload the service.
        If no such method exists, the helper will restart the service instead."""
        await self.middleware.call_hook('service.pre_action', service,
                                        'reload', options)
        try:
            await self._simplecmd("reload", service, options)
        except Exception:
            await self.restart(service, options)
        return await self.started(service)
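
    # Dispatch note: each public verb above first looks for a matching
    # self._<verb>_<service>() handler via _simplecmd(). Without one,
    # start/stop/restart/reload fall back to service(8) (restart issues a
    # forcestop first), and reload() additionally falls back to a full
    # restart() if the reload attempt raises.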

    async def _get_status(self, service):
        f = getattr(self, '_started_' + service['service'], None)
        if callable(f):
            if inspect.iscoroutinefunction(f):
                running, pids = await f()
            else:
                running, pids = f()
        else:
            running, pids = await self._started(service['service'])

        if running:
            state = 'RUNNING'
        else:
            state = 'STOPPED'

        service['state'] = state
        service['pids'] = pids
        return service

    async def _simplecmd(self, action, what, options=None):
        self.logger.debug("Calling: %s(%s) ", action, what)
        f = getattr(self, '_' + action + '_' + what, None)
        if f is None:
            # Provide generic start/stop/restart verbs for rc.d scripts
            if what in self.SERVICE_DEFS:
                if self.SERVICE_DEFS[what].rc_script:
                    what = self.SERVICE_DEFS[what].rc_script
            if action in ("start", "stop", "restart", "reload"):
                if action == 'restart':
                    await self._system("/usr/sbin/service " + what +
                                       " forcestop ")
                await self._service(what, action, **(options or {}))
            else:
                raise ValueError("Internal error: Unknown command")
        else:
            call = f(**(options or {}))
            if inspect.iscoroutinefunction(f):
                await call

    async def _system(self, cmd, options=None):
        stdout = DEVNULL
        if options and 'stdout' in options:
            stdout = options['stdout']
        stderr = DEVNULL
        if options and 'stderr' in options:
            stderr = options['stderr']

        proc = await Popen(cmd,
                           stdout=stdout,
                           stderr=stderr,
                           shell=True,
                           close_fds=True)
        await proc.communicate()
        return proc.returncode

    async def _service(self, service, verb, **options):
        onetime = options.pop('onetime', None)
        force = options.pop('force', None)
        quiet = options.pop('quiet', None)
        extra = options.pop('extra', '')

        # The prefixes are mutually exclusive; precedence is
        # force, then onetime ("one"), then quiet.
        preverb = ''
        if force:
            preverb = 'force'
        elif onetime:
            preverb = 'one'
        elif quiet:
            preverb = 'quiet'

        return await self._system(
            '/usr/sbin/service {} {}{} {}'.format(
                service,
                preverb,
                verb,
                extra,
            ), options)
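
    # For illustration (hypothetical service name): _service('proftpd', 'start',
    # onetime=True) runs "/usr/sbin/service proftpd onestart", while force=True
    # would yield "forcestart" and quiet=True "quietstart"; force wins over
    # onetime, which wins over quiet.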

    def _started_notify(self, verb, what):
        """
        The check for started [or not] processes is currently done in 2 steps
        This is the first step which involves a thread StartNotify that watch for event
        before actually start/stop rc.d scripts

        Returns:
            StartNotify object if the service is known or None otherwise
        """

        if what in self.SERVICE_DEFS:
            sn = StartNotify(verb=verb,
                             pidfile=self.SERVICE_DEFS[what].pidfile)
            sn.start()
            return sn
        else:
            return None

    async def _started(self, what, notify=None):
        """
        This is the second step::
        Wait for the StartNotify thread to finish and then check for the
        status of pidfile/procname using pgrep

        Returns:
            True whether the service is alive, False otherwise
        """

        if what in self.SERVICE_DEFS:
            if notify:
                await self.middleware.run_in_thread(notify.join)

            if self.SERVICE_DEFS[what].pidfile:
                pgrep = "/bin/pgrep -F {}{}".format(
                    self.SERVICE_DEFS[what].pidfile,
                    ' ' + self.SERVICE_DEFS[what].procname
                    if self.SERVICE_DEFS[what].procname else '',
                )
            else:
                pgrep = "/bin/pgrep {}".format(
                    self.SERVICE_DEFS[what].procname)
            proc = await Popen(pgrep,
                               shell=True,
                               stdout=PIPE,
                               stderr=PIPE,
                               close_fds=True)
            data = (await proc.communicate())[0].decode()

            if proc.returncode == 0:
                return True, [
                    int(i) for i in data.strip().split('\n') if i.isdigit()
                ]
        return False, []
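
    # Illustration (hypothetical pidfile): for a definition with pidfile
    # '/var/run/foo.pid' and no procname, the check above runs
    # "/bin/pgrep -F /var/run/foo.pid"; a zero exit status is treated as
    # "running" and the matched PIDs are returned.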

    async def _start_asigra(self, **kwargs):
        await self.middleware.call('asigra.setup_filesystems')
        await self.middleware.call('asigra.setup_postgresql')
        await self._service("postgresql", "start", force=True, **kwargs)
        await self.middleware.call('asigra.setup_asigra')
        await self.middleware.call('etc.generate', 'asigra')
        await self._service("dssystem", "start", force=True, **kwargs)

    async def _stop_asigra(self, **kwargs):
        await self._service("dssystem", "stop", force=True, **kwargs)
        await self._service("postgresql", "stop", force=True, **kwargs)

    async def _restart_asigra(self, **kwargs):
        await self._stop_asigra(**kwargs)
        await self._start_asigra(**kwargs)

    async def _started_asigra(self, **kwargs):
        if await self._service("dssystem", "status", force=True, **
                               kwargs) != 0:
            return False, []
        return True, []

    async def _start_webdav(self, **kwargs):
        await self.middleware.call('etc.generate', 'webdav')
        await self._service("apache24", "start", **kwargs)

    async def _stop_webdav(self, **kwargs):
        await self._service("apache24", "stop", **kwargs)

    async def _restart_webdav(self, **kwargs):
        await self._service("apache24", "stop", force=True, **kwargs)
        await self.middleware.call('etc.generate', 'webdav')
        await self._service("apache24", "restart", **kwargs)

    async def _reload_webdav(self, **kwargs):
        await self.middleware.call('etc.generate', 'webdav')
        await self._service("apache24", "reload", **kwargs)

    async def _restart_django(self, **kwargs):
        await self._service("django", "restart", **kwargs)

    async def _start_webshell(self, **kwargs):
        await self._system(
            "/usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py")

    async def _restart_webshell(self, **kwargs):
        try:
            with open('/var/run/webshell.pid', 'r') as f:
                pid = f.read()
                os.kill(int(pid), signal.SIGTERM)
                time.sleep(0.2)
                os.kill(int(pid), signal.SIGKILL)
        except Exception:
            pass
        await self._system(
            "ulimit -n 1024 && /usr/local/bin/python /usr/local/www/freenasUI/tools/webshell.py"
        )

    async def _restart_iscsitarget(self, **kwargs):
        await self.middleware.call("etc.generate", "ctld")
        await self._service("ctld", "stop", force=True, **kwargs)
        await self.middleware.call("etc.generate", "ctld")
        await self._service("ctld", "restart", **kwargs)

    async def _start_iscsitarget(self, **kwargs):
        await self.middleware.call("etc.generate", "ctld")
        await self._service("ctld", "start", **kwargs)

    async def _stop_iscsitarget(self, **kwargs):
        with contextlib.suppress(IndexError):
            sysctl.filter("kern.cam.ctl.ha_peer")[0].value = ""

        await self._service("ctld", "stop", force=True, **kwargs)

    async def _reload_iscsitarget(self, **kwargs):
        await self.middleware.call("etc.generate", "ctld")
        await self._service("ctld", "reload", **kwargs)

    async def _start_collectd(self, **kwargs):
        if not await self.started('rrdcached'):
            # Let's ensure that before we start collectd, rrdcached is always running
            await self.start('rrdcached')

        await self.middleware.call('etc.generate', 'collectd')
        await self._service("collectd", "restart", **kwargs)

    async def _restart_collectd(self, **kwargs):
        await self._service("collectd", "stop", **kwargs)
        await self._start_collectd(**kwargs)

    async def _started_collectd(self, **kwargs):
        if await self._service('collectd', 'status', quiet=True, **kwargs):
            return False, []
        else:
            return True, []

    async def _started_rrdcached(self, **kwargs):
        if await self._service('rrdcached', 'status', quiet=True, **kwargs):
            return False, []
        else:
            return True, []

    async def _stop_rrdcached(self, **kwargs):
        await self._service("collectd", "stop", **kwargs)
        await self._service('rrdcached', 'stop', **kwargs)

    async def _restart_rrdcached(self, **kwargs):
        await self._stop_rrdcached(**kwargs)
        await self.start('rrdcached')
        await self.start('collectd')
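
    # Dependency note: collectd is expected to have rrdcached running first
    # (see _start_collectd), so the handlers above bring rrdcached up before
    # (re)starting collectd and stop collectd before stopping rrdcached.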

    async def _reload_rc(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')

    async def _restart_powerd(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')
        await self._service('powerd', 'restart', **kwargs)

    async def _reload_sysctl(self, **kwargs):
        await self.middleware.call('etc.generate', 'sysctl')

    async def _start_network(self, **kwargs):
        await self.middleware.call('interface.sync')
        await self.middleware.call('route.sync')

    async def _reload_named(self, **kwargs):
        await self._service("named", "reload", **kwargs)

    async def _restart_syscons(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')
        await self._service('syscons', 'restart', **kwargs)

    async def _reload_hostname(self, **kwargs):
        await self._system('/bin/hostname ""')
        await self.middleware.call('etc.generate', 'hostname')
        await self.middleware.call('etc.generate', 'rc')
        await self._service("hostname", "start", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", quiet=True, **kwargs)
        await self._restart_collectd(**kwargs)

    async def _reload_resolvconf(self, **kwargs):
        await self._reload_hostname()
        await self.middleware.call('dns.sync')

    async def _reload_networkgeneral(self, **kwargs):
        await self._reload_resolvconf()
        await self._service("routing", "restart", **kwargs)

    async def _start_routing(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')
        await self._service('routing', 'start', **kwargs)

    async def _reload_timeservices(self, **kwargs):
        await self.middleware.call('etc.generate', 'localtime')
        await self.middleware.call('etc.generate', 'ntpd')
        await self._service("ntpd", "restart", **kwargs)
        settings = await self.middleware.call('datastore.query',
                                              'system.settings', [], {
                                                  'order_by': ['-id'],
                                                  'get': True
                                              })
        os.environ['TZ'] = settings['stg_timezone']
        time.tzset()

    async def _restart_ntpd(self, **kwargs):
        await self.middleware.call('etc.generate', 'ntpd')
        await self._service('ntpd', 'restart', **kwargs)

    async def _start_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")
        await self._service("smartd-daemon", "start", **kwargs)

    def _initializing_smartd_pid(self):
        """
        smartd initialization can take a long time if lots of disks are present
        It only writes pidfile at the end of the initialization but forks immediately
        This method returns PID of smartd process that is still initializing and has not written pidfile yet
        """
        if os.path.exists(self.SERVICE_DEFS["smartd"].pidfile):
            # Already started, no need for special handling
            return

        for process in psutil.process_iter(attrs=["cmdline", "create_time"]):
            if process.info["cmdline"][:1] == ["/usr/local/sbin/smartd"]:
                break
        else:
            # No smartd process present
            return

        lifetime = time.time() - process.info["create_time"]
        if lifetime < 300:
            # Looks like just the process we need
            return process.pid

        self.logger.warning(
            "Got an orphan smartd process: pid=%r, lifetime=%r", process.pid,
            lifetime)
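
    # Summary of the helper above: it returns None when the pidfile already
    # exists or no smartd process is found, returns the PID of a pidfile-less
    # smartd younger than five minutes (assumed to still be initializing), and
    # logs anything older as an orphan, also returning None.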

    async def _started_smartd(self, **kwargs):
        result = await self._started("smartd")
        if result[0]:
            return result

        if await self.middleware.run_in_thread(self._initializing_smartd_pid
                                               ) is not None:
            return True, []

        return False, []

    async def _reload_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")

        pid = await self.middleware.run_in_thread(self._initializing_smartd_pid
                                                  )
        if pid is None:
            await self._service("smartd-daemon", "reload", **kwargs)
            return

        os.kill(pid, signal.SIGKILL)
        await self._service("smartd-daemon", "start", **kwargs)

    async def _restart_smartd(self, **kwargs):
        await self.middleware.call("etc.generate", "smartd")

        pid = await self.middleware.run_in_thread(self._initializing_smartd_pid
                                                  )
        if pid is None:
            await self._service("smartd-daemon", "stop", force=True, **kwargs)
            await self._service("smartd-daemon", "restart", **kwargs)
            return

        os.kill(pid, signal.SIGKILL)
        await self._service("smartd-daemon", "start", **kwargs)

    async def _stop_smartd(self, **kwargs):
        pid = await self.middleware.run_in_thread(self._initializing_smartd_pid
                                                  )
        if pid is None:
            await self._service("smartd-daemon", "stop", force=True, **kwargs)
            return

        os.kill(pid, signal.SIGKILL)

    async def _reload_ssh(self, **kwargs):
        await self.middleware.call('etc.generate', 'ssh')
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("openssh", "reload", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssh(self, **kwargs):
        await self.middleware.call('etc.generate', 'ssh')
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("openssh", "start", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _stop_ssh(self, **kwargs):
        await self._service("openssh", "stop", force=True, **kwargs)
        await self.middleware.call('mdnsadvertise.restart')

    async def _restart_ssh(self, **kwargs):
        await self.middleware.call('etc.generate', 'ssh')
        await self._service("openssh", "stop", force=True, **kwargs)
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("openssh", "restart", **kwargs)
        await self._service("ix_sshd_save_keys", "start", quiet=True, **kwargs)

    async def _start_ssl(self, **kwargs):
        await self.middleware.call('etc.generate', 'ssl')

    async def _start_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio",
                            "start",
                            quiet=True,
                            stdout=None,
                            stderr=None,
                            **kwargs)

    async def _reload_s3(self, **kwargs):
        await self.middleware.call('etc.generate', 's3')
        await self._service("minio",
                            "restart",
                            quiet=True,
                            stdout=None,
                            stderr=None,
                            **kwargs)

    async def _reload_rsync(self, **kwargs):
        await self.middleware.call('etc.generate', 'rsync')
        await self._service("rsyncd", "restart", **kwargs)

    async def _restart_rsync(self, **kwargs):
        await self._stop_rsync()
        await self._start_rsync()

    async def _start_rsync(self, **kwargs):
        await self.middleware.call('etc.generate', 'rsync')
        await self._service("rsyncd", "start", **kwargs)

    async def _stop_rsync(self, **kwargs):
        await self._service("rsyncd", "stop", force=True, **kwargs)

    async def _started_nis(self, **kwargs):
        return (await self.middleware.call('nis.started')), []

    async def _start_nis(self, **kwargs):
        return (await self.middleware.call('nis.start')), []

    async def _restart_nis(self, **kwargs):
        await self.middleware.call('nis.stop')
        return (await self.middleware.call('nis.start')), []

    async def _stop_nis(self, **kwargs):
        return (await self.middleware.call('nis.stop')), []

    async def _started_ldap(self, **kwargs):
        if (await self._system('/usr/sbin/service ix-ldap status') != 0):
            return False, []
        return await self.middleware.call('notifier.ldap_status'), []

    async def _start_ldap(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl start"):
            res = True
        return res

    async def _stop_ldap(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl stop"):
            res = True
        return res

    async def _restart_ldap(self, **kwargs):
        await self.middleware.call('etc.generate', 'rc')
        res = False
        if not await self._system("/etc/directoryservice/LDAP/ctl restart"):
            res = True
        return res

    async def _start_lldp(self, **kwargs):
        await self._service("ladvd", "start", **kwargs)

    async def _stop_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)

    async def _restart_lldp(self, **kwargs):
        await self._service("ladvd", "stop", force=True, **kwargs)
        await self._service("ladvd", "restart", **kwargs)

    async def _clear_activedirectory_config(self):
        await self._system(
            "/bin/rm -f /etc/directoryservice/ActiveDirectory/config")

    async def _started_activedirectory(self, **kwargs):
        # Perform a wbinfo -t because it's the most accurate single test we have to
        # detect problems with the AD join. The default winbind timeout is 60 seconds
        # (as of Samba 4.7) and can be tuned with the smb4.conf parameter
        # "winbind request timeout".
        if await self._system('/usr/local/bin/wbinfo -t') != 0:
            self.logger.debug('AD status check: wbinfo -t failed')
            return False, []
        return True, []

    async def _start_activedirectory(self, **kwargs):
        res = False
        if not await self._system(
                "/etc/directoryservice/ActiveDirectory/ctl start"):
            res = True
        return res

    async def _stop_activedirectory(self, **kwargs):
        res = False
        if not await self._system(
                "/etc/directoryservice/ActiveDirectory/ctl stop"):
            res = True
        return res

    async def _restart_activedirectory(self, **kwargs):
        res = False
        if not await self._system(
                "/etc/directoryservice/ActiveDirectory/ctl restart"):
            res = True
        return res

    async def _reload_activedirectory(self, **kwargs):
        # Steps required to force winbind to connect to a new DC if the DC it is
        # connected to goes down. We may need to expand the list of operations below
        # to include a fresh kinit. Some information about the winbind connection is
        # stored in Samba's gencache; in test cases, flushing gencache
        # (net cache flush) was not required.
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "start", quiet=True, **kwargs)

    async def _restart_syslogd(self, **kwargs):
        await self.middleware.call("etc.generate", "syslogd")
        await self._system("/etc/local/rc.d/syslog-ng restart")

    async def _start_syslogd(self, **kwargs):
        await self.middleware.call("etc.generate", "syslogd")
        await self._system("/etc/local/rc.d/syslog-ng start")

    async def _stop_syslogd(self, **kwargs):
        await self._system("/etc/local/rc.d/syslog-ng stop")

    async def _reload_syslogd(self, **kwargs):
        await self.middleware.call("etc.generate", "syslogd")
        await self._system("/etc/local/rc.d/syslog-ng reload")

    async def _start_tftp(self, **kwargs):
        await self.middleware.call('etc.generate', 'inetd')
        await self._service("inetd", "start", **kwargs)

    async def _reload_tftp(self, **kwargs):
        await self.middleware.call('etc.generate', 'inetd')
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_tftp(self, **kwargs):
        await self.middleware.call('etc.generate', 'inetd')
        await self._service("inetd", "stop", force=True, **kwargs)
        await self._service("inetd", "restart", **kwargs)

    async def _restart_cron(self, **kwargs):
        await self.middleware.call('etc.generate', 'cron')

    async def _start_motd(self, **kwargs):
        await self.middleware.call('etc.generate', 'motd')
        await self._service("motd", "start", quiet=True, **kwargs)

    async def _start_ttys(self, **kwargs):
        await self.middleware.call('etc.generate', 'ttys')

    async def _reload_ftp(self, **kwargs):
        await self.middleware.call("etc.generate", "ftp")
        await self._service("proftpd", "restart", **kwargs)

    async def _restart_ftp(self, **kwargs):
        await self._stop_ftp()
        await self._start_ftp()

    async def _start_ftp(self, **kwargs):
        await self.middleware.call("etc.generate", "ftp")
        await self._service("proftpd", "start", **kwargs)

    async def _stop_ftp(self, **kwargs):
        await self._service("proftpd", "stop", force=True, **kwargs)

    async def _start_ups(self, **kwargs):
        await self.middleware.call('etc.generate', 'ups')
        await self._service("nut", "start", **kwargs)
        await self._service("nut_upsmon", "start", **kwargs)
        await self._service("nut_upslog", "start", **kwargs)
        if await self.started('collectd'):
            asyncio.ensure_future(self.restart('collectd'))

    async def _stop_ups(self, **kwargs):
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut", "stop", force=True, **kwargs)
        if await self.started('collectd'):
            asyncio.ensure_future(self.restart('collectd'))

    async def _restart_ups(self, **kwargs):
        await self.middleware.call('etc.generate', 'ups')
        await self._service("nut", "stop", force=True, **kwargs)
        await self._service("nut_upsmon", "stop", force=True, **kwargs)
        await self._service("nut_upslog", "stop", force=True, **kwargs)
        await self._service("nut", "restart", **kwargs)
        await self._service("nut_upsmon", "restart", **kwargs)
        await self._service("nut_upslog", "restart", **kwargs)
        if await self.started('collectd'):
            asyncio.ensure_future(self.restart('collectd'))

    async def _started_ups(self, **kwargs):
        return await self._started('upsmon')

    async def _start_afp(self, **kwargs):
        await self.middleware.call("etc.generate", "afpd")
        await self._service("netatalk", "start", **kwargs)

    async def _stop_afp(self, **kwargs):
        await self._service("netatalk", "stop", force=True, **kwargs)
        # When netatalk stops, a stuck afpd or cnid_metad process gets left
        # behind, which can cause issues when netatalk is restarted.
        await self._system("pkill -9 afpd")
        await self._system("pkill -9 cnid_metad")

    async def _restart_afp(self, **kwargs):
        await self._stop_afp()
        await self._start_afp()

    async def _reload_afp(self, **kwargs):
        await self.middleware.call("etc.generate", "afpd")
        await self._system("killall -1 netatalk")

    async def _reload_nfs(self, **kwargs):
        await self.middleware.call("etc.generate", "nfsd")

    async def _restart_nfs(self, **kwargs):
        await self._stop_nfs(**kwargs)
        await self._start_nfs(**kwargs)

    async def _stop_nfs(self, **kwargs):
        await self._service("lockd", "stop", force=True, **kwargs)
        await self._service("statd", "stop", force=True, **kwargs)
        await self._service("nfsd", "stop", force=True, **kwargs)
        await self._service("mountd", "stop", force=True, **kwargs)
        await self._service("nfsuserd", "stop", force=True, **kwargs)
        await self._service("gssd", "stop", force=True, **kwargs)
        await self._service("rpcbind", "stop", force=True, **kwargs)

    async def _start_nfs(self, **kwargs):
        nfs = await self.middleware.call('datastore.config', 'services.nfs')
        await self.middleware.call("etc.generate", "nfsd")
        await self._service("rpcbind", "start", quiet=True, **kwargs)
        await self._service("gssd", "start", quiet=True, **kwargs)
        # Workaround to work with "onetime", since the rc scripts depend on rc flags.
        if nfs['nfs_srv_v4']:
            sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 4
            if nfs['nfs_srv_v4_v3owner']:
                # Per RFC 7530, sending NFSv3-style UIDs/GIDs across the wire is now
                # allowed. Both of these sysctls must be set to enable that behavior.
                sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 1
                sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 1
                await self._service("nfsuserd", "stop", force=True, **kwargs)
            else:
                sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 0
                sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 0
                await self._service("nfsuserd", "start", quiet=True, **kwargs)
        else:
            sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 3
            if nfs['nfs_srv_16']:
                await self._service("nfsuserd", "start", quiet=True, **kwargs)
        await self._service("mountd", "start", quiet=True, **kwargs)
        await self._service("nfsd", "start", quiet=True, **kwargs)
        await self._service("statd", "start", quiet=True, **kwargs)
        await self._service("lockd", "start", quiet=True, **kwargs)

    async def _start_dynamicdns(self, **kwargs):
        await self.middleware.call('etc.generate', 'inadyn')
        await self._service("inadyn", "start", **kwargs)

    async def _restart_dynamicdns(self, **kwargs):
        await self.middleware.call('etc.generate', 'inadyn')
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _reload_dynamicdns(self, **kwargs):
        await self.middleware.call('etc.generate', 'inadyn')
        await self._service("inadyn", "stop", force=True, **kwargs)
        await self._service("inadyn", "restart", **kwargs)

    async def _restart_system(self, **kwargs):
        asyncio.ensure_future(
            self.middleware.call('system.reboot', {'delay': 3}))

    async def _stop_system(self, **kwargs):
        asyncio.ensure_future(
            self.middleware.call('system.shutdown', {'delay': 3}))

    async def _reload_cifs(self, **kwargs):
        await self.middleware.call("etc.generate", "smb_share")
        await self._service("samba_server", "reload", force=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdnsd is restarted we need to reload netatalk so it re-registers
        # with mDNS. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _restart_cifs(self, **kwargs):
        await self.middleware.call("etc.generate", "smb")
        await self.middleware.call("etc.generate", "smb_share")
        await self._service("samba_server", "stop", force=True, **kwargs)
        await self._service("samba_server", "restart", quiet=True, **kwargs)
        await self._service("mdnsd", "restart", **kwargs)
        # After mdnsd is restarted we need to reload netatalk so it re-registers
        # with mDNS. Ticket #7133
        await self._service("netatalk", "reload", **kwargs)

    async def _start_cifs(self, **kwargs):
        await self.middleware.call("etc.generate", "smb")
        await self.middleware.call("etc.generate", "smb_share")
        await self._service("samba_server", "start", quiet=True, **kwargs)

    async def _stop_cifs(self, **kwargs):
        await self._service("samba_server", "stop", force=True, **kwargs)

    async def _started_cifs(self, **kwargs):
        if await self._service("samba_server",
                               "status",
                               quiet=True,
                               onetime=True,
                               **kwargs):
            return False, []
        else:
            return True, []

    async def _start_snmp(self, **kwargs):
        await self.middleware.call("etc.generate", "snmpd")
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _stop_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", quiet=True, **kwargs)

    async def _restart_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self.middleware.call("etc.generate", "snmpd")
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _reload_snmp(self, **kwargs):
        await self._service("snmp-agent", "stop", quiet=True, **kwargs)
        await self._service("snmpd", "stop", force=True, **kwargs)
        await self.middleware.call("etc.generate", "snmpd")
        await self._service("snmpd", "start", quiet=True, **kwargs)
        await self._service("snmp-agent", "start", quiet=True, **kwargs)

    async def _restart_http(self, **kwargs):
        await self.middleware.call("etc.generate", "nginx")
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("nginx", "restart", **kwargs)

    async def _reload_http(self, **kwargs):
        await self.middleware.call("etc.generate", "nginx")
        await self.middleware.call('mdnsadvertise.restart')
        await self._service("nginx", "reload", **kwargs)

    async def _reload_loader(self, **kwargs):
        await self.middleware.call("etc.generate", "loader")

    async def _restart_disk(self, **kwargs):
        await self._reload_disk(**kwargs)

    async def _reload_disk(self, **kwargs):
        await self.middleware.call('etc.generate', 'fstab')
        await self._service("mountlate", "start", quiet=True, **kwargs)
        # Restarting collectd can take a long time. There is no benefit in
        # waiting for it, since even if it fails it will not tell the user
        # anything useful.
        asyncio.ensure_future(self.restart("collectd", kwargs))

    async def _reload_user(self, **kwargs):
        await self.middleware.call("etc.generate", "user")
        await self.middleware.call('etc.generate', 'aliases')
        await self.middleware.call('etc.generate', 'sudoers')
        await self.reload("cifs", kwargs)

    async def _restart_system_datasets(self, **kwargs):
        systemdataset = await self.middleware.call('systemdataset.setup')
        if not systemdataset:
            return None
        if systemdataset['syslog']:
            await self.restart("syslogd", kwargs)
        await self.restart("cifs", kwargs)

        # Restarting rrdcached can take a long time. There is no
        # benefit in waiting for it, since even if it fails it will not
        # tell the user anything useful.
        # Restarting rrdcached will make sure that we start/restart collectd as well
        asyncio.ensure_future(self.restart("rrdcached", kwargs))

    async def _start_netdata(self, **kwargs):
        await self.middleware.call('etc.generate', 'netdata')
        await self._service('netdata', 'start', **kwargs)

    async def _restart_netdata(self, **kwargs):
        await self._service('netdata', 'stop')
        await self._start_netdata(**kwargs)
Beispiel #29
0
class IdmapTDBService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_tdb'
        datastore_prefix = 'idmap_tdb_'
        datastore_extend = 'idmap.common_backend_extend'
        namespace = 'idmap.tdb'

    @accepts(
        Dict('idmap_tdb_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             register=True))
    async def do_create(self, data):
        """
        Create an entry in the idmap backend table.
        """
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_tdb_create', await
            self.middleware.call('idmap._common_validate', 'tdb', data))
        if verrors:
            raise verrors

        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])
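
    # Hypothetical call sketch (illustrative values only), following the schema
    # accepted above:
    #   await self.middleware.call('idmap.tdb.create', {
    #       'domain': {'idmap_domain_name': 'EXAMPLE'},
    #       'range_low': 90000001,
    #       'range_high': 100000000,
    #   })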

    @accepts(Int('id', required=True),
             Patch("idmap_tdb_create", "idmap_tdb_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update an entry in the idmap backend table by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_tdb_update', await
            self.middleware.call('idmap._common_validate', 'tdb', new))

        if verrors:
            raise verrors

        new = await self.middleware.call('idmap.common_backend_compress', new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete an idmap-to-backend mapping by id.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
Beispiel #30
0
class RsyncService(Service):

    def __rsync_worker(self, line, user, job):
        proc_stdout = tempfile.TemporaryFile(mode='w+b', buffering=0)
        try:
            rsync_proc = subprocess.Popen(
                line,
                shell=True,
                stdout=proc_stdout.fileno(),
                stderr=subprocess.PIPE,
                bufsize=0,
                preexec_fn=demote(user)
            )
            seek = 0
            old_seek = 0
            progress = 0
            message = 'Starting rsync copy job...'
            while rsync_proc.poll() is None:
                job.set_progress(progress, message)
                proc_op = ''
                proc_stdout.seek(seek)
                try:
                    while True:
                        op_byte = proc_stdout.read(1).decode('utf8')
                        if op_byte == '':
                            # In this case break before incrementing `seek`
                            break
                        seek += 1
                        if op_byte == '\r':
                            break
                        proc_op += op_byte
                    if old_seek != seek:
                        old_seek = seek
                        message = proc_op.strip()
                        try:
                            progress = int([x for x in message.split(' ') if '%' in x][0][:-1])
                        except (IndexError, ValueError):
                            pass
                except BaseException as err:
                    # Catch IOError errno 9 (Bad file descriptor), which
                    # usually arises when an already-closed file object is used
                    # here. In that case break; the outer while loop will check
                    # whether rsync_proc.poll() is still None and DTRT.
                    if hasattr(err, 'errno') and err.errno == 9:
                        break
                    logger.debug('Error whilst parsing rsync progress', exc_info=True)

        except BaseException as e:
            raise CallError(f'Rsync copy job id: {job.id} failed due to: {e}', errno.EIO)

        if rsync_proc.returncode != 0:
            job.set_progress(None, 'Rsync copy job failed')
            raise CallError(
                f'Rsync copy job id: {job.id} returned non-zero exit code. Command used was: {line}. Error: {rsync_proc.stderr.read()}'
            )
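
    # Note on the parser above: rsync's --info=progress2 output is redrawn in
    # place with carriage returns, so the worker re-reads the temporary stdout
    # file from the last seek offset, treats the latest '\r'-terminated chunk
    # as the status message, and extracts the first '%' token from it to feed
    # job.set_progress().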

    @accepts(Dict(
        'rsync-copy',
        Str('user', required=True),
        Str('path', required=True),
        Str('remote_user'),
        Str('remote_host', required=True),
        Str('remote_path'),
        Int('remote_ssh_port'),
        Str('remote_module'),
        Str('direction', enum=['PUSH', 'PULL'], required=True),
        Str('mode', enum=['MODULE', 'SSH'], required=True),
        Str('remote_password'),
        Dict(
            'properties',
            Bool('recursive'),
            Bool('compress'),
            Bool('times'),
            Bool('archive'),
            Bool('delete'),
            Bool('preserve_permissions'),
            Bool('preserve_attributes'),
            Bool('delay_updates')
        ),
        required=True
    ))
    @job()
    def copy(self, job, rcopy):
        """
        Start an rsync copy task between the current FreeNAS machine and the
        specified remote host (local copies are supported too), and report the
        progress of the copy task.
        """

        # Assigning variables and such
        user = rcopy.get('user')
        path = rcopy.get('path')
        mode = rcopy.get('mode')
        remote_path = rcopy.get('remote_path')
        remote_host = rcopy.get('remote_host')
        remote_module = rcopy.get('remote_module')
        remote_user = rcopy.get('remote_user', rcopy.get('user'))
        remote_address = remote_host if '@' in remote_host else f'"{remote_user}"@{remote_host}'
        remote_password = rcopy.get('remote_password', None)
        password_file = None
        properties = rcopy.get('properties', defaultdict(bool))

        # Let's do a brief check of all the user provided parameters
        if not path:
            raise ValueError('The path is required')
        elif not os.path.exists(path):
            raise CallError(f'The specified path: {path} does not exist', errno.ENOENT)

        if not remote_host:
            raise ValueError('The remote host is required')

        if mode == 'SSH' and not remote_path:
            raise ValueError('The remote path is required')
        elif mode == 'MODULE' and not remote_module:
            raise ValueError('The remote module is required')

        try:
            pwd.getpwnam(user)
        except KeyError:
            raise CallError(f'User: {user} does not exist', errno.ENOENT)
        if (
            mode == 'SSH' and
            rcopy.get('remote_host') in ['127.0.0.1', 'localhost'] and
            not os.path.exists(remote_path)
        ):
            raise CallError(f'The specified path: {remote_path} does not exist', errno.ENOENT)

        # Phew! With that out of the way, let's begin the transfer.

        line = f'{RSYNC_PATH} --info=progress2 -h'
        if properties:
            if properties.get('recursive'):
                line += ' -r'
            if properties.get('times'):
                line += ' -t'
            if properties.get('compress'):
                line += ' -z'
            if properties.get('archive'):
                line += ' -a'
            if properties.get('preserve_permissions'):
                line += ' -p'
            if properties.get('preserve_attributes'):
                line += ' -X'
            if properties.get('delete'):
                line += ' --delete-delay'
            if properties.get('delay_updates'):
                line += ' --delay-updates'

        if mode == 'MODULE':
            if rcopy.get('direction') == 'PUSH':
                line += f' "{path}" {remote_address}::"{remote_module}"'
            else:
                line += f' {remote_address}::"{remote_module}" "{path}"'
            if remote_password:
                password_file = tempfile.NamedTemporaryFile(mode='w')

                password_file.write(remote_password)
                password_file.flush()
                shutil.chown(password_file.name, user=user)
                os.chmod(password_file.name, 0o600)
                line += f' --password-file={password_file.name}'
        else:
            # There is some code duplication here, but it's simple. If you find
            # a way to remove it (that does NOT break localhost-based rsync
            # copies), please go for it.
            if rcopy.get('remote_host') in ['127.0.0.1', 'localhost']:
                if rcopy['direction'] == 'PUSH':
                    line += f' "{path}" "{remote_path}"'
                else:
                    line += f' "{remote_path}" "{path}"'
            else:
                line += ' -e "ssh -p {0} -o BatchMode=yes -o StrictHostKeyChecking=yes"'.format(
                    rcopy.get('remote_ssh_port', 22)
                )
                if rcopy['direction'] == 'PUSH':
                    line += f' "{path}" {remote_address}:\\""{remote_path}"\\"'
                else:
                    line += f' {remote_address}:\\""{remote_path}"\\" "{path}"'
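
        # Illustrative example (hypothetical values): a PUSH over SSH to
        # 10.0.0.5 as user "backup" with recursive and times enabled assembles
        # roughly:
        #   <RSYNC_PATH> --info=progress2 -h -r -t \
        #       -e "ssh -p 22 -o BatchMode=yes -o StrictHostKeyChecking=yes" \
        #       "/mnt/tank/data" "backup"@10.0.0.5:\""/remote/path"\"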

        logger.debug(f'Executing rsync job id: {job.id} with the following command {line}')
        try:
            t = threading.Thread(target=self.__rsync_worker, args=(line, user, job), daemon=True)
            t.start()
            t.join()
        finally:
            if password_file:
                password_file.close()

        job.set_progress(100, 'Rsync copy job successfully completed')